Commit d7714308 authored by Vladislav Vaintroub's avatar Vladislav Vaintroub Committed by Sergei Golubchik

MDEV-9566 Add Percona Xtrabackup 2.3.7

parent 9c4b7cad
# Copyright (c) 2013 Percona LLC and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Helper modules that know how to locate the external dependencies.
INCLUDE(gcrypt)
INCLUDE(curl)
INCLUDE(libev)

# Bundled third-party sources built inside this tree.
ADD_SUBDIRECTORY(libarchive)
ADD_SUBDIRECTORY(jsmn)

# Locate the external libraries required by the backup tools.
FIND_GCRYPT()
FIND_CURL()
FIND_EV()

# xxd is needed to embed version_check script
FIND_PROGRAM(XXD_PATH xxd)
IF(NOT XXD_PATH)
  MESSAGE(FATAL_ERROR "xxd not found. Try to install vim-common.")
ENDIF()
# Header search paths: server, InnoDB, bundled third-party code, and the
# build directory (for generated headers such as xtrabackup_version.h).
INCLUDE_DIRECTORIES(
${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/storage/innobase/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/storage/innobase/xtrabackup/src/libarchive/libarchive
${CMAKE_SOURCE_DIR}/storage/innobase/xtrabackup/src/quicklz
${CMAKE_SOURCE_DIR}/storage/innobase/xtrabackup/src/jsmn
${GCRYPT_INCLUDE_DIR}
${CURL_INCLUDE_DIRS}
${LIBEV_INCLUDE_DIRS}
${CMAKE_CURRENT_BINARY_DIR}
)
# Propagate SSL-related preprocessor definitions from the server build.
ADD_DEFINITIONS(${SSL_DEFINES})
########################################################################
# xtrabackup binary
########################################################################

# Generate the version header from the template in the source tree.
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/xtrabackup_version.h.in
               ${CMAKE_CURRENT_BINARY_DIR}/xtrabackup_version.h )

# Embed version_check.pl into a C header using xxd.
# DEPENDS makes the header regenerate whenever the script changes
# (it was missing before, leaving a stale header after edits);
# VERBATIM gives platform-independent argument escaping.
ADD_CUSTOM_COMMAND(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/version_check_pl.h
  COMMAND ${XXD_PATH} --include version_check.pl
          ${CMAKE_CURRENT_BINARY_DIR}/version_check_pl.h
  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/version_check.pl
  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
  VERBATIM)

ADD_CUSTOM_TARGET(GenVersionCheck
  DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/version_check_pl.h)

# These sources use client-side MySQL APIs.
SET_SOURCE_FILES_PROPERTIES(
  xtrabackup.cc
  backup_mysql.cc
  PROPERTIES COMPILE_FLAGS -DMYSQL_CLIENT)
# Sources of the main xtrabackup binary; quicklz and the client
# authentication source are compiled in directly.
MYSQL_ADD_EXECUTABLE(xtrabackup
xtrabackup.cc
innobackupex.cc
changed_page_bitmap.cc
compact.cc
datasink.c
ds_archive.c
ds_buffer.c
ds_compress.c
ds_encrypt.c
ds_local.c
ds_stdout.c
ds_tmpfile.c
ds_xbstream.c
fil_cur.cc
quicklz/quicklz.c
read_filt.cc
write_filt.cc
wsrep.cc
xbcrypt_common.c
xbcrypt_write.c
xbstream_write.c
backup_mysql.cc
backup_copy.cc
../../../../sql-common/client_authentication.cc
)
# Export the executable's symbols (needed by server code it links in).
SET_TARGET_PROPERTIES(xtrabackup PROPERTIES ENABLE_EXPORTS TRUE)
TARGET_LINK_LIBRARIES(xtrabackup
mysqlserver
${GCRYPT_LIBS}
archive_static
)
# Ensure version_check_pl.h is generated before xtrabackup compiles.
ADD_DEPENDENCIES(xtrabackup GenVersionCheck)
########################################################################
# innobackupex symlink
########################################################################
# Create the innobackupex -> xtrabackup symlink after every build.
# POST_BUILD and WORKING_DIRECTORY are explicit so the link is created
# exactly where the INSTALL(FILES ...) below expects to find it.
ADD_CUSTOM_COMMAND(TARGET xtrabackup POST_BUILD
  COMMAND ${CMAKE_COMMAND} ARGS -E create_symlink
          xtrabackup innobackupex
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  VERBATIM)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/innobackupex DESTINATION bin)
########################################################################
# xbstream binary
########################################################################
MYSQL_ADD_EXECUTABLE(xbstream
ds_buffer.c
ds_local.c
ds_stdout.c
datasink.c
xbstream.c
xbstream_read.c
xbstream_write.c
)
# All sources are C; force the C++ linker for the C++ dependencies.
SET_TARGET_PROPERTIES(xbstream
PROPERTIES LINKER_LANGUAGE CXX
)
TARGET_LINK_LIBRARIES(xbstream
mysys
mysys_ssl
)
########################################################################
# xbcrypt binary
########################################################################
MYSQL_ADD_EXECUTABLE(xbcrypt
xbcrypt.c
xbcrypt_common.c
xbcrypt_read.c
xbcrypt_write.c
)
# All sources are C; force the C++ linker for the C++ dependencies.
SET_TARGET_PROPERTIES(xbcrypt
PROPERTIES LINKER_LANGUAGE CXX
)
TARGET_LINK_LIBRARIES(xbcrypt
${GCRYPT_LIBS}
mysys
mysys_ssl
)
########################################################################
# xbcloud binary
########################################################################
MYSQL_ADD_EXECUTABLE(xbcloud
xbcloud.cc
)
SET_TARGET_PROPERTIES(xbcloud
PROPERTIES LINKER_LANGUAGE CXX
)
# xbcloud uploads streams over HTTP(S): libev for the event loop,
# curl for transfers, jsmn for JSON parsing.
TARGET_LINK_LIBRARIES(xbcloud
${GCRYPT_LIBS}
${LIBEV_LIBRARIES}
${CURL_LIBRARIES}
mysys
mysys_ssl
jsmn
)
/******************************************************
hot backup tool for InnoDB
(c) 2009-2015 Percona LLC and/or its affiliates
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************
This file incorporates work covered by the following copyright and
permission notice:
Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
*******************************************************/
#include <my_global.h>
#include <os0file.h>
#include <my_dir.h>
#include <ut0mem.h>
#include <srv0start.h>
#include <fil0fil.h>
#include <set>
#include <string>
#include <mysqld.h>
#include <version_check_pl.h>
#include <sstream>
#include "fil_cur.h"
#include "xtrabackup.h"
#include "common.h"
#include "backup_copy.h"
#include "backup_mysql.h"
/* list of files to sync for --rsync mode */
static std::set<std::string> rsync_list;
/* locations of tablespaces read from .isl files */
static std::map<std::string, std::string> tablespace_locations;
/* Whether LOCK BINLOG FOR BACKUP has been issued during backup */
bool binlog_locked;
/************************************************************************
Struct represents file or directory. Filled from a shared datadir_iter_t
by datadir_node_fill(); buffers grow lazily and are released with
datadir_node_free(). Not shared between threads. */
struct datadir_node_t {
ulint dbpath_len;
/* absolute path of the entry, and the allocated buffer size */
char *filepath;
ulint filepath_len;
/* path relative to the datadir, and the allocated buffer size */
char *filepath_rel;
ulint filepath_rel_len;
/* true for a database directory that contained no files */
bool is_empty_dir;
/* true for a regular file, false for a directory */
bool is_file;
};
/************************************************************************
Holds the state needed to enumerate files in MySQL data directory.
May be shared between threads; mutable state is protected by 'mutex'
(see datadir_iter_next). */
struct datadir_iter_t {
char *datadir_path;
/* current first-level (database) directory path and buffer size */
char *dbpath;
ulint dbpath_len;
/* current file path, absolute and datadir-relative, with buffer sizes */
char *filepath;
ulint filepath_len;
char *filepath_rel;
ulint filepath_rel_len;
os_ib_mutex_t mutex;
/* open handles: datadir itself and the current database directory */
os_file_dir_t dir;
os_file_dir_t dbdir;
os_file_stat_t dbinfo;
os_file_stat_t fileinfo;
dberr_t err;
bool is_empty_dir;
bool is_file;
/* when true, plain files at the datadir top level are skipped */
bool skip_first_level;
};
/************************************************************************
Represents the context of the thread processing MySQL data directory. */
struct datadir_thread_ctxt_t {
/* shared iterator the thread pulls work from */
datadir_iter_t *it;
/* 1-based thread number, used in log messages */
uint n_thread;
/* number of still-running threads, protected by count_mutex */
uint *count;
os_ib_mutex_t count_mutex;
os_thread_id_t id;
/* thread's overall result; false on any failure */
bool ret;
};
/************************************************************************
Return true if the character is a path separator ('/' or the
platform-specific alternative, per FN_LIBCHAR/FN_LIBCHAR2). */
bool
is_path_separator(char c)
{
return(c == FN_LIBCHAR || c == FN_LIBCHAR2);
}
/************************************************************************
Fill the node struct. Memory for node need to be allocated and freed by
the caller. It is caller responsibility to initialize node with
datadir_node_init and cleanup the memory with datadir_node_free.
Node can not be shared between threads. */
static
void
datadir_node_fill(datadir_node_t *node, datadir_iter_t *it)
{
/* Grow the destination buffers when the iterator's current paths do
not fit. NOTE(review): the buffers are released here with free() but
allocated with ut_malloc() (and freed with ut_free() in
datadir_node_free) — verify these allocators are interchangeable. */
if (node->filepath_len < it->filepath_len) {
free(node->filepath);
node->filepath = (char*)(ut_malloc(it->filepath_len));
node->filepath_len = it->filepath_len;
}
if (node->filepath_rel_len < it->filepath_rel_len) {
free(node->filepath_rel);
node->filepath_rel = (char*)(ut_malloc(it->filepath_rel_len));
node->filepath_rel_len = it->filepath_rel_len;
}
/* Copy the iterator's current entry into the caller-local node. */
strcpy(node->filepath, it->filepath);
strcpy(node->filepath_rel, it->filepath_rel);
node->is_empty_dir = it->is_empty_dir;
node->is_file = it->is_file;
}
/* Release the node's path buffers and reset it to the zeroed state. */
static
void
datadir_node_free(datadir_node_t *node)
{
ut_free(node->filepath);
ut_free(node->filepath_rel);
memset(node, 0, sizeof(datadir_node_t));
}
/* Zero-initialize a node so fill/free see NULL buffers and 0 lengths. */
static
void
datadir_node_init(datadir_node_t *node)
{
memset(node, 0, sizeof(datadir_node_t));
}
/************************************************************************
Create the MySQL data directory iterator. Memory needs to be released
with datadir_iter_free. Position should be advanced with
datadir_iter_next_file. Iterator can be shared between multiple
threads. It is guaranteed that each thread receives unique file from
data directory into its local node struct.
@param path              data directory to enumerate
@param skip_first_level  when true, skip plain files at the datadir
                         top level (default)
@return new iterator, or NULL if the directory cannot be opened */
static
datadir_iter_t *
datadir_iter_new(const char *path, bool skip_first_level = true)
{
datadir_iter_t *it;
it = static_cast<datadir_iter_t *>(ut_malloc(sizeof(datadir_iter_t)));
memset(it, 0, sizeof(datadir_iter_t));
it->mutex = os_mutex_create();
it->datadir_path = strdup(path);
it->dir = os_file_opendir(it->datadir_path, TRUE);
if (it->dir == NULL) {
goto error;
}
it->err = DB_SUCCESS;
it->dbpath_len = FN_REFLEN;
it->dbpath = static_cast<char*>(ut_malloc(it->dbpath_len));
it->filepath_len = FN_REFLEN;
it->filepath = static_cast<char*>(ut_malloc(it->filepath_len));
it->filepath_rel_len = FN_REFLEN;
it->filepath_rel = static_cast<char*>(ut_malloc(it->filepath_rel_len));
it->skip_first_level = skip_first_level;
return(it);
error:
/* Release resources acquired before the failure; the original code
leaked both the mutex and the strdup'ed datadir path. */
os_mutex_free(it->mutex);
free(it->datadir_path);
ut_free(it);
return(NULL);
}
/* Advance to the next first-level entry of the datadir: a database
directory, or (when skip_first_level is false) a plain file. Closes the
previously opened database directory first.
Returns true when a new entry was found. */
static
bool
datadir_iter_next_database(datadir_iter_t *it)
{
if (it->dbdir != NULL) {
if (os_file_closedir(it->dbdir) != 0) {
msg("Warning: could not"
" close database directory %s\n", it->dbpath);
it->err = DB_ERROR;
}
it->dbdir = NULL;
}
while (fil_file_readdir_next_file(&it->err, it->datadir_path,
it->dir, &it->dbinfo) == 0) {
ulint len;
/* Skip top-level plain files unless requested, and entries of
unknown type always. */
if ((it->dbinfo.type == OS_FILE_TYPE_FILE
&& it->skip_first_level)
|| it->dbinfo.type == OS_FILE_TYPE_UNKNOWN) {
continue;
}
/* We found a symlink or a directory; try opening it to see
if a symlink is a directory */
len = strlen(it->datadir_path)
+ strlen (it->dbinfo.name) + 2;
/* Grow the dbpath buffer if "<datadir>/<name>" does not fit. */
if (len > it->dbpath_len) {
it->dbpath_len = len;
if (it->dbpath) {
ut_free(it->dbpath);
}
it->dbpath = static_cast<char*>
(ut_malloc(it->dbpath_len));
}
ut_snprintf(it->dbpath, it->dbpath_len,
"%s/%s", it->datadir_path,
it->dbinfo.name);
srv_normalize_path_for_win(it->dbpath);
if (it->dbinfo.type == OS_FILE_TYPE_FILE) {
it->is_file = true;
return(true);
}
/* We want wrong directory permissions to be a fatal error for
XtraBackup. */
it->dbdir = os_file_opendir(it->dbpath, TRUE);
if (it->dbdir != NULL) {
it->is_file = false;
return(true);
}
}
return(false);
}
/************************************************************************
Concatenate n path components into a single path, inserting FN_LIBCHAR
between components that do not already end with a separator. Grows the
caller's buffer as needed.
@param n        number of variadic components
@param path     in/out: ut_malloc'ed buffer, may be reallocated
@param path_len in/out: allocated size of *path */
static
void
make_path_n(int n, char **path, ulint *path_len, ...)
{
ulint len_needed = n + 1;
char *p;
int i;
va_list vl;
ut_ad(n > 0);
va_start(vl, path_len);
for (i = 0; i < n; i++) {
p = va_arg(vl, char*);
len_needed += strlen(p);
}
va_end(vl);
/* Grow the buffer when it is too small for the result. The original
condition was inverted (reallocating on shrink, never on growth) and
*path_len was never updated, so long paths could overflow the buffer
and later calls saw a stale size. */
if (len_needed > *path_len) {
ut_free(*path);
*path = static_cast<char*>(ut_malloc(len_needed));
*path_len = len_needed;
}
va_start(vl, path_len);
p = va_arg(vl, char*);
strcpy(*path, p);
for (i = 1; i < n; i++) {
size_t plen;
p = va_arg(vl, char*);
plen = strlen(*path);
/* append a separator unless one is already present */
if (!is_path_separator((*path)[plen - 1])) {
(*path)[plen] = FN_LIBCHAR;
(*path)[plen + 1] = 0;
}
strcat(*path + plen, p);
}
va_end(vl);
}
/* Advance the iterator to the next file: first report a pending
first-level plain file (found by datadir_iter_next_database), then walk
the files of the current database directory. On success the paths are
left in it->filepath / it->filepath_rel. */
static
bool
datadir_iter_next_file(datadir_iter_t *it)
{
/* A pending first-level file is reported once; clear the flag so it
is not returned again. */
if (it->is_file && it->dbpath) {
make_path_n(2, &it->filepath, &it->filepath_len,
it->datadir_path, it->dbinfo.name);
make_path_n(1, &it->filepath_rel, &it->filepath_rel_len,
it->dbinfo.name);
it->is_empty_dir = false;
it->is_file = false;
return(true);
}
if (!it->dbpath || !it->dbdir) {
return(false);
}
while (fil_file_readdir_next_file(&it->err, it->dbpath, it->dbdir,
&it->fileinfo) == 0) {
if (it->fileinfo.type == OS_FILE_TYPE_DIR) {
continue;
}
/* We found a symlink or a file */
make_path_n(3, &it->filepath, &it->filepath_len,
it->datadir_path, it->dbinfo.name,
it->fileinfo.name);
make_path_n(2, &it->filepath_rel, &it->filepath_rel_len,
it->dbinfo.name, it->fileinfo.name);
it->is_empty_dir = false;
return(true);
}
return(false);
}
/* Produce the next datadir entry into 'node' under the iterator mutex
so the iterator can be shared between worker threads. A database
directory containing no files is reported once with is_empty_dir set.
Returns false when enumeration is exhausted. */
static
bool
datadir_iter_next(datadir_iter_t *it, datadir_node_t *node)
{
bool ret = true;
os_mutex_enter(it->mutex);
if (datadir_iter_next_file(it)) {
datadir_node_fill(node, it);
goto done;
}
while (datadir_iter_next_database(it)) {
if (datadir_iter_next_file(it)) {
datadir_node_fill(node, it);
goto done;
}
/* the database directory had no files: report it as empty */
make_path_n(2, &it->filepath, &it->filepath_len,
it->datadir_path, it->dbinfo.name);
make_path_n(1, &it->filepath_rel, &it->filepath_rel_len,
it->dbinfo.name);
it->is_empty_dir = true;
datadir_node_fill(node, it);
goto done;
}
/* nothing found */
ret = false;
done:
os_mutex_exit(it->mutex);
return(ret);
}
/************************************************************************
Free the data directory iterator: close any open directory handles and
release all buffers. (The comment formerly here described the
datafile_* cursor API defined below, not this function.) */
static
void
datadir_iter_free(datadir_iter_t *it)
{
os_mutex_free(it->mutex);
if (it->dbdir) {
os_file_closedir(it->dbdir);
}
if (it->dir) {
os_file_closedir(it->dir);
}
ut_free(it->dbpath);
ut_free(it->filepath);
ut_free(it->filepath_rel);
/* datadir_path came from strdup(), hence free() not ut_free() */
free(it->datadir_path);
ut_free(it);
}
/************************************************************************
Holds the state needed to copy single data file. Open with
datafile_open, read with datafile_read, release with datafile_close.
Not shared between threads. */
struct datafile_cur_t {
os_file_t file;
/* destination-relative and absolute source paths */
char rel_path[FN_REFLEN];
char abs_path[FN_REFLEN];
MY_STAT statinfo;
uint thread_n;
byte* orig_buf;
byte* buf;
/* read buffer size, bytes in buffer, and current file offset */
ib_int64_t buf_size;
ib_int64_t buf_read;
ib_int64_t buf_offset;
};
/* Close the data file cursor and release its read buffer. */
static
void
datafile_close(datafile_cur_t *cursor)
{
if (cursor->file != 0) {
os_file_close(cursor->file);
}
ut_free(cursor->buf);
}
/************************************************************************
Open a data file for sequential reading and initialize the cursor.
@param file      source file path
@param cursor    cursor to initialize (zeroed first)
@param thread_n  thread number, used in log messages
@return true on success */
static
bool
datafile_open(const char *file, datafile_cur_t *cursor, uint thread_n)
{
ulint success;
memset(cursor, 0, sizeof(datafile_cur_t));
strncpy(cursor->abs_path, file, sizeof(cursor->abs_path));
/* strncpy() does not NUL-terminate on truncation */
cursor->abs_path[sizeof(cursor->abs_path) - 1] = 0;
/* Get the relative path for the destination tablespace name, i.e. the
one that can be appended to the backup root directory. Non-system
tablespaces may have absolute paths for remote tablespaces in MySQL
5.6+. We want to make "local" copies for the backup. */
strncpy(cursor->rel_path,
xb_get_relative_path(cursor->abs_path, FALSE),
sizeof(cursor->rel_path));
cursor->rel_path[sizeof(cursor->rel_path) - 1] = 0;
cursor->file = os_file_create_simple_no_error_handling(0,
cursor->abs_path,
OS_FILE_OPEN,
OS_FILE_READ_ONLY,
&success);
if (!success) {
/* The following call prints an error message */
os_file_get_last_error(TRUE);
msg("[%02u] error: cannot open "
"file %s\n",
thread_n, cursor->abs_path);
return(false);
}
if (my_fstat(cursor->file, &cursor->statinfo, MYF(MY_WME))) {
msg("[%02u] error: cannot stat %s\n",
thread_n, cursor->abs_path);
datafile_close(cursor);
return(false);
}
/* hint the kernel that the file will be read sequentially */
posix_fadvise(cursor->file, 0, 0, POSIX_FADV_SEQUENTIAL);
cursor->buf_size = 10 * 1024 * 1024;
cursor->buf = static_cast<byte *>(ut_malloc(cursor->buf_size));
return(true);
}
/* Read the next chunk (at most buf_size bytes) of the file into
cursor->buf; advances buf_offset and sets buf_read.
Returns XB_FIL_CUR_SUCCESS, XB_FIL_CUR_EOF, or XB_FIL_CUR_ERROR. */
static
xb_fil_cur_result_t
datafile_read(datafile_cur_t *cursor)
{
ulint success;
ulint to_read;
/* honour --throttle before issuing more I/O */
xtrabackup_io_throttling();
to_read = min(cursor->statinfo.st_size - cursor->buf_offset,
cursor->buf_size);
if (to_read == 0) {
return(XB_FIL_CUR_EOF);
}
success = os_file_read(cursor->file, cursor->buf, cursor->buf_offset,
to_read);
if (!success) {
return(XB_FIL_CUR_ERROR);
}
/* the chunk has been consumed; let the kernel drop its page cache */
posix_fadvise(cursor->file, cursor->buf_offset, to_read,
POSIX_FADV_DONTNEED);
cursor->buf_read = to_read;
cursor->buf_offset += to_read;
return(XB_FIL_CUR_SUCCESS);
}
/************************************************************************
Check to see if a file exists.
Takes name of the file to check.
@return true if file exists. */
static
bool
file_exists(const char *filename)
{
MY_STAT stat_arg;
if (!my_stat(filename, &stat_arg, MYF(0))) {
return(false);
}
return(true);
}
/************************************************************************
Trim any leading path separators and "./" components from a path so
that it becomes relative.
@return pointer into the same string, past the trimmed prefix */
static
const char *
trim_dotslash(const char *path)
{
for (;;) {
if (is_path_separator(path[0])) {
path += 1;
} else if (path[0] == '.' && is_path_separator(path[1])) {
path += 2;
} else {
break;
}
}
return(path);
}
/************************************************************************
Check if string ends with given suffix.
@return true if string ends with given suffix. */
static
bool
ends_with(const char *str, const char *suffix)
{
size_t str_len = strlen(str);
size_t suffix_len = strlen(suffix);

if (str_len < suffix_len) {
return(false);
}
return(memcmp(str + (str_len - suffix_len), suffix, suffix_len) == 0);
}
/************************************************************************
Create directories recursively.
@return 0 if directories created successfully. */
static
int
mkdirp(const char *pathname, int Flags, myf MyFlags)
{
char parent[PATH_MAX], *p;
/* make a parent directory path */
strncpy(parent, pathname, sizeof(parent));
parent[sizeof(parent) - 1] = 0;
/* strip the last path component to obtain the parent path */
for (p = parent + strlen(parent);
!is_path_separator(*p) && p != parent; p--);
*p = 0;
/* try to make parent directory */
if (p != parent && mkdirp(parent, Flags, MyFlags) != 0) {
return(-1);
}
/* make this one if parent has been made */
if (my_mkdir(pathname, Flags, MyFlags) == 0) {
return(0);
}
/* if it already exists that is fine */
if (errno == EEXIST) {
return(0);
}
return(-1);
}
/************************************************************************
Return true if first and second arguments are the same path after
canonicalization; false when either path cannot be resolved. */
bool
equal_paths(const char *first, const char *second)
{
char real_first[PATH_MAX];
char real_second[PATH_MAX];

if (realpath(first, real_first) == NULL
    || realpath(second, real_second) == NULL) {
return false;
}

return (strcmp(real_first, real_second) == 0);
}
/************************************************************************
Check if directory exists. Optionally create directory if doesn't
exist.
@return true if directory exists and if it was created successfully. */
bool
directory_exists(const char *dir, bool create)
{
os_file_dir_t os_dir;
MY_STAT stat_arg;
char errbuf[MYSYS_STRERROR_SIZE];
if (my_stat(dir, &stat_arg, MYF(0)) == NULL) {
if (!create) {
return(false);
}
if (mkdirp(dir, 0777, MYF(0)) < 0) {
msg("Can not create directory %s: %s\n", dir,
my_strerror(errbuf, sizeof(errbuf), my_errno));
return(false);
}
}
/* could be symlink; verify it can actually be opened as a directory */
os_dir = os_file_opendir(dir, FALSE);
if (os_dir == NULL) {
msg("Can not open directory %s: %s\n", dir,
my_strerror(errbuf, sizeof(errbuf), my_errno));
return(false);
}
os_file_closedir(os_dir);
return(true);
}
/************************************************************************
Check that directory exists and it is empty. The directory is created
if it does not exist. 'comment' prefixes any error messages. */
static
bool
directory_exists_and_empty(const char *dir, const char *comment)
{
os_file_dir_t os_dir;
dberr_t err;
os_file_stat_t info;
bool empty;
if (!directory_exists(dir, true)) {
return(false);
}
os_dir = os_file_opendir(dir, FALSE);
if (os_dir == NULL) {
msg("%s can not open directory %s\n", comment, dir);
return(false);
}
/* empty iff the very first readdir call yields no entry */
empty = (fil_file_readdir_next_file(&err, dir, os_dir, &info) != 0);
os_file_closedir(os_dir);
if (!empty) {
msg("%s directory %s is not empty!\n", comment, dir);
}
return(empty);
}
/************************************************************************
Check if file name ends with given set of suffixes.
@return true if it does. */
static
bool
filename_matches(const char *filename, const char **ext_list)
{
const char **ext;
for (ext = ext_list; *ext; ext++) {
if (ends_with(filename, *ext)) {
return(true);
}
}
return(false);
}
/************************************************************************
Copy data file for backup. Also check if it is allowed to copy by
comparing its name to the list of known data file types and checking
if passes the rules for partial backup.
@return true if file backed up or skipped successfully. */
static
bool
datafile_copy_backup(const char *filepath, uint thread_n)
{
/* extensions of the non-InnoDB files that belong in a backup */
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
"MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
NULL};
/* Get the name and the path for the tablespace. node->name always
contains the path (which may be absolute for remote tablespaces in
5.6+). space->name contains the tablespace name in the form
"./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a
multi-node shared tablespace, space->name contains the name of the first
node, but that's irrelevant, since we only need node_name to match them
against filters, and the shared tablespace is always copied regardless
of the filters value. */
if (check_if_skip_table(filepath)) {
msg_ts("[%02u] Skipping %s.\n", thread_n, filepath);
return(true);
}
/* files with unrecognized extensions are silently skipped */
if (filename_matches(filepath, ext_list)) {
return copy_file(ds_data, filepath, filepath, thread_n);
}
return(true);
}
/************************************************************************
Same as datafile_copy_backup, but put file name into the list for
rsync command (written to 'f'); when save_to_list is set the name is
also remembered in the global rsync_list for the cleanup pass. */
static
bool
datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f)
{
/* extensions of the non-InnoDB files that belong in a backup */
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
"MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
NULL};
/* Get the name and the path for the tablespace. node->name always
contains the path (which may be absolute for remote tablespaces in
5.6+). space->name contains the tablespace name in the form
"./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a
multi-node shared tablespace, space->name contains the name of the first
node, but that's irrelevant, since we only need node_name to match them
against filters, and the shared tablespace is always copied regardless
of the filters value. */
if (check_if_skip_table(filepath)) {
return(true);
}
if (filename_matches(filepath, ext_list)) {
fprintf(f, "%s\n", filepath);
if (save_to_list) {
rsync_list.insert(filepath);
}
}
return(true);
}
/************************************************************************
Create a file named 'filename' in the backup whose content is the
vprintf-formatted string given by fmt/ap.
@return true on success */
static
bool
backup_file_vprintf(const char *filename, const char *fmt, va_list ap)
{
ds_file_t *dstfile = NULL;
MY_STAT stat; /* unused for now */
char *buf = 0;
int buf_len;
const char *action;
memset(&stat, 0, sizeof(stat));
buf_len = vasprintf(&buf, fmt, ap);
/* On vasprintf() failure the buffer contents are undefined; fail
before using buf_len as the file size or touching the buffer. The
original code only checked for -1 after ds_open(). */
if (buf_len == -1) {
buf = NULL;
goto error;
}
stat.st_size = buf_len;
stat.st_mtime = my_time(0);
dstfile = ds_open(ds_data, filename, &stat);
if (dstfile == NULL) {
msg("[%02u] error: "
"cannot open the destination stream for %s\n",
0, filename);
goto error;
}
action = xb_get_copy_action("Writing");
msg_ts("[%02u] %s %s\n", 0, action, filename);
if (ds_write(dstfile, buf, buf_len)) {
goto error;
}
/* close */
msg_ts("[%02u] ...done\n", 0);
free(buf);
if (ds_close(dstfile)) {
goto error_close;
}
return(true);
error:
free(buf);
if (dstfile != NULL) {
ds_close(dstfile);
}
error_close:
msg("[%02u] Error: backup file failed.\n", 0);
return(false); /*ERROR*/
}
/* Create a file in the backup with printf-formatted content; thin
varargs wrapper around backup_file_vprintf. Returns true on success. */
bool
backup_file_printf(const char *filename, const char *fmt, ...)
{
bool result;
va_list ap;
va_start(ap, fmt);
result = backup_file_vprintf(filename, fmt, ap);
va_end(ap);
return(result);
}
/* Launch 'n' worker threads running 'func' over the shared iterator and
wait (by polling the shared counter) until every thread has finished.
Each thread is expected to decrement *count under count_mutex and to
set its ctxt->ret. Returns true only if all threads succeeded. */
static
bool
run_data_threads(datadir_iter_t *it, os_thread_func_t func, uint n)
{
datadir_thread_ctxt_t *data_threads;
uint i, count;
os_ib_mutex_t count_mutex;
bool ret;
data_threads = (datadir_thread_ctxt_t*)
(ut_malloc(sizeof(datadir_thread_ctxt_t) * n));
count_mutex = os_mutex_create();
count = n;
for (i = 0; i < n; i++) {
data_threads[i].it = it;
data_threads[i].n_thread = i + 1;
data_threads[i].count = &count;
data_threads[i].count_mutex = count_mutex;
os_thread_create(func, data_threads + i, &data_threads[i].id);
}
/* Wait for threads to exit */
while (1) {
os_thread_sleep(100000);
os_mutex_enter(count_mutex);
if (count == 0) {
os_mutex_exit(count_mutex);
break;
}
os_mutex_exit(count_mutex);
}
os_mutex_free(count_mutex);
ret = true;
for (i = 0; i < n; i++) {
ret = data_threads[i].ret && ret;
if (!data_threads[i].ret) {
msg("Error: thread %u failed.\n", i);
}
}
ut_free(data_threads);
return(ret);
}
/************************************************************************
Copy file for backup/restore.
@return true in case of success. */
bool
copy_file(ds_ctxt_t *datasink,
const char *src_file_path,
const char *dst_file_path,
uint thread_n)
{
char dst_name[FN_REFLEN];
ds_file_t *dstfile = NULL;
datafile_cur_t cursor;
xb_fil_cur_result_t res;
const char *action;
/* NOTE(review): on open failure the error path still calls
datafile_close(&cursor); this relies on datafile_open() having zeroed
the cursor before failing — verify that invariant holds. */
if (!datafile_open(src_file_path, &cursor, thread_n)) {
goto error;
}
/* dst_name is only used in error messages below */
strncpy(dst_name, cursor.rel_path, sizeof(dst_name));
dstfile = ds_open(datasink, trim_dotslash(dst_file_path),
&cursor.statinfo);
if (dstfile == NULL) {
msg("[%02u] error: "
"cannot open the destination stream for %s\n",
thread_n, dst_name);
goto error;
}
action = xb_get_copy_action();
msg_ts("[%02u] %s %s to %s\n",
thread_n, action, src_file_path, dstfile->path);
/* The main copy loop */
while ((res = datafile_read(&cursor)) == XB_FIL_CUR_SUCCESS) {
if (ds_write(dstfile, cursor.buf, cursor.buf_read)) {
goto error;
}
}
if (res == XB_FIL_CUR_ERROR) {
goto error;
}
/* close */
msg_ts("[%02u] ...done\n", thread_n);
datafile_close(&cursor);
if (ds_close(dstfile)) {
goto error_close;
}
return(true);
error:
datafile_close(&cursor);
if (dstfile != NULL) {
ds_close(dstfile);
}
error_close:
msg("[%02u] Error: copy_file() failed.\n", thread_n);
return(false); /*ERROR*/
}
/************************************************************************
Try to move file by renaming it. If source and destination are on
different devices fall back to copy and unlink.
@return true in case of success. */
static
bool
move_file(ds_ctxt_t *datasink,
const char *src_file_path,
const char *dst_file_path,
const char *dst_dir, uint thread_n)
{
char errbuf[MYSYS_STRERROR_SIZE];
char dst_file_path_abs[FN_REFLEN];
char dst_dir_abs[FN_REFLEN];
size_t dirname_length;
ut_snprintf(dst_file_path_abs, sizeof(dst_file_path_abs),
"%s/%s", dst_dir, dst_file_path);
/* make sure the destination directory (including parents) exists */
dirname_part(dst_dir_abs, dst_file_path_abs, &dirname_length);
if (!directory_exists(dst_dir_abs, true)) {
return(false);
}
/* refuse to clobber an existing destination file */
if (file_exists(dst_file_path_abs)) {
msg("Error: Move file %s to %s failed: Destination "
"file exists\n",
src_file_path, dst_file_path_abs);
return(false);
}
msg_ts("[%02u] Moving %s to %s\n",
thread_n, src_file_path, dst_file_path_abs);
if (my_rename(src_file_path, dst_file_path_abs, MYF(0)) != 0) {
/* EXDEV: source and destination are on different
filesystems — fall back to copy + unlink */
if (my_errno == EXDEV) {
bool ret;
ret = copy_file(datasink, src_file_path,
dst_file_path, thread_n);
msg_ts("[%02u] Removing %s\n", thread_n, src_file_path);
if (unlink(src_file_path) != 0) {
msg("Error: unlink %s failed: %s\n",
src_file_path,
my_strerror(errbuf,
sizeof(errbuf), errno));
}
return(ret);
}
msg("Can not move file %s to %s: %s\n",
src_file_path, dst_file_path_abs,
my_strerror(errbuf, sizeof(errbuf), my_errno));
return(false);
}
msg_ts("[%02u] ...done\n", thread_n);
return(true);
}
/************************************************************************
Read link from .isl file if any and store it in the global map associated
with given tablespace (tablespace_locations, keyed by ibd_filepath).
Does nothing if the link file cannot be opened. */
static
void
read_link_file(const char *ibd_filepath, const char *link_filepath)
{
char *filepath= NULL;
FILE *file = fopen(link_filepath, "r+b");
if (file) {
filepath = static_cast<char*>(malloc(OS_FILE_MAX_PATH));
os_file_read_string(file, filepath, OS_FILE_MAX_PATH);
fclose(file);
if (strlen(filepath)) {
/* Trim whitespace from end of filepath */
ulint lastch = strlen(filepath) - 1;
while (lastch > 4 && filepath[lastch] <= 0x20) {
filepath[lastch--] = 0x00;
}
srv_normalize_path_for_win(filepath);
}
/* std::string copies the buffer, so freeing filepath is safe */
tablespace_locations[ibd_filepath] = filepath;
}
/* free(NULL) is a no-op when the file could not be opened */
free(filepath);
}
/************************************************************************
Return the location of given .ibd if it was previously read
from .isl file.
@return NULL or destination .ibd file path. */
static
const char *
tablespace_filepath(const char *ibd_filepath)
{
std::map<std::string, std::string>::iterator it;
it = tablespace_locations.find(ibd_filepath);
if (it != tablespace_locations.end()) {
/* the pointer stays valid as long as the map entry exists */
return it->second.c_str();
}
return NULL;
}
/************************************************************************
Copy or move file depending on current mode (--copy-back vs
--move-back). For .ibd files with an associated .isl link the file is
redirected to the remote location read from the link file.
@return true in case of success. */
static
bool
copy_or_move_file(const char *src_file_path,
const char *dst_file_path,
const char *dst_dir,
uint thread_n)
{
ds_ctxt_t *datasink = ds_data; /* copy to datadir by default */
char filedir[FN_REFLEN];
size_t filedir_len;
bool ret;
/* read the link from .isl file */
if (ends_with(src_file_path, ".isl")) {
char *ibd_filepath;
ibd_filepath = strdup(src_file_path);
strcpy(ibd_filepath + strlen(ibd_filepath) - 3, "ibd");
read_link_file(ibd_filepath, src_file_path);
free(ibd_filepath);
}
/* check if there is .isl file */
if (ends_with(src_file_path, ".ibd")) {
char *link_filepath;
const char *filepath;
link_filepath = strdup(src_file_path);
strcpy(link_filepath + strlen(link_filepath) - 3, "isl");
read_link_file(src_file_path, link_filepath);
filepath = tablespace_filepath(src_file_path);
if (filepath != NULL) {
dirname_part(filedir, filepath, &filedir_len);
dst_file_path = filepath + filedir_len;
dst_dir = filedir;
if (!directory_exists(dst_dir, true)) {
/* free before bailing out: the original code
leaked link_filepath on this early exit */
free(link_filepath);
ret = false;
goto cleanup;
}
datasink = ds_create(dst_dir, DS_TYPE_LOCAL);
}
free(link_filepath);
}
ret = (xtrabackup_copy_back ?
copy_file(datasink, src_file_path, dst_file_path, thread_n) :
move_file(datasink, src_file_path, dst_file_path,
dst_dir, thread_n));
cleanup:
/* destroy the per-file datasink created for a remote tablespace */
if (datasink != ds_data) {
ds_destroy(datasink);
}
return(ret);
}
/************************************************************************
Back up the non-InnoDB files found under 'from'. With --rsync the file
names are collected into a temporary list and shipped with an external
rsync invocation; prep_mode selects the first (pre-lock) of the two
rsync passes, after which the second pass removes files that vanished
in between.
@return true on success */
bool
backup_files(const char *from, bool prep_mode)
{
char rsync_tmpfile_name[FN_REFLEN];
FILE *rsync_tmpfile = NULL;
datadir_iter_t *it;
datadir_node_t node;
bool ret = true;
/* the prep pass only exists for --rsync */
if (prep_mode && !opt_rsync) {
return(true);
}
if (opt_rsync) {
snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name),
"%s/%s%d", opt_mysql_tmpdir,
"xtrabackup_rsyncfiles_pass",
prep_mode ? 1 : 2);
rsync_tmpfile = fopen(rsync_tmpfile_name, "w");
if (rsync_tmpfile == NULL) {
msg("Error: can't create file %s\n",
rsync_tmpfile_name);
return(false);
}
}
msg_ts("Starting %s non-InnoDB tables and files\n",
prep_mode ? "prep copy of" : "to backup");
datadir_node_init(&node);
it = datadir_iter_new(from);
while (datadir_iter_next(it, &node)) {
if (!node.is_empty_dir) {
if (opt_rsync) {
ret = datafile_rsync_backup(node.filepath,
!prep_mode, rsync_tmpfile);
} else {
ret = datafile_copy_backup(node.filepath, 1);
}
if (!ret) {
msg("Failed to copy file %s\n", node.filepath);
goto out;
}
} else if (!prep_mode) {
/* backup fake file into empty directory */
char path[FN_REFLEN];
ut_snprintf(path, sizeof(path),
"%s/db.opt", node.filepath);
if (!(ret = backup_file_printf(
trim_dotslash(path), "%s", ""))) {
msg("Failed to create file %s\n", path);
goto out;
}
}
}
if (opt_rsync) {
std::stringstream cmd;
int err;
if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
fprintf(rsync_tmpfile, "%s\n", buffer_pool_filename);
rsync_list.insert(buffer_pool_filename);
}
if (file_exists("ib_lru_dump")) {
fprintf(rsync_tmpfile, "%s\n", "ib_lru_dump");
rsync_list.insert("ib_lru_dump");
}
fclose(rsync_tmpfile);
rsync_tmpfile = NULL;
cmd << "rsync -t . --files-from=" << rsync_tmpfile_name
<< " " << xtrabackup_target_dir;
msg_ts("Starting rsync as: %s\n", cmd.str().c_str());
/* The original condition mixed the assignment with && so that
'err' received a boolean, not the rsync exit status. Evaluate
the exit status first; a failure is only fatal on the second
(post-lock) pass. */
err = system(cmd.str().c_str());
if (err != 0 && !prep_mode) {
msg_ts("Error: rsync failed with error code %d\n", err);
ret = false;
goto out;
}
msg_ts("rsync finished successfully.\n");
if (!prep_mode && !opt_no_lock) {
char path[FN_REFLEN];
char dst_path[FN_REFLEN];
char *newline;
/* Remove files that have been removed between first and
second passes. Cannot use "rsync --delete" because it
does not work with --files-from. */
snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name),
"%s/%s", opt_mysql_tmpdir,
"xtrabackup_rsyncfiles_pass1");
rsync_tmpfile = fopen(rsync_tmpfile_name, "r");
if (rsync_tmpfile == NULL) {
msg("Error: can't open file %s\n",
rsync_tmpfile_name);
/* was return(false): that leaked the iterator
and the node buffers */
ret = false;
goto out;
}
while (fgets(path, sizeof(path), rsync_tmpfile)) {
newline = strchr(path, '\n');
if (newline) {
*newline = 0;
}
/* present in pass 1 but not pass 2: delete it */
if (rsync_list.count(path) < 1) {
snprintf(dst_path, sizeof(dst_path),
"%s/%s", xtrabackup_target_dir,
path);
msg_ts("Removing %s\n", dst_path);
unlink(dst_path);
}
}
fclose(rsync_tmpfile);
rsync_tmpfile = NULL;
}
}
msg_ts("Finished %s non-InnoDB tables and files\n",
prep_mode ? "a prep copy of" : "backing up");
out:
datadir_iter_free(it);
datadir_node_free(&node);
if (rsync_tmpfile != NULL) {
fclose(rsync_tmpfile);
}
return(ret);
}
bool
backup_start()
{
	/* Orchestrate the non-InnoDB phase of the backup. The order of
	operations matters: an optional unlocked rsync "prep" pass, then
	table locks, then the definitive file copy under lock, then the
	slave/Galera/binlog metadata files.
	@return true on success. */
	if (!opt_no_lock) {
		if (opt_safe_slave_backup) {
			if (!wait_for_safe_slave(mysql_connection)) {
				return(false);
			}
		}

		/* first (unlocked) pass; backup_files() returns
		immediately unless --rsync is enabled */
		if (!backup_files(fil_path_to_mysql_datadir, true)) {
			return(false);
		}

		history_lock_time = time(NULL);

		if (!lock_tables(mysql_connection)) {
			return(false);
		}
	}

	/* definitive copy of non-InnoDB tables and files */
	if (!backup_files(fil_path_to_mysql_datadir, false)) {
		return(false);
	}

	// There is no need to stop slave thread before copying non-Innodb data when
	// --no-lock option is used because --no-lock option requires that no DDL or
	// DML to non-transaction tables can occur.
	if (opt_no_lock) {
		if (opt_safe_slave_backup) {
			if (!wait_for_safe_slave(mysql_connection)) {
				return(false);
			}
		}
	}

	if (opt_slave_info) {
		lock_binlog_maybe(mysql_connection);

		if (!write_slave_info(mysql_connection)) {
			return(false);
		}
	}

	/* The only reason why Galera/binlog info is written before
	wait_for_ibbackup_log_copy_finish() is that after that call the xtrabackup
	binary will start streaming a temporary copy of REDO log to stdout and
	thus, any streaming from innobackupex would interfere. The only way to
	avoid that is to have a single process, i.e. merge innobackupex and
	xtrabackup. */
	if (opt_galera_info) {
		if (!write_galera_info(mysql_connection)) {
			return(false);
		}
		/* NOTE(review): return value ignored here, unlike the
		other write_* calls — presumably best-effort; confirm. */
		write_current_binlog_file(mysql_connection);
	}

	if (opt_binlog_info == BINLOG_INFO_ON) {

		lock_binlog_maybe(mysql_connection);
		write_binlog_info(mysql_connection);
	}

	if (have_flush_engine_logs) {
		msg_ts("Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS...\n");
		xb_mysql_query(mysql_connection,
			"FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false);
	}

	return(true);
}
bool
backup_finish()
{
	/* Finalize the backup: release server locks, restart the slave
	SQL thread if we stopped it, copy the buffer-pool/LRU dump files,
	and write the backup metadata (backup-my.cnf, xtrabackup_info).
	@return true on success. */
	/* release all locks */
	if (!opt_no_lock) {
		unlock_all(mysql_connection);
		history_lock_time = 0;
	} else {
		/* NOTE(review): with --no-lock, backup_start() never set
		history_lock_time, so this diff is computed from the
		variable's previous value — verify intent. */
		history_lock_time = time(NULL) - history_lock_time;
	}

	if (opt_safe_slave_backup && sql_thread_started) {
		msg("Starting slave SQL thread\n");
		xb_mysql_query(mysql_connection,
				"START SLAVE SQL_THREAD", false);
	}

	/* Copy buffer pool dump or LRU dump; with --rsync these were
	already transferred by backup_files() */
	if (!opt_rsync) {
		if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
			const char *dst_name;

			dst_name = trim_dotslash(buffer_pool_filename);
			copy_file(ds_data, buffer_pool_filename, dst_name, 0);
		}
		if (file_exists("ib_lru_dump")) {
			copy_file(ds_data, "ib_lru_dump", "ib_lru_dump", 0);
		}
	}

	msg_ts("Backup created in directory '%s'\n", xtrabackup_target_dir);
	if (mysql_binlog_position != NULL) {
		msg("MySQL binlog position: %s\n", mysql_binlog_position);
	}
	if (mysql_slave_position && opt_slave_info) {
		msg("MySQL slave binlog position: %s\n",
		    mysql_slave_position);
	}

	if (!write_backup_config_file()) {
		return(false);
	}

	if (!write_xtrabackup_info(mysql_connection)) {
		return(false);
	}

	return(true);
}
bool
ibx_copy_incremental_over_full()
{
	/* After an incremental change set has been applied, copy the
	non-InnoDB files, the buffer pool dump and the xtrabackup_*
	metainfo files from the incremental directory over the full
	backup directory, so the full backup reflects the incremental
	state.
	@return true on success. */
	const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
		"MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
		NULL};
	const char *sup_files[] = {"xtrabackup_binlog_info",
				   "xtrabackup_galera_info",
				   "xtrabackup_slave_info",
				   "xtrabackup_info",
				   "ib_lru_dump",
				   NULL};
	datadir_iter_t *it = NULL;
	datadir_node_t node;
	bool ret = true;
	char path[FN_REFLEN];
	int i;

	datadir_node_init(&node);

	/* If we were applying an incremental change set, we need to make
	sure non-InnoDB files and xtrabackup_* metainfo files are copied
	to the full backup directory. */
	if (xtrabackup_incremental) {
		ds_data = ds_create(xtrabackup_target_dir, DS_TYPE_LOCAL);

		it = datadir_iter_new(xtrabackup_incremental_dir);

		while (datadir_iter_next(it, &node)) {

			/* copy only non-innodb files */
			if (node.is_empty_dir
			    || !filename_matches(node.filepath, ext_list)) {
				continue;
			}

			/* remove the stale copy in the full backup first */
			if (file_exists(node.filepath_rel)) {
				unlink(node.filepath_rel);
			}

			if (!(ret = copy_file(ds_data, node.filepath,
					      node.filepath_rel, 1))) {
				msg("Failed to copy file %s\n",
				    node.filepath);
				goto cleanup;
			}
		}

		/* copy buffer pool dump */
		if (innobase_buffer_pool_filename) {
			const char *src_name;

			src_name = trim_dotslash(innobase_buffer_pool_filename);

			snprintf(path, sizeof(path), "%s/%s",
				 xtrabackup_incremental_dir,
				 src_name);

			if (file_exists(path)) {
				copy_file(ds_data, path,
					  innobase_buffer_pool_filename, 0);
			}
		}

		/* copy supplementary files */
		for (i = 0; sup_files[i]; i++) {
			snprintf(path, sizeof(path), "%s/%s",
				 xtrabackup_incremental_dir,
				 sup_files[i]);

			if (file_exists(path))
			{
				if (file_exists(sup_files[i])) {
					unlink(sup_files[i]);
				}
				copy_file(ds_data, path, sup_files[i], 0);
			}
		}
	}

cleanup:
	if (it != NULL) {
		datadir_iter_free(it);
	}

	if (ds_data != NULL) {
		ds_destroy(ds_data);
		/* BUG FIX: reset the global after destroying it so later
		code that tests `ds_data != NULL` (e.g. copy_back()'s
		cleanup) cannot act on a dangling datasink pointer. */
		ds_data = NULL;
	}

	datadir_node_free(&node);

	return(ret);
}
bool
ibx_cleanup_full_backup()
{
const char *ext_list[] = {"delta", "meta", "ibd", NULL};
datadir_iter_t *it = NULL;
datadir_node_t node;
bool ret = true;
datadir_node_init(&node);
/* If we are applying an incremental change set, we need to make
sure non-InnoDB files are cleaned up from full backup dir before
we copy files from incremental dir. */
it = datadir_iter_new(xtrabackup_target_dir);
while (datadir_iter_next(it, &node)) {
if (node.is_empty_dir) {
rmdir(node.filepath);
}
if (xtrabackup_incremental && !node.is_empty_dir
&& !filename_matches(node.filepath, ext_list)) {
unlink(node.filepath);
}
}
datadir_iter_free(it);
datadir_node_free(&node);
return(ret);
}
bool
apply_log_finish()
{
	/* Finish --apply-log: first purge stale files from the full
	backup, then merge the incremental files over it. Both steps must
	succeed; the second is skipped if the first fails.
	@return true on success. */
	return(ibx_cleanup_full_backup()
	       && ibx_copy_incremental_over_full());
}
bool
copy_back()
{
	/* Implement --copy-back: move/copy everything from the backup
	directory back into the server's data/undo/log directories. The
	sequence is fixed: validate target directories, chdir into the
	backup, bring up minimal InnoDB subsystems, then copy undo
	tablespaces, redo logs, system tablespaces and finally all
	remaining files, before tearing the subsystems down again.
	@return true on success. */
	char *innobase_data_file_path_copy;
	ulint i;
	bool ret;
	datadir_iter_t *it = NULL;
	datadir_node_t node;
	char *dst_dir;

	memset(&node, 0, sizeof(node));

	/* refuse to restore over existing data unless explicitly forced */
	if (!opt_force_non_empty_dirs) {
		if (!directory_exists_and_empty(mysql_data_home,
						"Original data")) {
			return(false);
		}
	} else {
		if (!directory_exists(mysql_data_home, true)) {
			return(false);
		}
	}
	if (srv_undo_dir && *srv_undo_dir
	    && !directory_exists(srv_undo_dir, true)) {
		return(false);
	}
	if (innobase_data_home_dir && *innobase_data_home_dir
	    && !directory_exists(innobase_data_home_dir, true)) {
		return(false);
	}
	if (srv_log_group_home_dir && *srv_log_group_home_dir
	    && !directory_exists(srv_log_group_home_dir, true)) {
		return(false);
	}

	/* cd to backup directory */
	if (my_setwd(xtrabackup_target_dir, MYF(MY_WME)))
	{
		msg("cannot my_setwd %s\n", xtrabackup_target_dir);
		return(false);
	}

	/* parse data file path */

	if (!innobase_data_file_path) {
		innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
	}
	innobase_data_file_path_copy = strdup(innobase_data_file_path);

	if (!(ret = srv_parse_data_file_paths_and_sizes(
					innobase_data_file_path_copy))) {
		msg("syntax error in innodb_data_file_path\n");
		return(false);
	}

	srv_max_n_threads = 1000;
	os_sync_mutex = NULL;
	ut_mem_init();
	/* temporary dummy value to avoid crash */
	srv_page_size_shift = 14;
	srv_page_size = (1 << srv_page_size_shift);
	os_sync_init();
	sync_init();
	os_io_init_simple();
	mem_init(srv_mem_pool_size);
	ut_crc32_init();

	/* copy undo tablespaces */
	if (srv_undo_tablespaces > 0) {
		dst_dir = (srv_undo_dir && *srv_undo_dir)
				? srv_undo_dir : mysql_data_home;

		ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);

		for (i = 1; i <= srv_undo_tablespaces; i++) {
			/* fits "undo" + 3-digit index; NOTE(review):
			fixed 20-byte buffer assumes small tablespace
			counts — confirm upper bound */
			char filename[20];
			sprintf(filename, "undo%03lu", i);
			if (!(ret = copy_or_move_file(filename, filename,
						      dst_dir, 1))) {
				goto cleanup;
			}
		}

		ds_destroy(ds_data);
		ds_data = NULL;
	}

	/* copy redo logs */

	dst_dir = (srv_log_group_home_dir && *srv_log_group_home_dir)
			? srv_log_group_home_dir : mysql_data_home;

	ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);

	for (i = 0; i < (ulong)innobase_log_files_in_group; i++) {
		char filename[20];

		sprintf(filename, "ib_logfile%lu", i);

		if (!file_exists(filename)) {
			continue;
		}

		if (!(ret = copy_or_move_file(filename, filename,
					      dst_dir, 1))) {
			goto cleanup;
		}
	}

	ds_destroy(ds_data);
	ds_data = NULL;

	/* copy innodb system tablespace(s) */

	dst_dir = (innobase_data_home_dir && *innobase_data_home_dir)
			? innobase_data_home_dir : mysql_data_home;

	ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);

	for (i = 0; i < srv_n_data_files; i++) {
		const char *filename = base_name(srv_data_file_names[i]);

		if (!(ret = copy_or_move_file(filename, srv_data_file_names[i],
					      dst_dir, 1))) {
			goto cleanup;
		}
	}

	ds_destroy(ds_data);
	ds_data = NULL;

	/* copy the rest of tablespaces */
	ds_data = ds_create(mysql_data_home, DS_TYPE_LOCAL);

	it = datadir_iter_new(".", false);

	datadir_node_init(&node);

	while (datadir_iter_next(it, &node)) {
		/* backup bookkeeping files and compression/encryption
		leftovers are never restored */
		const char *ext_list[] = {"backup-my.cnf", "xtrabackup_logfile",
			"xtrabackup_binary", "xtrabackup_binlog_info",
			"xtrabackup_checkpoints", ".qp", ".pmap", ".tmp",
			".xbcrypt", NULL};
		const char *filename;
		char c_tmp;
		int i_tmp;
		bool is_ibdata_file;

		/* create empty directories */
		if (node.is_empty_dir) {
			char path[FN_REFLEN];

			snprintf(path, sizeof(path), "%s/%s",
				 mysql_data_home, node.filepath_rel);

			msg_ts("[%02u] Creating directory %s\n", 1, path);

			if (mkdirp(path, 0777, MYF(0)) < 0) {
				char errbuf[MYSYS_STRERROR_SIZE];

				msg("Can not create directory %s: %s\n",
					path, my_strerror(errbuf,
						sizeof(errbuf), my_errno));
				ret = false;

				goto cleanup;

			}

			msg_ts("[%02u] ...done.", 1);

			continue;
		}

		filename = base_name(node.filepath);

		/* skip .qp and .xbcrypt files */
		if (filename_matches(filename, ext_list)) {
			continue;
		}

		/* skip undo tablespaces */
		if (sscanf(filename, "undo%d%c", &i_tmp, &c_tmp) == 1) {
			continue;
		}

		/* skip redo logs */
		if (sscanf(filename, "ib_logfile%d%c", &i_tmp, &c_tmp) == 1) {
			continue;
		}

		/* skip innodb data files */
		is_ibdata_file = false;
		for (i = 0; i < srv_n_data_files; i++) {
			const char *ibfile;

			ibfile = base_name(srv_data_file_names[i]);

			if (strcmp(ibfile, filename) == 0) {
				is_ibdata_file = true;
				continue;
			}
		}
		if (is_ibdata_file) {
			continue;
		}

		if (!(ret = copy_or_move_file(node.filepath, node.filepath_rel,
					      mysql_data_home, 1))) {
			goto cleanup;
		}
	}

	/* copy buffer pool dump */

	if (innobase_buffer_pool_filename) {
		const char *src_name;
		char path[FN_REFLEN];

		src_name = trim_dotslash(innobase_buffer_pool_filename);

		snprintf(path, sizeof(path), "%s/%s",
			mysql_data_home,
			src_name);

		/* could be already copied with other files
		from data directory */
		if (file_exists(src_name) &&
			!file_exists(innobase_buffer_pool_filename)) {
			copy_or_move_file(src_name,
					  innobase_buffer_pool_filename,
					  mysql_data_home, 0);
		}
	}

cleanup:
	if (it != NULL) {
		datadir_iter_free(it);
	}

	datadir_node_free(&node);

	free(innobase_data_file_path_copy);

	if (ds_data != NULL) {
		ds_destroy(ds_data);
	}

	ds_data = NULL;

	/* tear down the InnoDB subsystems brought up above, in reverse */
	sync_close();
	sync_initialized = FALSE;
	os_sync_free();
	mem_close();
	os_sync_mutex = NULL;
	ut_free_all_mem();

	return(ret);
}
bool
decrypt_decompress_file(const char *filepath, uint thread_n)
{
	/* Decrypt and/or decompress a single backup file by building a
	shell pipeline of the form
	    cat FILE [| xbcrypt --decrypt ...] [| qpress -dio] > DEST
	and running it via system(). DEST is FILE with the .xbcrypt
	and/or .qp suffix stripped. With --remove-original the source
	file is deleted after a successful conversion.
	NOTE(review): filepath and the encryption key are interpolated
	into the shell command without quoting/escaping — paths or keys
	containing shell metacharacters would break or be unsafe; the
	key is also visible in the process list. Confirm acceptable.
	@param filepath  path of the .qp/.xbcrypt file to process
	@param thread_n  worker thread number, for log prefixes only
	@return true on success (or when no action was needed). */
	std::stringstream cmd, message;
	char *dest_filepath = strdup(filepath);
	bool needs_action = false;

	cmd << "cat " << filepath;

	if (ends_with(filepath, ".xbcrypt") && opt_decrypt) {
		cmd << " | xbcrypt --decrypt --encrypt-algo="
		    << xtrabackup_encrypt_algo_names[opt_decrypt_algo];
		if (xtrabackup_encrypt_key) {
			cmd << " --encrypt-key=" << xtrabackup_encrypt_key;
		} else {
			cmd << " --encrypt-key-file="
			    << xtrabackup_encrypt_key_file;
		}
		/* drop the trailing ".xbcrypt" (8 chars) */
		dest_filepath[strlen(dest_filepath) - 8] = 0;
		message << "decrypting";
		needs_action = true;
	}

	if (opt_decompress
	    && (ends_with(filepath, ".qp")
		|| (ends_with(filepath, ".qp.xbcrypt")
		    && opt_decrypt))) {
		cmd << " | qpress -dio ";
		/* drop the trailing ".qp" (3 chars); for .qp.xbcrypt the
		.xbcrypt part was already stripped above */
		dest_filepath[strlen(dest_filepath) - 3] = 0;
		if (needs_action) {
			message << " and ";
		}
		message << "decompressing";
		needs_action = true;
	}

	cmd << " > " << dest_filepath;
	message << " " << filepath;

	free(dest_filepath);

	if (needs_action) {

		msg_ts("[%02u] %s\n", thread_n, message.str().c_str());

		if (system(cmd.str().c_str()) != 0) {
			return(false);
		}

		if (opt_remove_original) {
			msg_ts("[%02u] removing %s\n", thread_n, filepath);
			if (my_delete(filepath, MYF(MY_WME)) != 0) {
				return(false);
			}
		}
	}

	return(true);
}
static
os_thread_ret_t
decrypt_decompress_thread_func(void *arg)
{
	/* Worker-thread entry point: pull entries from the shared
	directory iterator and run decrypt_decompress_file() on every
	.qp / .xbcrypt file. On exit, decrement the shared thread count
	under its mutex and record the result in the context. */
	datadir_thread_ctxt_t *ctxt = (datadir_thread_ctxt_t *)(arg);
	datadir_node_t node;
	bool ret = true;

	datadir_node_init(&node);

	while (datadir_iter_next(ctxt->it, &node)) {

		/* skip empty directories in backup */
		if (node.is_empty_dir) {
			continue;
		}

		/* only compressed or encrypted files need processing */
		if (!(ends_with(node.filepath, ".qp")
		      || ends_with(node.filepath, ".xbcrypt"))) {
			continue;
		}

		ret = decrypt_decompress_file(node.filepath,
					      ctxt->n_thread);
		if (!ret) {
			goto cleanup;
		}
	}

cleanup:
	datadir_node_free(&node);

	os_mutex_enter(ctxt->count_mutex);
	--(*ctxt->count);
	os_mutex_exit(ctxt->count_mutex);

	ctxt->ret = ret;

	os_thread_exit(NULL);
	OS_THREAD_DUMMY_RETURN;
}
bool
decrypt_decompress()
{
	/* Implement --decompress/--decrypt: bring up the minimal InnoDB
	sync subsystems, chdir into the backup directory, then run
	decrypt_decompress_thread_func() over all files with
	--parallel worker threads, and tear the subsystems down again.
	The init/teardown order mirrors copy_back().
	@return true if every worker succeeded. */
	bool ret;
	datadir_iter_t *it = NULL;

	srv_max_n_threads = 1000;
	os_sync_mutex = NULL;
	ut_mem_init();
	os_sync_init();
	sync_init();

	/* cd to backup directory */
	if (my_setwd(xtrabackup_target_dir, MYF(MY_WME)))
	{
		msg("cannot my_setwd %s\n", xtrabackup_target_dir);
		return(false);
	}

	/* copy the rest of tablespaces */
	ds_data = ds_create(".", DS_TYPE_LOCAL);

	it = datadir_iter_new(".", false);

	ut_a(xtrabackup_parallel >= 0);

	/* at least one worker even when --parallel=0 */
	ret = run_data_threads(it, decrypt_decompress_thread_func,
		xtrabackup_parallel ? xtrabackup_parallel : 1);

	if (it != NULL) {
		datadir_iter_free(it);
	}

	if (ds_data != NULL) {
		ds_destroy(ds_data);
	}

	ds_data = NULL;

	sync_close();
	sync_initialized = FALSE;
	os_sync_free();
	os_sync_mutex = NULL;
	ut_free_all_mem();

	return(ret);
}
void
version_check()
{
	/* Run the embedded version_check.pl script by piping it into a
	"perl" child process. Connection parameters are passed to the
	script via environment variables.
	NOTE(review): this exports the MySQL password into the process
	environment, where it is visible to the child and potentially to
	other processes on some systems — confirm acceptable. Failures
	of popen/pclose are silently ignored (best-effort check). */
	if (opt_password != NULL) {
		setenv("option_mysql_password", opt_password, 1);
	}
	if (opt_user != NULL) {
		setenv("option_mysql_user", opt_user, 1);
	}
	if (opt_host != NULL) {
		setenv("option_mysql_host", opt_host, 1);
	}
	if (opt_socket != NULL) {
		setenv("option_mysql_socket", opt_socket, 1);
	}
	if (opt_port != 0) {
		char port[20];

		snprintf(port, sizeof(port), "%u", opt_port);
		setenv("option_mysql_port", port, 1);
	}

	FILE *pipe = popen("perl", "w");
	if (pipe == NULL) {
		return;
	}

	/* version_check_pl is the xxd-embedded script (see CMakeLists) */
	fputs((const char *)version_check_pl, pipe);
	pclose(pipe);
}
#ifndef XTRABACKUP_BACKUP_COPY_H
#define XTRABACKUP_BACKUP_COPY_H

#include <my_global.h>
#include "datasink.h"

/* special files written into the backup directory */
#define XTRABACKUP_SLAVE_INFO "xtrabackup_slave_info"
#define XTRABACKUP_GALERA_INFO "xtrabackup_galera_info"
#define XTRABACKUP_BINLOG_INFO "xtrabackup_binlog_info"
#define XTRABACKUP_INFO "xtrabackup_info"

/* true while the binary log is locked (see lock_binlog_maybe) */
extern bool binlog_locked;

/* Create a file in the backup and write printf-formatted content to it.
NOTE(review): ATTRIBUTE_FORMAT(printf, 2, 0) disables checking of the
variadic arguments; (2, 3) would be usual for a ...-style function —
confirm whether this is intentional. */
bool
backup_file_printf(const char *filename, const char *fmt, ...)
	ATTRIBUTE_FORMAT(printf, 2, 0);

/************************************************************************
Return true if first and second arguments are the same path. */
bool
equal_paths(const char *first, const char *second);

/************************************************************************
Copy file for backup/restore.
@return true in case of success. */
bool
copy_file(ds_ctxt_t *datasink,
	  const char *src_file_path,
	  const char *dst_file_path,
	  uint thread_n);

/* Backup phase: copy non-InnoDB files and write metadata. */
bool
backup_start();
bool
backup_finish();
/* --apply-log phase: merge an incremental backup over the full one. */
bool
apply_log_finish();
/* --copy-back phase: restore files into the server directories. */
bool
copy_back();
/* --decompress/--decrypt phase. */
bool
decrypt_decompress();
/* Run the embedded version_check.pl script. */
void
version_check();
bool
is_path_separator(char);
bool
directory_exists(const char *dir, bool create);

#endif
/******************************************************
hot backup tool for InnoDB
(c) 2009-2015 Percona LLC and/or its affiliates
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************
This file incorporates work covered by the following copyright and
permission notice:
Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
*******************************************************/
#include <my_global.h>
#include <mysql.h>
#include <mysqld.h>
#include <my_sys.h>
#include <string.h>
#include <limits>
#include "common.h"
#include "xtrabackup.h"
#include "xtrabackup_version.h"
#include "backup_copy.h"
#include "backup_mysql.h"
#include "mysqld.h"
/* name and argument string of the invoking tool, recorded for
xtrabackup_info */
char *tool_name;
char tool_args[2048];

/* mysql flavor and version */
mysql_flavor_t server_flavor = FLAVOR_UNKNOWN;
unsigned long mysql_server_version = 0;

/* server capabilities, detected by get_mysql_vars() /
detect_mysql_capabilities_for_backup() */
bool have_changed_page_bitmaps = false;
bool have_backup_locks = false;
bool have_backup_safe_binlog_info = false;
bool have_lock_wait_timeout = false;
bool have_galera_enabled = false;
bool have_flush_engine_logs = false;
bool have_multi_threaded_slave = false;
bool have_gtid_slave = false;

/* Kill long selects */
os_thread_id_t kill_query_thread_id;
os_event_t kill_query_thread_started;
os_event_t kill_query_thread_stopped;
os_event_t kill_query_thread_stop;

/* true when we stopped the slave SQL thread (--safe-slave-backup) */
bool sql_thread_started = false;
/* replication/binlog coordinates captured during the backup */
char *mysql_slave_position = NULL;
char *mysql_binlog_position = NULL;
/* value of innodb_buffer_pool_filename on the server */
char *buffer_pool_filename = NULL;

/* History on server */
time_t history_start_time;
time_t history_end_time;
time_t history_lock_time;

/* control connection used for all queries against the server */
MYSQL *mysql_connection;

/* use the client-library connect entry point directly */
extern "C" {
MYSQL * STDCALL
cli_mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
		       const char *passwd, const char *db,
		       uint port, const char *unix_socket,ulong client_flag);
}

#define mysql_real_connect cli_mysql_real_connect
MYSQL *
xb_mysql_connect()
{
	/* Open the control connection to the MySQL server using the
	command-line options (host/user/password/port/socket, SSL), and
	raise the session wait_timeout so the connection survives long
	copy phases.
	@return connection handle, or NULL on failure. */
	MYSQL *connection = mysql_init(NULL);
	char mysql_port_str[std::numeric_limits<int>::digits10 + 3];

	sprintf(mysql_port_str, "%d", opt_port);

	if (connection == NULL) {
		/* BUG FIX: the original called mysql_error(connection)
		with connection == NULL, which is invalid; mysql_init()
		fails only on out-of-memory. */
		msg("Failed to init MySQL struct: out of memory.\n");
		return(NULL);
	}

	if (!opt_secure_auth) {
		mysql_options(connection, MYSQL_SECURE_AUTH,
			      (char *) &opt_secure_auth);
	}

	msg_ts("Connecting to MySQL server host: %s, user: %s, password: %s, "
	       "port: %s, socket: %s\n", opt_host ? opt_host : "localhost",
	       opt_user ? opt_user : "not set",
	       opt_password ? "set" : "not set",
	       opt_port != 0 ? mysql_port_str : "not set",
	       opt_socket ? opt_socket : "not set");

#ifdef HAVE_OPENSSL
	if (opt_use_ssl)
	{
		mysql_ssl_set(connection, opt_ssl_key, opt_ssl_cert,
			      opt_ssl_ca, opt_ssl_capath,
			      opt_ssl_cipher);
		mysql_options(connection, MYSQL_OPT_SSL_CRL, opt_ssl_crl);
		mysql_options(connection, MYSQL_OPT_SSL_CRLPATH,
			      opt_ssl_crlpath);
	}
	mysql_options(connection,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
		      (char*)&opt_ssl_verify_server_cert);
#if !defined(HAVE_YASSL)
	if (opt_server_public_key && *opt_server_public_key)
		mysql_options(connection, MYSQL_SERVER_PUBLIC_KEY,
			      opt_server_public_key);
#endif
#endif

	if (!mysql_real_connect(connection,
				opt_host ? opt_host : "localhost",
				opt_user,
				opt_password,
				"" /*database*/, opt_port,
				opt_socket, 0)) {
		msg("Failed to connect to MySQL server: %s.\n",
		    mysql_error(connection));
		mysql_close(connection);
		return(NULL);
	}

	/* ~24.8 days: effectively "never time out" during the backup */
	xb_mysql_query(connection, "SET SESSION wait_timeout=2147483",
		       false, true);

	return(connection);
}
/*********************************************************************//**
Execute mysql query.
@param connection    open connection to use
@param query         SQL text to execute
@param use_result    if true, the stored result set is returned and the
                     caller must free it with mysql_free_result(); if
                     false, any result set is consumed and freed here
@param die_on_error  if true, exit the process on query failure
@return stored result set when use_result is true and the query
produced one; NULL otherwise. */
MYSQL_RES *
xb_mysql_query(MYSQL *connection, const char *query, bool use_result,
		bool die_on_error)
{
	MYSQL_RES *mysql_result = NULL;

	if (mysql_query(connection, query)) {
		msg("Error: failed to execute query %s: %s\n", query,
		    mysql_error(connection));
		if (die_on_error) {
			exit(EXIT_FAILURE);
		}
		return(NULL);
	}

	/* store result set on client if there is a result */
	if (mysql_field_count(connection) > 0) {
		if ((mysql_result = mysql_store_result(connection)) == NULL) {
			msg("Error: failed to fetch query result %s: %s\n",
			    query, mysql_error(connection));
			exit(EXIT_FAILURE);
		}

		if (!use_result) {
			mysql_free_result(mysql_result);
			/* BUG FIX: the original returned the result set
			it had just freed; return NULL so callers never
			receive a dangling pointer. */
			mysql_result = NULL;
		}
	}

	return mysql_result;
}
/* Name -> output-slot pair used by read_mysql_variables(); on a match
the slot receives a strdup()ed copy of the value, to be released with
free_mysql_variables(). */
struct mysql_variable {
	const char *name;	/* variable name to look for */
	char **value;		/* out: malloc'ed value; untouched if absent */
};
static
void
read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars,
	bool vertical_result)
{
	/* Execute `query` and fill the `vars` table (NULL-name
	terminated) with strdup()ed copies of matching values.
	With vertical_result the rows are (name, value) pairs, as in
	SHOW VARIABLES; otherwise a single row is read and matched
	against the result-set field names (SELECT ... style). */
	MYSQL_RES *mysql_result;
	MYSQL_ROW row;
	mysql_variable *var;

	mysql_result = xb_mysql_query(connection, query, true);

	ut_ad(!vertical_result || mysql_num_fields(mysql_result) == 2);

	if (vertical_result) {
		while ((row = mysql_fetch_row(mysql_result))) {
			char *name = row[0];
			char *value = row[1];
			for (var = vars; var->name; var++) {
				if (strcmp(var->name, name) == 0
				    && value != NULL) {
					*(var->value) = strdup(value);
				}
			}
		}
	} else {
		MYSQL_FIELD *field;

		if ((row = mysql_fetch_row(mysql_result)) != NULL) {
			int i = 0;

			/* match each column's field name against vars */
			while ((field = mysql_fetch_field(mysql_result))
				!= NULL) {
				char *name = field->name;
				char *value = row[i];
				for (var = vars; var->name; var++) {
					if (strcmp(var->name, name) == 0
					    && value != NULL) {
						*(var->value) = strdup(value);
					}
				}
				++i;
			}
		}
	}

	mysql_free_result(mysql_result);
}
static
void
free_mysql_variables(mysql_variable *vars)
{
	/* Release the strdup()ed values filled in by
	read_mysql_variables(). Slots are reset to NULL so a repeated
	call cannot double-free and stale pointers are not left behind. */
	mysql_variable *var;

	for (var = vars; var->name; var++) {
		free(*(var->value));
		*(var->value) = NULL;
	}
}
static
char *
read_mysql_one_value(MYSQL *connection, const char *query)
{
	/* Execute `query` and return a malloc'ed copy of the first
	column of the first row, or NULL when there is no row or the
	value is SQL NULL. Caller owns the returned string.
	BUG FIX: guard against strdup(NULL) (undefined behavior) when
	the column value is NULL. */
	MYSQL_RES *mysql_result;
	MYSQL_ROW row;
	char *result = NULL;

	mysql_result = xb_mysql_query(connection, query, true);

	ut_ad(mysql_num_fields(mysql_result) == 1);

	if ((row = mysql_fetch_row(mysql_result)) && row[0] != NULL) {
		result = strdup(row[0]);
	}

	mysql_free_result(mysql_result);

	return(result);
}
static
bool
check_server_version(unsigned long version_number,
		     const char *version_string,
		     const char *version_comment,
		     const char *innodb_version)
{
	/* Record the server version, detect the vendor flavor from the
	version strings, and decide whether this server release is one
	we support backing up. Prints an error for unsupported servers.
	@return true if the server version is supported. */
	mysql_server_version = version_number;

	server_flavor = FLAVOR_UNKNOWN;
	if (strstr(version_comment, "Percona") != NULL) {
		server_flavor = FLAVOR_PERCONA_SERVER;
	} else if (strstr(version_comment, "MariaDB") != NULL ||
		   strstr(version_string, "MariaDB") != NULL) {
		server_flavor = FLAVOR_MARIADB;
	} else if (strstr(version_comment, "MySQL") != NULL) {
		server_flavor = FLAVOR_MYSQL;
	}

	/* MySQL 5.1 is supported only with the InnoDB plugin, never
	with the built-in InnoDB. */
	const bool mysql51 = version_number > 50100
		&& version_number < 50500;

	const bool version_supported =
		(mysql51 && innodb_version != NULL)
		|| (version_number > 50500 && version_number < 50700)
		|| ((version_number > 100000 && version_number < 100300)
		    && server_flavor == FLAVOR_MARIADB);

	if (mysql51 && innodb_version == NULL) {
		msg("Error: Built-in InnoDB in MySQL 5.1 is not "
		    "supported in this release. You can either use "
		    "Percona XtraBackup 2.0, or upgrade to InnoDB "
		    "plugin.\n");
	} else if (!version_supported) {
		msg("Error: Unsupported server version: '%s'. Please "
		    "report a bug at "
		    "https://bugs.launchpad.net/percona-xtrabackup\n",
		    version_string);
	}

	return(version_supported);
}
/*********************************************************************//**
Receive options important for XtraBackup from MySQL server.
Reads SHOW VARIABLES, fills the capability flags (have_backup_locks,
have_galera_enabled, ...), validates the server version, and adopts
server-side values for options the user did not set in my.cnf
(datadir, innodb_* paths and sizes).
@return true on success. */
bool
get_mysql_vars(MYSQL *connection)
{
	char *gtid_mode_var = NULL;
	char *version_var = NULL;
	char *version_comment_var = NULL;
	char *innodb_version_var = NULL;
	char *have_backup_locks_var = NULL;
	char *have_backup_safe_binlog_info_var = NULL;
	char *log_bin_var = NULL;
	char *lock_wait_timeout_var= NULL;
	char *wsrep_on_var = NULL;
	char *slave_parallel_workers_var = NULL;
	char *gtid_slave_pos_var = NULL;
	char *innodb_buffer_pool_filename_var = NULL;
	char *datadir_var = NULL;
	char *innodb_log_group_home_dir_var = NULL;
	char *innodb_log_file_size_var = NULL;
	char *innodb_log_files_in_group_var = NULL;
	char *innodb_data_file_path_var = NULL;
	char *innodb_data_home_dir_var = NULL;
	char *innodb_undo_directory_var = NULL;
	char *innodb_page_size_var = NULL;

	unsigned long server_version = mysql_get_server_version(connection);

	bool ret = true;

	mysql_variable mysql_vars[] = {
		{"have_backup_locks", &have_backup_locks_var},
		{"have_backup_safe_binlog_info",
		 &have_backup_safe_binlog_info_var},
		{"log_bin", &log_bin_var},
		{"lock_wait_timeout", &lock_wait_timeout_var},
		{"gtid_mode", &gtid_mode_var},
		{"version", &version_var},
		{"version_comment", &version_comment_var},
		{"innodb_version", &innodb_version_var},
		{"wsrep_on", &wsrep_on_var},
		{"slave_parallel_workers", &slave_parallel_workers_var},
		{"gtid_slave_pos", &gtid_slave_pos_var},
		{"innodb_buffer_pool_filename",
		 &innodb_buffer_pool_filename_var},
		{"datadir", &datadir_var},
		{"innodb_log_group_home_dir", &innodb_log_group_home_dir_var},
		{"innodb_log_file_size", &innodb_log_file_size_var},
		{"innodb_log_files_in_group", &innodb_log_files_in_group_var},
		{"innodb_data_file_path", &innodb_data_file_path_var},
		{"innodb_data_home_dir", &innodb_data_home_dir_var},
		{"innodb_undo_directory", &innodb_undo_directory_var},
		{"innodb_page_size", &innodb_page_size_var},
		{NULL, NULL}
	};

	read_mysql_variables(connection, "SHOW VARIABLES",
				mysql_vars, true);

	if (have_backup_locks_var != NULL && !opt_no_backup_locks) {
		have_backup_locks = true;
	}

	/* resolve --binlog-info=AUTO to the best supported mode */
	if (opt_binlog_info == BINLOG_INFO_AUTO) {

		if (have_backup_safe_binlog_info_var != NULL)
			opt_binlog_info = BINLOG_INFO_LOCKLESS;
		else if (log_bin_var != NULL && !strcmp(log_bin_var, "ON"))
			opt_binlog_info = BINLOG_INFO_ON;
		else
			opt_binlog_info = BINLOG_INFO_OFF;
	}

	if (have_backup_safe_binlog_info_var == NULL &&
	    opt_binlog_info == BINLOG_INFO_LOCKLESS) {

		msg("Error: --binlog-info=LOCKLESS is not supported by the "
		    "server\n");
		return(false);
	}

	if (lock_wait_timeout_var != NULL) {
		have_lock_wait_timeout = true;
	}

	if (wsrep_on_var != NULL) {
		have_galera_enabled = true;
	}

	/* Check server version compatibility and detect server flavor */

	if (!(ret = check_server_version(server_version, version_var,
					 version_comment_var,
					 innodb_version_var))) {
		goto out;
	}

	if (server_version > 50500) {
		have_flush_engine_logs = true;
	}

	if (slave_parallel_workers_var != NULL
		&& atoi(slave_parallel_workers_var) > 0) {
		have_multi_threaded_slave = true;
	}

	if (innodb_buffer_pool_filename_var != NULL) {
		buffer_pool_filename = strdup(innodb_buffer_pool_filename_var);
	}

	if ((gtid_mode_var && strcmp(gtid_mode_var, "ON") == 0) ||
	    (gtid_slave_pos_var && *gtid_slave_pos_var)) {
		have_gtid_slave = true;
	}

	msg("Using server version %s\n", version_var);

	if (!(ret = detect_mysql_capabilities_for_backup())) {
		goto out;
	}

	/* make sure datadir value is the same in configuration file */
	if (check_if_param_set("datadir")) {
		if (!directory_exists(mysql_data_home, false)) {
			msg("Warning: option 'datadir' points to "
			    "nonexistent directory '%s'\n", mysql_data_home);
		}
		if (!directory_exists(datadir_var, false)) {
			msg("Warning: MySQL variable 'datadir' points to "
			    "nonexistent directory '%s'\n", datadir_var);
		}
		if (!equal_paths(mysql_data_home, datadir_var)) {
			msg("Warning: option 'datadir' has different "
				"values:\n"
				"  '%s' in defaults file\n"
				"  '%s' in SHOW VARIABLES\n",
				mysql_data_home, datadir_var);
		}
	}

	/* get some default values if they are missing from my.cnf */
	if (!check_if_param_set("datadir") && datadir_var && *datadir_var) {
		strmake(mysql_real_data_home, datadir_var, FN_REFLEN - 1);
		mysql_data_home= mysql_real_data_home;
	}

	if (!check_if_param_set("innodb_data_file_path")
	    && innodb_data_file_path_var && *innodb_data_file_path_var) {
		innobase_data_file_path = my_strdup(
			innodb_data_file_path_var, MYF(MY_FAE));
	}

	if (!check_if_param_set("innodb_data_home_dir")
	    && innodb_data_home_dir_var && *innodb_data_home_dir_var) {
		innobase_data_home_dir = my_strdup(
			innodb_data_home_dir_var, MYF(MY_FAE));
	}

	if (!check_if_param_set("innodb_log_group_home_dir")
	    && innodb_log_group_home_dir_var
	    && *innodb_log_group_home_dir_var) {
		srv_log_group_home_dir = my_strdup(
			innodb_log_group_home_dir_var, MYF(MY_FAE));
	}

	if (!check_if_param_set("innodb_undo_directory")
	    && innodb_undo_directory_var && *innodb_undo_directory_var) {
		srv_undo_dir = my_strdup(
			innodb_undo_directory_var, MYF(MY_FAE));
	}

	if (!check_if_param_set("innodb_log_files_in_group")
	    && innodb_log_files_in_group_var) {
		char *endptr;

		innobase_log_files_in_group = strtol(
			innodb_log_files_in_group_var, &endptr, 10);
		ut_ad(*endptr == 0);
	}

	if (!check_if_param_set("innodb_log_file_size")
	    && innodb_log_file_size_var) {
		char *endptr;

		innobase_log_file_size = strtoll(
			innodb_log_file_size_var, &endptr, 10);
		ut_ad(*endptr == 0);
	}

	if (!check_if_param_set("innodb_page_size") && innodb_page_size_var) {
		char *endptr;

		innobase_page_size = strtoll(
			innodb_page_size_var, &endptr, 10);
		ut_ad(*endptr == 0);
	}

out:
	free_mysql_variables(mysql_vars);

	return(ret);
}
/*********************************************************************//**
Query the server to find out what backup capabilities it supports.
Checks for the INNODB_CHANGED_PAGES plugin (incremental backups) and
sanity-checks the --galera-info and --slave-info options against the
detected server features.
@return true on success. */
bool
detect_mysql_capabilities_for_backup()
{
	const char *query = "SELECT 'INNODB_CHANGED_PAGES', COUNT(*) FROM "
				"INFORMATION_SCHEMA.PLUGINS "
			    "WHERE PLUGIN_NAME LIKE 'INNODB_CHANGED_PAGES'";
	char *innodb_changed_pages = NULL;
	mysql_variable vars[] = {
		{"INNODB_CHANGED_PAGES", &innodb_changed_pages}, {NULL, NULL}};

	if (xtrabackup_incremental) {

		read_mysql_variables(mysql_connection, query, vars, true);

		/* NOTE(review): in release builds ut_ad is a no-op, so
		if the query unexpectedly returned no row, atoi() would
		be called on NULL — confirm this cannot happen. */
		ut_ad(innodb_changed_pages != NULL);

		have_changed_page_bitmaps = (atoi(innodb_changed_pages) == 1);

		/* INNODB_CHANGED_PAGES are listed in
		INFORMATION_SCHEMA.PLUGINS in MariaDB, but
		FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS
		is not supported for versions below 10.1.6
		(see MDEV-7472) */
		if (server_flavor == FLAVOR_MARIADB &&
		    mysql_server_version < 100106) {
			have_changed_page_bitmaps = false;
		}

		free_mysql_variables(vars);
	}

	/* do some sanity checks */
	if (opt_galera_info && !have_galera_enabled) {
		msg("--galera-info is specified on the command "
			"line, but the server does not support Galera "
			"replication. Ignoring the option.\n");
		opt_galera_info = false;
	}

	if (opt_slave_info && have_multi_threaded_slave &&
	    !have_gtid_slave) {
		msg("The --slave-info option requires GTID enabled for a "
			"multi-threaded slave.\n");
		return(false);
	}

	return(true);
}
static
bool
select_incremental_lsn_from_history(lsn_t *incremental_lsn)
{
	/* Look up, in PERCONA_SCHEMA.xtrabackup_history, the highest
	innodb_to_lsn of the record matching --incremental-history-name
	or --incremental-history-uuid, and use it as the incremental
	base LSN.
	NOTE(review): callers are expected to set exactly one of the two
	options; otherwise `query` would be used uninitialized — confirm
	at the call site.
	@param[out] incremental_lsn  base LSN found in the history table
	@return true if a matching record was found. */
	MYSQL_RES *mysql_result;
	MYSQL_ROW row;
	char query[1000];
	/* BUG FIX: mysql_real_escape_string() needs up to
	2 * input_length + 1 bytes of output space; the original 100-byte
	buffer could overflow for long names. Size for 255-char input and
	reject anything longer. */
	char buf[2 * 255 + 1];

	if (opt_incremental_history_name) {
		size_t len = strlen(opt_incremental_history_name);

		if (2 * len + 1 > sizeof(buf)) {
			msg("Error: --incremental-history-name is too "
			    "long\n");
			return(false);
		}
		mysql_real_escape_string(mysql_connection, buf,
				opt_incremental_history_name, len);
		ut_snprintf(query, sizeof(query),
			"SELECT innodb_to_lsn "
			"FROM PERCONA_SCHEMA.xtrabackup_history "
			"WHERE name = '%s' "
			"AND innodb_to_lsn IS NOT NULL "
			"ORDER BY innodb_to_lsn DESC LIMIT 1",
			buf);
	}

	if (opt_incremental_history_uuid) {
		size_t len = strlen(opt_incremental_history_uuid);

		if (2 * len + 1 > sizeof(buf)) {
			msg("Error: --incremental-history-uuid is too "
			    "long\n");
			return(false);
		}
		mysql_real_escape_string(mysql_connection, buf,
				opt_incremental_history_uuid, len);
		ut_snprintf(query, sizeof(query),
			"SELECT innodb_to_lsn "
			"FROM PERCONA_SCHEMA.xtrabackup_history "
			"WHERE uuid = '%s' "
			"AND innodb_to_lsn IS NOT NULL "
			"ORDER BY innodb_to_lsn DESC LIMIT 1",
			buf);
	}

	mysql_result = xb_mysql_query(mysql_connection, query, true);

	ut_ad(mysql_num_fields(mysql_result) == 1);

	if (!(row = mysql_fetch_row(mysql_result))) {
		msg("Error while attempting to find history record "
			"for %s %s\n",
			opt_incremental_history_uuid ? "uuid" : "name",
			opt_incremental_history_uuid ?
		    		opt_incremental_history_uuid :
		    		opt_incremental_history_name);
		/* BUG FIX: the result set was leaked on this path */
		mysql_free_result(mysql_result);
		return(false);
	}

	*incremental_lsn = strtoull(row[0], NULL, 10);

	mysql_free_result(mysql_result);

	msg("Found and using lsn: " LSN_PF " for %s %s\n", *incremental_lsn,
	    opt_incremental_history_uuid ? "uuid" : "name",
	    opt_incremental_history_uuid ?
	    	opt_incremental_history_uuid :
	    	opt_incremental_history_name);

	return(true);
}
/* Advance past leading SQL noise — whitespace, opening parentheses and
C-style comments — so the first keyword of the statement can be examined.
An unterminated comment consumes the rest of the string.
@return pointer to the first significant character (or the terminating NUL). */
static
const char *
eat_sql_whitespace(const char *query)
{
	const char	*p = query;
	bool		in_comment = false;

	while (*p != '\0') {
		if (in_comment) {
			/* only a closing marker gets us out */
			if (p[0] == '*' && p[1] == '/') {
				in_comment = false;
				p += 2;
			} else {
				++p;
			}
		} else if (p[0] == '/' && p[1] == '*') {
			in_comment = true;
			p += 2;
		} else if (p[0] == '\t' || p[0] == '\n' || p[0] == '\r'
			   || p[0] == ' ' || p[0] == '(') {
			++p;
		} else {
			break;
		}
	}

	return(p);
}
static
bool
is_query_from_list(const char *query, const char **list)
{
const char **item;
query = eat_sql_whitespace(query);
item = list;
while (*item) {
if (strncasecmp(query, *item, strlen(*item)) == 0) {
return(true);
}
++item;
}
return(false);
}
/* Classify a statement as "a query" for the purposes of the lock-wait /
query-kill logic: any DML, DDL-ish or result-producing statement type. */
static
bool
is_query(const char *query)
{
	static const char *kinds[] = {
		"insert", "update", "delete", "replace", "alter", "load",
		"select", "do", "handler", "call", "execute", "begin",
		NULL};

	return(is_query_from_list(query, kinds));
}
/* Return true if the statement is a SELECT. */
static
bool
is_select_query(const char *query)
{
	static const char *kinds[] = {"select", NULL};

	return(is_query_from_list(query, kinds));
}
/* Return true if the statement modifies data or schema (the statement
types FLUSH TABLES WITH READ LOCK would have to wait behind). */
static
bool
is_update_query(const char *query)
{
	static const char *kinds[] = {
		"insert", "update", "delete", "replace", "alter", "load",
		NULL};

	return(is_query_from_list(query, kinds));
}
/* Scan SHOW FULL PROCESSLIST for a statement that has been running at
least `threshold` seconds and is of a type we must wait for before
taking the global lock (updates always; all queries when
--lock-wait-query-type=all).
@return true if at least one such query was found. */
static
bool
have_queries_to_wait_for(MYSQL *connection, uint threshold)
{
	MYSQL_RES	*result;
	MYSQL_ROW	row;
	bool		all_queries;

	result = xb_mysql_query(connection, "SHOW FULL PROCESSLIST", true);

	all_queries = (opt_lock_wait_query_type == QUERY_TYPE_ALL);

	while ((row = mysql_fetch_row(result)) != NULL) {
		const char	*info = row[7];	/* Info column */
		/* Fix: the Time column can be NULL; atoi(NULL) crashes. */
		int		duration = row[5] ? atoi(row[5]) : 0;
		char		*id = row[0];	/* connection Id */

		if (info != NULL
		    && duration >= (int)threshold
		    && ((all_queries && is_query(info))
			|| is_update_query(info))) {
			msg_ts("Waiting for query %s (duration %d sec): %s",
			       id, duration, info);
			/* Fix: the result set was leaked on this path. */
			mysql_free_result(result);
			return(true);
		}
	}

	/* Fix: the result set was leaked here as well. */
	mysql_free_result(result);

	return(false);
}
/* KILL every statement from SHOW FULL PROCESSLIST that has been running
at least `timeout` seconds and matches the configured kill class
(SELECTs always; all queries when --kill-long-query-type=all). */
static
void
kill_long_queries(MYSQL *connection, uint timeout)
{
	MYSQL_RES	*result;
	MYSQL_ROW	row;
	bool		all_queries;
	char		kill_stmt[100];

	result = xb_mysql_query(connection, "SHOW FULL PROCESSLIST", true);

	all_queries = (opt_kill_long_query_type == QUERY_TYPE_ALL);

	while ((row = mysql_fetch_row(result)) != NULL) {
		const char	*info = row[7];	/* Info column */
		/* Fix: the Time column can be NULL; atoi(NULL) crashes. */
		int		duration = row[5] ? atoi(row[5]) : 0;
		char		*id = row[0];	/* connection Id */

		if (info != NULL &&
		    duration >= (int)timeout &&
		    ((all_queries && is_query(info)) ||
			is_select_query(info))) {
			msg_ts("Killing query %s (duration %d sec): %s\n",
			       id, duration, info);
			ut_snprintf(kill_stmt, sizeof(kill_stmt),
				    "KILL %s", id);
			/* die_on_error=false: the query may already be gone */
			xb_mysql_query(connection, kill_stmt, false, false);
		}
	}

	/* Fix: the result set was never released (leak per invocation,
	and this runs once a second from the killer thread). */
	mysql_free_result(result);
}
/* Poll the processlist once a second for up to `timeout` seconds,
waiting for all queries older than `threshold` seconds to finish.
@return true when no blocking queries remain; false on timeout. */
static
bool
wait_for_no_updates(MYSQL *connection, uint timeout, uint threshold)
{
	time_t	start_time;

	start_time = time(NULL);

	msg_ts("Waiting %u seconds for queries running longer than %u seconds "
	       "to finish\n", timeout, threshold);

	while (time(NULL) <= (time_t)(start_time + timeout)) {
		if (!have_queries_to_wait_for(connection, threshold)) {
			return(true);
		}
		/* os_thread_sleep() takes microseconds: 1 second poll */
		os_thread_sleep(1000000);
	}

	/* Fix: message was missing the trailing newline every other
	msg_ts() call in this file ends with. */
	msg_ts("Unable to obtain lock. Please try again later.\n");

	return(false);
}
/* Body of the background query-killer thread started by
start_query_killer(). It first waits out a grace period of
opt_kill_long_queries_timeout seconds (polling the stop event so it can
exit early), then opens its own connection and kills long-running
queries once a second until signalled to stop. */
static
os_thread_ret_t
kill_query_thread(
/*===============*/
	void *arg __attribute__((unused)))
{
	MYSQL	*mysql;
	time_t	start_time;

	start_time = time(NULL);

	/* unblock start_query_killer(), which waits for this event */
	os_event_set(kill_query_thread_started);

	msg_ts("Kill query timeout %d seconds.\n",
	       opt_kill_long_queries_timeout);

	/* Phase 1: grace period. Poll the stop event; any return value
	other than OS_SYNC_TIME_EXCEEDED means we were told to stop.
	NOTE(review): the wait argument is 1000 — os_event_wait_time()
	takes microseconds in InnoDB, so this polls every millisecond,
	not every second; confirm intended resolution. */
	while (time(NULL) - start_time <
				(time_t)opt_kill_long_queries_timeout) {
		if (os_event_wait_time(kill_query_thread_stop, 1000) !=
		    OS_SYNC_TIME_EXCEEDED) {
			goto stop_thread;
		}
	}

	/* use a dedicated connection: the main connection is busy
	holding/acquiring the global lock */
	if ((mysql = xb_mysql_connect()) == NULL) {
		msg("Error: kill query thread failed\n");
		goto stop_thread;
	}

	/* Phase 2: keep killing queries older than the elapsed time
	until the stop event fires */
	while (true) {
		kill_long_queries(mysql, time(NULL) - start_time);
		if (os_event_wait_time(kill_query_thread_stop, 1000) !=
		    OS_SYNC_TIME_EXCEEDED) {
			break;
		}
	}

	mysql_close(mysql);

stop_thread:
	msg_ts("Kill query thread stopped\n");

	/* let stop_query_killer() return */
	os_event_set(kill_query_thread_stopped);

	os_thread_exit(NULL);
	OS_THREAD_DUMMY_RETURN;
}
/* Create the synchronization events, launch the query-killer thread and
block until it has signalled that it is running. */
static
void
start_query_killer()
{
	/* all three events must exist before the thread starts */
	kill_query_thread_started = os_event_create();
	kill_query_thread_stopped = os_event_create();
	kill_query_thread_stop = os_event_create();

	os_thread_create(kill_query_thread, NULL, &kill_query_thread_id);

	/* rendezvous: wait for the thread's startup signal */
	os_event_wait(kill_query_thread_started);
}
/* Signal the query-killer thread to terminate and wait (bounded) for
its acknowledgement.
NOTE(review): the wait argument 60000 is microseconds per InnoDB's
os_event_wait_time() convention, i.e. 60 ms, not 60 s — confirm. */
static
void
stop_query_killer()
{
	os_event_set(kill_query_thread_stop);
	os_event_wait_time(kill_query_thread_stopped, 60000);
}
/*********************************************************************//**
Function acquires either a backup tables lock, if supported
by the server, or a global read lock (FLUSH TABLES WITH READ LOCK)
otherwise.
@returns true if lock acquired */
bool
lock_tables(MYSQL *connection)
{
	if (have_lock_wait_timeout) {
		/* Set the maximum supported session value for
		lock_wait_timeout to prevent unnecessary timeouts when the
		global value is changed from the default */
		xb_mysql_query(connection,
			"SET SESSION lock_wait_timeout=31536000", false);
	}

	if (have_backup_locks) {
		/* Backup locks are available (Percona Server):
		much lighter than FTWRL, so no FLUSH TABLES trick or
		query-killer machinery is needed. */
		msg_ts("Executing LOCK TABLES FOR BACKUP...\n");
		xb_mysql_query(connection, "LOCK TABLES FOR BACKUP", false);
		return(true);
	}

	if (!opt_lock_wait_timeout && !opt_kill_long_queries_timeout) {
		/* We do first a FLUSH TABLES. If a long update is running, the
		FLUSH TABLES will wait but will not stall the whole mysqld, and
		when the long update is done the FLUSH TABLES WITH READ LOCK
		will start and succeed quickly. So, FLUSH TABLES is to lower
		the probability of a stage where both mysqldump and most client
		connections are stalled. Of course, if a second long update
		starts between the two FLUSHes, we have that bad stall.
		Option lock_wait_timeout serve the same purpose and is not
		compatible with this trick.
		*/
		msg_ts("Executing FLUSH NO_WRITE_TO_BINLOG TABLES...\n");
		xb_mysql_query(connection,
			       "FLUSH NO_WRITE_TO_BINLOG TABLES", false);
	}

	if (opt_lock_wait_timeout) {
		/* bound the FTWRL stall: wait for old updates to drain */
		if (!wait_for_no_updates(connection, opt_lock_wait_timeout,
					 opt_lock_wait_threshold)) {
			return(false);
		}
	}

	msg_ts("Executing FLUSH TABLES WITH READ LOCK...\n");

	if (opt_kill_long_queries_timeout) {
		/* background thread that kills queries blocking FTWRL
		after the configured timeout */
		start_query_killer();
	}

	if (have_galera_enabled) {
		/* avoid FTWRL deadlocking behind causal-read waits */
		xb_mysql_query(connection,
				"SET SESSION wsrep_causal_reads=0", false);
	}

	xb_mysql_query(connection, "FLUSH TABLES WITH READ LOCK", false);

	if (opt_kill_long_queries_timeout) {
		stop_query_killer();
	}

	return(true);
}
/*********************************************************************//**
If backup locks are used, execute LOCK BINLOG FOR BACKUP provided that we are
not in the --no-lock mode and the lock has not been acquired already.
@returns true if lock acquired */
bool
lock_binlog_maybe(MYSQL *connection)
{
	/* guard clauses: nothing to do without backup locks, under
	--no-lock, or when the binlog is already locked */
	if (!have_backup_locks || opt_no_lock || binlog_locked) {
		return(false);
	}

	msg_ts("Executing LOCK BINLOG FOR BACKUP...\n");
	xb_mysql_query(connection, "LOCK BINLOG FOR BACKUP", false);
	binlog_locked = true;

	return(true);
}
/*********************************************************************//**
Releases either global read lock acquired with FTWRL and the binlog
lock acquired with LOCK BINLOG FOR BACKUP, depending on
the locking strategy being used */
void
unlock_all(MYSQL *connection)
{
	if (opt_debug_sleep_before_unlock) {
		msg_ts("Debug sleep for %u seconds\n",
		       opt_debug_sleep_before_unlock);
		/* Fix: os_thread_sleep() takes microseconds; the previous
		multiplier of 1000 slept for milliseconds, contradicting
		the "seconds" message above. */
		os_thread_sleep(opt_debug_sleep_before_unlock * 1000000);
	}

	if (binlog_locked) {
		msg_ts("Executing UNLOCK BINLOG\n");
		xb_mysql_query(connection, "UNLOCK BINLOG", false);
	}

	msg_ts("Executing UNLOCK TABLES\n");
	xb_mysql_query(connection, "UNLOCK TABLES", false);

	msg_ts("All tables unlocked\n");
}
/* Read the Slave_open_temp_tables status counter from the server.
@return the counter value, or 0 when the status variable is absent. */
static
int
get_open_temp_tables(MYSQL *connection)
{
	char *value = NULL;
	mysql_variable status[] = {
		{"Slave_open_temp_tables", &value},
		{NULL, NULL}
	};

	read_mysql_variables(connection,
		"SHOW STATUS LIKE 'slave_open_temp_tables'", status, true);

	int n_open = value != NULL ? atoi(value) : 0;

	free_mysql_variables(status);

	return(n_open);
}
/*********************************************************************//**
Wait until it's safe to backup a slave. Returns immediately if
the host isn't a slave. Currently there's only one check:
Slave_open_temp_tables has to be zero. Dies on timeout. */
bool
wait_for_safe_slave(MYSQL *connection)
{
	char *read_master_log_pos = NULL;
	char *slave_sql_running = NULL;
	int n_attempts = 1;
	const int sleep_time = 3;	/* seconds between retries */
	int open_temp_tables = 0;
	bool result = true;
	mysql_variable status[] = {
		{"Read_Master_Log_Pos", &read_master_log_pos},
		{"Slave_SQL_Running", &slave_sql_running},
		{NULL, NULL}
	};

	/* remembered so the caller can restart the SQL thread later */
	sql_thread_started = false;

	read_mysql_variables(connection, "SHOW SLAVE STATUS", status, false);

	/* both fields absent => not a replication slave */
	if (!(read_master_log_pos && slave_sql_running)) {
		msg("Not checking slave open temp tables for "
			"--safe-slave-backup because host is not a slave\n");
		goto cleanup;
	}

	if (strcmp(slave_sql_running, "Yes") == 0) {
		sql_thread_started = true;
		/* stop applying events while we check temp tables */
		xb_mysql_query(connection, "STOP SLAVE SQL_THREAD", false);
	}

	if (opt_safe_slave_backup_timeout > 0) {
		n_attempts = opt_safe_slave_backup_timeout / sleep_time;
	}

	open_temp_tables = get_open_temp_tables(connection);
	msg_ts("Slave open temp tables: %d\n", open_temp_tables);

	/* Restart the SQL thread briefly between checks so the slave
	can make progress and close its temporary tables. */
	while (open_temp_tables && n_attempts--) {
		msg_ts("Starting slave SQL thread, waiting %d seconds, then "
		       "checking Slave_open_temp_tables again (%d attempts "
		       "remaining)...\n", sleep_time, n_attempts);

		xb_mysql_query(connection, "START SLAVE SQL_THREAD", false);
		os_thread_sleep(sleep_time * 1000000);
		xb_mysql_query(connection, "STOP SLAVE SQL_THREAD", false);

		open_temp_tables = get_open_temp_tables(connection);
		msg_ts("Slave open temp tables: %d\n", open_temp_tables);
	}

	/* Restart the slave if it was running at start */
	if (open_temp_tables == 0) {
		msg_ts("Slave is safe to backup\n");
		goto cleanup;
	}

	result = false;

	if (sql_thread_started) {
		msg_ts("Restarting slave SQL thread.\n");
		xb_mysql_query(connection, "START SLAVE SQL_THREAD", false);
	}

	msg_ts("Slave_open_temp_tables did not become zero after "
	       "%d seconds\n", opt_safe_slave_backup_timeout);

cleanup:
	free_mysql_variables(status);

	return(result);
}
/*********************************************************************//**
Retrieves MySQL binlog position of the master server in a replication
setup and saves it in a file. It also saves it in mysql_slave_position
variable. */
bool
write_slave_info(MYSQL *connection)
{
	char *master = NULL;
	char *filename = NULL;
	char *gtid_executed = NULL;
	char *position = NULL;
	char *gtid_slave_pos = NULL;
	char *ptr;
	bool result = false;

	mysql_variable status[] = {
		{"Master_Host", &master},
		{"Relay_Master_Log_File", &filename},
		{"Exec_Master_Log_Pos", &position},
		{"Executed_Gtid_Set", &gtid_executed},
		{NULL, NULL}
	};

	mysql_variable variables[] = {
		{"gtid_slave_pos", &gtid_slave_pos},
		{NULL, NULL}
	};

	read_mysql_variables(connection, "SHOW SLAVE STATUS", status, false);
	read_mysql_variables(connection, "SHOW VARIABLES", variables, true);

	if (master == NULL || filename == NULL || position == NULL) {
		msg("Failed to get master binlog coordinates "
			"from SHOW SLAVE STATUS\n");
		msg("This means that the server is not a "
			"replication slave. Ignoring the --slave-info "
			"option\n");
		/* we still want to continue the backup */
		result = true;
		goto cleanup;
	}

	/* Print slave status to a file.
	If GTID mode is used, construct a CHANGE MASTER statement with
	MASTER_AUTO_POSITION and correct a gtid_purged value. */
	if (gtid_executed != NULL && *gtid_executed) {
		/* MySQL >= 5.6 with GTID enabled */

		/* flatten the multi-line GTID set onto one line */
		for (ptr = strchr(gtid_executed, '\n');
		     ptr;
		     ptr = strchr(ptr, '\n')) {
			*ptr = ' ';
		}

		result = backup_file_printf(XTRABACKUP_SLAVE_INFO,
			"SET GLOBAL gtid_purged='%s';\n"
			"CHANGE MASTER TO MASTER_AUTO_POSITION=1\n",
			gtid_executed);

		ut_a(asprintf(&mysql_slave_position,
			"master host '%s', purge list '%s'",
			master, gtid_executed) != -1);
	} else if (gtid_slave_pos && *gtid_slave_pos) {
		/* MariaDB >= 10.0 with GTID enabled */
		result = backup_file_printf(XTRABACKUP_SLAVE_INFO,
			"SET GLOBAL gtid_slave_pos = '%s';\n"
			"CHANGE MASTER TO master_use_gtid = slave_pos\n",
			gtid_slave_pos);
		ut_a(asprintf(&mysql_slave_position,
			"master host '%s', gtid_slave_pos %s",
			master, gtid_slave_pos) != -1);
	} else {
		/* no GTID: classic file/position coordinates */
		result = backup_file_printf(XTRABACKUP_SLAVE_INFO,
			"CHANGE MASTER TO MASTER_LOG_FILE='%s', "
			"MASTER_LOG_POS=%s\n", filename, position);
		ut_a(asprintf(&mysql_slave_position,
			"master host '%s', filename '%s', position '%s'",
			master, filename, position) != -1);
	}

cleanup:
	free_mysql_variables(status);
	free_mysql_variables(variables);

	return(result);
}
/*********************************************************************//**
Retrieves MySQL Galera cluster state (state uuid and last committed
seqno) and saves it in a file. Both pre-5.5 capitalized and 5.5+
lowercase status names are probed. */
bool
write_galera_info(MYSQL *connection)
{
	char *state_uuid = NULL, *state_uuid55 = NULL;
	char *last_committed = NULL, *last_committed55 = NULL;
	bool result;

	mysql_variable status[] = {
		{"Wsrep_local_state_uuid", &state_uuid},
		{"wsrep_local_state_uuid", &state_uuid55},
		{"Wsrep_last_committed", &last_committed},
		{"wsrep_last_committed", &last_committed55},
		{NULL, NULL}
	};

	/* When backup locks are supported by the server, we should skip
	creating xtrabackup_galera_info file on the backup stage, because
	wsrep_local_state_uuid and wsrep_last_committed will be inconsistent
	without blocking commits. The state file will be created on the prepare
	stage using the WSREP recovery procedure. */
	if (have_backup_locks) {
		return(true);
	}

	read_mysql_variables(connection, "SHOW STATUS", status, true);

	{
		/* prefer the capitalized (older) names, fall back to 5.5+ */
		const char *uuid =
			state_uuid ? state_uuid : state_uuid55;
		const char *seqno =
			last_committed ? last_committed : last_committed55;

		if (uuid == NULL || seqno == NULL) {
			msg("Failed to get master wsrep state from SHOW STATUS.\n");
			result = false;
		} else {
			result = backup_file_printf(XTRABACKUP_GALERA_INFO,
				"%s:%s\n", uuid, seqno);
		}
	}

	free_mysql_variables(status);

	return(result);
}
/*********************************************************************//**
Flush and copy the current binary log file into the backup,
if GTID is enabled */
bool
write_current_binlog_file(MYSQL *connection)
{
	char *executed_gtid_set = NULL;
	char *gtid_binlog_state = NULL;
	char *log_bin_file = NULL;
	char *log_bin_dir = NULL;
	bool gtid_exists;
	bool result = true;
	char filepath[FN_REFLEN];

	mysql_variable status[] = {
		{"Executed_Gtid_Set", &executed_gtid_set},
		{NULL, NULL}
	};

	/* re-read after FLUSH BINARY LOGS to get the new current file */
	mysql_variable status_after_flush[] = {
		{"File", &log_bin_file},
		{NULL, NULL}
	};

	mysql_variable vars[] = {
		{"gtid_binlog_state", &gtid_binlog_state},
		{"log_bin_basename", &log_bin_dir},
		{NULL, NULL}
	};

	read_mysql_variables(connection, "SHOW MASTER STATUS", status, false);
	read_mysql_variables(connection, "SHOW VARIABLES", vars, true);

	/* MySQL-style (Executed_Gtid_Set) or MariaDB-style
	(gtid_binlog_state) GTID support */
	gtid_exists = (executed_gtid_set && *executed_gtid_set)
			|| (gtid_binlog_state && *gtid_binlog_state);

	if (gtid_exists) {
		size_t log_bin_dir_length;

		/* keep the binlog stable while we copy it */
		lock_binlog_maybe(connection);

		/* rotate so the file we copy is complete */
		xb_mysql_query(connection, "FLUSH BINARY LOGS", false);

		read_mysql_variables(connection, "SHOW MASTER STATUS",
			status_after_flush, false);

		if (opt_log_bin != NULL && strchr(opt_log_bin, FN_LIBCHAR)) {
			/* If log_bin is set, it has priority */
			if (log_bin_dir) {
				free(log_bin_dir);
			}
			log_bin_dir = strdup(opt_log_bin);
		} else if (log_bin_dir == NULL) {
			/* Default location is MySQL datadir */
			log_bin_dir = strdup("./");
		}

		dirname_part(log_bin_dir, log_bin_dir, &log_bin_dir_length);

		/* strip final slash if it is not the only path component */
		if (log_bin_dir_length > 1 &&
		    log_bin_dir[log_bin_dir_length - 1] == FN_LIBCHAR) {
			log_bin_dir[log_bin_dir_length - 1] = 0;
		}

		/* NOTE(review): this NULL check runs after dirname_part()
		already used log_bin_dir; it only protects the snprintf
		below. log_bin_dir can be NULL here only if strdup failed
		— consider checking earlier. */
		if (log_bin_dir == NULL || log_bin_file == NULL) {
			msg("Failed to get master binlog coordinates from "
				"SHOW MASTER STATUS");
			result = false;
			goto cleanup;
		}

		ut_snprintf(filepath, sizeof(filepath), "%s%c%s",
				log_bin_dir, FN_LIBCHAR, log_bin_file);
		result = copy_file(ds_data, filepath, log_bin_file, 0);
	}

cleanup:
	free_mysql_variables(status_after_flush);
	free_mysql_variables(status);
	free_mysql_variables(vars);

	return(result);
}
/*********************************************************************//**
Retrieves MySQL binlog position and
saves it in a file. It also prints it to stdout. */
bool
write_binlog_info(MYSQL *connection)
{
	char *filename = NULL;
	char *position = NULL;
	char *gtid_mode = NULL;
	char *gtid_current_pos = NULL;
	char *gtid_executed = NULL;
	char *gtid = NULL;
	bool result;
	bool mysql_gtid;
	bool mariadb_gtid;

	mysql_variable status[] = {
		{"File", &filename},
		{"Position", &position},
		{"Executed_Gtid_Set", &gtid_executed},
		{NULL, NULL}
	};

	mysql_variable vars[] = {
		{"gtid_mode", &gtid_mode},
		{"gtid_current_pos", &gtid_current_pos},
		{NULL, NULL}
	};

	read_mysql_variables(connection, "SHOW MASTER STATUS", status, false);
	read_mysql_variables(connection, "SHOW VARIABLES", vars, true);

	if (filename == NULL || position == NULL) {
		/* Do not create xtrabackup_binlog_info if binary
		log is disabled */
		result = true;
		goto cleanup;
	}

	/* MySQL GTID: gtid_mode=ON; MariaDB GTID: gtid_current_pos set */
	mysql_gtid = ((gtid_mode != NULL) && (strcmp(gtid_mode, "ON") == 0));
	mariadb_gtid = (gtid_current_pos != NULL);

	/* prefer Executed_Gtid_Set (MySQL), otherwise gtid_current_pos */
	gtid = (gtid_executed != NULL ? gtid_executed : gtid_current_pos);

	if (mariadb_gtid || mysql_gtid) {
		/* three-column format: file, position, GTID */
		ut_a(asprintf(&mysql_binlog_position,
			"filename '%s', position '%s', "
			"GTID of the last change '%s'",
			filename, position, gtid) != -1);
		result = backup_file_printf(XTRABACKUP_BINLOG_INFO,
			"%s\t%s\t%s\n", filename, position,
			gtid);
	} else {
		/* two-column format: file, position */
		ut_a(asprintf(&mysql_binlog_position,
			"filename '%s', position '%s'",
			filename, position) != -1);
		result = backup_file_printf(XTRABACKUP_BINLOG_INFO,
			"%s\t%s\n", filename, position);
	}

cleanup:
	free_mysql_variables(status);
	free_mysql_variables(vars);

	return(result);
}
/*********************************************************************//**
Writes xtrabackup_info file and if backup_history is enabled creates
PERCONA_SCHEMA.xtrabackup_history and writes a new history record to the
table containing all the history info particular to the just completed
backup. */
bool
write_xtrabackup_info(MYSQL *connection)
{
	MYSQL_STMT *stmt;
	MYSQL_BIND bind[19];	/* one slot per history-table column */
	char *uuid = NULL;
	char *server_version = NULL;
	char buf_start_time[100];
	char buf_end_time[100];
	int idx;
	tm tm;
	my_bool null = TRUE;	/* shared is_null flag for NULL-able binds */

	/* index must match xtrabackup_stream_fmt values */
	const char *xb_stream_name[] = {"file", "tar", "xbstream"};
	const char *ins_query = "insert into PERCONA_SCHEMA.xtrabackup_history("
		"uuid, name, tool_name, tool_command, tool_version, "
		"ibbackup_version, server_version, start_time, end_time, "
		"lock_time, binlog_pos, innodb_from_lsn, innodb_to_lsn, "
		"partial, incremental, format, compact, compressed, "
		"encrypted) "
		"values(?,?,?,?,?,?,?,from_unixtime(?),from_unixtime(?),"
		"?,?,?,?,?,?,?,?,?,?)";

	ut_ad(xtrabackup_stream_fmt < 3);

	/* server-generated identity and version strings */
	uuid = read_mysql_one_value(connection, "SELECT UUID()");
	server_version = read_mysql_one_value(connection, "SELECT VERSION()");

	/* format start/end timestamps for the flat file */
	localtime_r(&history_start_time, &tm);
	strftime(buf_start_time, sizeof(buf_start_time),
		 "%Y-%m-%d %H:%M:%S", &tm);
	history_end_time = time(NULL);
	localtime_r(&history_end_time, &tm);
	strftime(buf_end_time, sizeof(buf_end_time),
		 "%Y-%m-%d %H:%M:%S", &tm);

	/* always write the flat xtrabackup_info file */
	backup_file_printf(XTRABACKUP_INFO,
		"uuid = %s\n"
		"name = %s\n"
		"tool_name = %s\n"
		"tool_command = %s\n"
		"tool_version = %s\n"
		"ibbackup_version = %s\n"
		"server_version = %s\n"
		"start_time = %s\n"
		"end_time = %s\n"
		"lock_time = %d\n"
		"binlog_pos = %s\n"
		"innodb_from_lsn = %llu\n"
		"innodb_to_lsn = %llu\n"
		"partial = %s\n"
		"incremental = %s\n"
		"format = %s\n"
		"compact = %s\n"
		"compressed = %s\n"
		"encrypted = %s\n",
		uuid, /* uuid */
		opt_history ? opt_history : "",  /* name */
		tool_name,  /* tool_name */
		tool_args,  /* tool_command */
		XTRABACKUP_VERSION,  /* tool_version */
		XTRABACKUP_VERSION,  /* ibbackup_version */
		server_version,  /* server_version */
		buf_start_time,  /* start_time */
		buf_end_time,  /* end_time */
		history_lock_time, /* lock_time */
		mysql_binlog_position ?
			mysql_binlog_position : "", /* binlog_pos */
		incremental_lsn, /* innodb_from_lsn */
		metadata_to_lsn, /* innodb_to_lsn */
		(xtrabackup_tables /* partial */
		 || xtrabackup_tables_file
		 || xtrabackup_databases
		 || xtrabackup_databases_file) ? "Y" : "N",
		xtrabackup_incremental ? "Y" : "N", /* incremental */
		xb_stream_name[xtrabackup_stream_fmt], /* format */
		xtrabackup_compact ? "Y" : "N", /* compact */
		xtrabackup_compress ? "compressed" : "N", /* compressed */
		xtrabackup_encrypt ? "Y" : "N"); /* encrypted */

	/* the history table is only written with --history; note that
	opt_history is guaranteed non-NULL past this point */
	if (!opt_history) {
		goto cleanup;
	}

	xb_mysql_query(connection,
		"CREATE DATABASE IF NOT EXISTS PERCONA_SCHEMA", false);
	xb_mysql_query(connection,
		"CREATE TABLE IF NOT EXISTS PERCONA_SCHEMA.xtrabackup_history("
		"uuid VARCHAR(40) NOT NULL PRIMARY KEY,"
		"name VARCHAR(255) DEFAULT NULL,"
		"tool_name VARCHAR(255) DEFAULT NULL,"
		"tool_command TEXT DEFAULT NULL,"
		"tool_version VARCHAR(255) DEFAULT NULL,"
		"ibbackup_version VARCHAR(255) DEFAULT NULL,"
		"server_version VARCHAR(255) DEFAULT NULL,"
		"start_time TIMESTAMP NULL DEFAULT NULL,"
		"end_time TIMESTAMP NULL DEFAULT NULL,"
		"lock_time BIGINT UNSIGNED DEFAULT NULL,"
		"binlog_pos VARCHAR(128) DEFAULT NULL,"
		"innodb_from_lsn BIGINT UNSIGNED DEFAULT NULL,"
		"innodb_to_lsn BIGINT UNSIGNED DEFAULT NULL,"
		"partial ENUM('Y', 'N') DEFAULT NULL,"
		"incremental ENUM('Y', 'N') DEFAULT NULL,"
		"format ENUM('file', 'tar', 'xbstream') DEFAULT NULL,"
		"compact ENUM('Y', 'N') DEFAULT NULL,"
		"compressed ENUM('Y', 'N') DEFAULT NULL,"
		"encrypted ENUM('Y', 'N') DEFAULT NULL"
		") CHARACTER SET utf8 ENGINE=innodb", false);

	stmt = mysql_stmt_init(connection);

	mysql_stmt_prepare(stmt, ins_query, strlen(ins_query));

	memset(bind, 0, sizeof(bind));
	idx = 0;

	/* binds below must stay in the column order of ins_query */

	/* uuid */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = uuid;
	bind[idx].buffer_length = strlen(uuid);
	++idx;

	/* name */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(opt_history);
	bind[idx].buffer_length = strlen(opt_history);
	if (!(opt_history && *opt_history)) {
		bind[idx].is_null = &null;
	}
	++idx;

	/* tool_name */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = tool_name;
	bind[idx].buffer_length = strlen(tool_name);
	++idx;

	/* tool_command */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = tool_args;
	bind[idx].buffer_length = strlen(tool_args);
	++idx;

	/* tool_version */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(XTRABACKUP_VERSION);
	bind[idx].buffer_length = strlen(XTRABACKUP_VERSION);
	++idx;

	/* ibbackup_version */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(XTRABACKUP_VERSION);
	bind[idx].buffer_length = strlen(XTRABACKUP_VERSION);
	++idx;

	/* server_version */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = server_version;
	bind[idx].buffer_length = strlen(server_version);
	++idx;

	/* start_time (passed through from_unixtime() server-side) */
	bind[idx].buffer_type = MYSQL_TYPE_LONG;
	bind[idx].buffer = &history_start_time;
	++idx;

	/* end_time (passed through from_unixtime() server-side) */
	bind[idx].buffer_type = MYSQL_TYPE_LONG;
	bind[idx].buffer = &history_end_time;
	++idx;

	/* lock_time */
	bind[idx].buffer_type = MYSQL_TYPE_LONG;
	bind[idx].buffer = &history_lock_time;
	++idx;

	/* binlog_pos */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = mysql_binlog_position;
	if (mysql_binlog_position != NULL) {
		bind[idx].buffer_length = strlen(mysql_binlog_position);
	} else {
		bind[idx].is_null = &null;
	}
	++idx;

	/* innodb_from_lsn */
	bind[idx].buffer_type = MYSQL_TYPE_LONGLONG;
	bind[idx].buffer = (char*)(&incremental_lsn);
	++idx;

	/* innodb_to_lsn */
	bind[idx].buffer_type = MYSQL_TYPE_LONGLONG;
	bind[idx].buffer = (char*)(&metadata_to_lsn);
	++idx;

	/* partial (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)((xtrabackup_tables
				    || xtrabackup_tables_file
				    || xtrabackup_databases
				    || xtrabackup_databases_file) ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* incremental (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(
		(xtrabackup_incremental
		 || xtrabackup_incremental_basedir
		 || opt_incremental_history_name
		 || opt_incremental_history_uuid) ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* format (file | tar | xbstream) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xb_stream_name[xtrabackup_stream_fmt]);
	bind[idx].buffer_length = strlen(xb_stream_name[xtrabackup_stream_fmt]);
	++idx;

	/* compact (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xtrabackup_compact ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* compressed (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xtrabackup_compress ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	/* encrypted (Y | N) */
	bind[idx].buffer_type = MYSQL_TYPE_STRING;
	bind[idx].buffer = (char*)(xtrabackup_encrypt ? "Y" : "N");
	bind[idx].buffer_length = 1;
	++idx;

	ut_ad(idx == 19);

	mysql_stmt_bind_param(stmt, bind);

	mysql_stmt_execute(stmt);
	mysql_stmt_close(stmt);

cleanup:

	free(uuid);
	free(server_version);

	return(true);
}
/* Write backup-my.cnf into the backup, containing the minimal set of
InnoDB server options needed to prepare/restore this backup.
@return what backup_file_printf() returns (presumably success flag). */
bool
write_backup_config_file()
{
	return backup_file_printf("backup-my.cnf",
		"# This MySQL options file was generated by innobackupex.\n\n"
		"# The MySQL server\n"
		"[mysqld]\n"
		"innodb_checksum_algorithm=%s\n"
		"innodb_log_checksum_algorithm=%s\n"
		"innodb_data_file_path=%s\n"
		"innodb_log_files_in_group=%lu\n"
		"innodb_log_file_size=%lld\n"
		"innodb_fast_checksum=%s\n"
		"innodb_page_size=%lu\n"
		"innodb_log_block_size=%lu\n"
		"innodb_undo_directory=%s\n"
		"innodb_undo_tablespaces=%lu\n"
		"%s%s\n"	/* optional innodb_doublewrite_file line */
		"%s%s\n",	/* optional innodb_buffer_pool_filename line */
		innodb_checksum_algorithm_names[srv_checksum_algorithm],
		innodb_checksum_algorithm_names[srv_log_checksum_algorithm],
		innobase_data_file_path,
		srv_n_log_files,
		innobase_log_file_size,
		srv_fast_checksum ? "true" : "false",
		srv_page_size,
		srv_log_block_size,
		srv_undo_dir,
		srv_undo_tablespaces,
		innobase_doublewrite_file ? "innodb_doublewrite_file=" : "",
		innobase_doublewrite_file ? innobase_doublewrite_file : "",
		innobase_buffer_pool_filename ?
			"innodb_buffer_pool_filename=" : "",
		innobase_buffer_pool_filename ?
			innobase_buffer_pool_filename : "");
}
static
char *make_argv(char *buf, size_t len, int argc, char **argv)
{
size_t left= len;
const char *arg;
buf[0]= 0;
++argv; --argc;
while (argc > 0 && left > 0)
{
arg = *argv;
if (strncmp(*argv, "--password", strlen("--password")) == 0) {
arg = "--password=...";
}
if (strncmp(*argv, "--encrypt-key",
strlen("--encrypt-key")) == 0) {
arg = "--encrypt-key=...";
}
if (strncmp(*argv, "--encrypt_key",
strlen("--encrypt_key")) == 0) {
arg = "--encrypt_key=...";
}
left-= ut_snprintf(buf + len - left, left,
"%s%c", arg, argc > 1 ? ' ' : 0);
++argv; --argc;
}
return buf;
}
void
capture_tool_command(int argc, char **argv)
{
/* capture tool name tool args */
tool_name = strrchr(argv[0], '/');
tool_name = tool_name ? tool_name + 1 : argv[0];
make_argv(tool_args, sizeof(tool_args), argc, argv);
}
/* When an incremental backup is keyed by a history record
(--incremental-history-name / --incremental-history-uuid), resolve the
base LSN from the history table into incremental_lsn.
@return false only when a requested history lookup fails. */
bool
select_history()
{
	if (!opt_incremental_history_name && !opt_incremental_history_uuid) {
		return(true);
	}

	return(select_incremental_lsn_from_history(&incremental_lsn));
}
/* For an incremental backup that will use changed-page bitmaps, ask the
server to flush them to disk first. No-op otherwise.
@return always true. */
bool
flush_changed_page_bitmaps()
{
	const bool want_flush = xtrabackup_incremental
		&& have_changed_page_bitmaps
		&& !xtrabackup_incremental_force_scan;

	if (want_flush) {
		xb_mysql_query(mysql_connection,
			"FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS",
			false);
	}

	return(true);
}
/*********************************************************************//**
Deallocate memory, disconnect from MySQL server, etc.
(Note: the former "@return true on success" was stale — this function
returns void.) */
void
backup_cleanup()
{
	free(mysql_slave_position);
	free(mysql_binlog_position);
	free(buffer_pool_filename);

	if (mysql_connection) {
		mysql_close(mysql_connection);
	}
}
#ifndef XTRABACKUP_BACKUP_MYSQL_H
#define XTRABACKUP_BACKUP_MYSQL_H

#include <mysql.h>

/* mysql flavor and version */
enum mysql_flavor_t { FLAVOR_UNKNOWN, FLAVOR_MYSQL,
		      FLAVOR_PERCONA_SERVER, FLAVOR_MARIADB };
extern mysql_flavor_t server_flavor;
extern unsigned long mysql_server_version;

/* server capabilities (detected at connect / capability-probe time) */
extern bool have_changed_page_bitmaps;
extern bool have_backup_locks;
extern bool have_lock_wait_timeout;
extern bool have_galera_enabled;
extern bool have_flush_engine_logs;
extern bool have_multi_threaded_slave;
extern bool have_gtid_slave;

/* History on server */
extern time_t history_start_time;
extern time_t history_end_time;
extern time_t history_lock_time;

/* true when wait_for_safe_slave() stopped a running SQL thread */
extern bool sql_thread_started;
/* human-readable replication coordinates for logging/history */
extern char *mysql_slave_position;
extern char *mysql_binlog_position;
extern char *buffer_pool_filename;

/** connection to mysql server */
extern MYSQL *mysql_connection;

void
capture_tool_command(int argc, char **argv);

bool
select_history();

bool
flush_changed_page_bitmaps();

void
backup_cleanup();

bool
get_mysql_vars(MYSQL *connection);

bool
detect_mysql_capabilities_for_backup();

MYSQL *
xb_mysql_connect();

MYSQL_RES *
xb_mysql_query(MYSQL *connection, const char *query, bool use_result,
		bool die_on_error = true);

void
unlock_all(MYSQL *connection);

bool
write_current_binlog_file(MYSQL *connection);

bool
write_binlog_info(MYSQL *connection);

bool
write_xtrabackup_info(MYSQL *connection);

bool
write_backup_config_file();

bool
lock_binlog_maybe(MYSQL *connection);

bool
lock_tables(MYSQL *connection);

bool
wait_for_safe_slave(MYSQL *connection);

bool
write_galera_info(MYSQL *connection);

bool
write_slave_info(MYSQL *connection);

#endif
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2012 Percona Inc.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Changed page bitmap implementation */
#include "changed_page_bitmap.h"
#include "common.h"
#include "xtrabackup.h"
/* TODO: copy-pasted shared definitions from the XtraDB bitmap write code.
Remove these on the first opportunity, i.e. single-binary XtraBackup. */
/* log0online.h */
/** Single bitmap file information.
Mirrors the XtraDB bitmap-writer definition (see the copy-paste note
above); keep in sync with log0online.h. */
struct log_online_bitmap_file_t {
	char	name[FN_REFLEN];	/*!< Name with full path */
	os_file_t	file;		/*!< Handle to opened file */
	ib_uint64_t	size;		/*!< Size of the file */
	ib_uint64_t	offset;		/*!< Offset of the next read,
					or count of already-read bytes
					*/
};
/** A set of bitmap files containing some LSN range */
struct log_online_bitmap_file_range_t {
	size_t	count;		/*!< Number of files */
	/*!< Dynamically-allocated array of info about individual files */
	struct files_t {
		char	name[FN_REFLEN];/*!< Name of a file */
		lsn_t	start_lsn;	/*!< Starting LSN of data in this
					file */
		ulong	seq_num;	/*!< Sequence number of this file */
	} *files;
};
/* log0online.c */
/** File name stem for bitmap files. */
static const char* bmp_file_name_stem = "ib_modified_log_";
/** The bitmap file block size in bytes. All writes will be multiples of this.
*/
enum {
MODIFIED_PAGE_BLOCK_SIZE = 4096
};
/** Offsets in a file bitmap block */
enum {
MODIFIED_PAGE_IS_LAST_BLOCK = 0,/* 1 if last block in the current
write, 0 otherwise. */
MODIFIED_PAGE_START_LSN = 4, /* The starting tracked LSN of this and
other blocks in the same write */
MODIFIED_PAGE_END_LSN = 12, /* The ending tracked LSN of this and
other blocks in the same write */
MODIFIED_PAGE_SPACE_ID = 20, /* The space ID of tracked pages in
this block */
MODIFIED_PAGE_1ST_PAGE_ID = 24, /* The page ID of the first tracked
page in this block */
MODIFIED_PAGE_BLOCK_UNUSED_1 = 28,/* Unused in order to align the start
of bitmap at 8 byte boundary */
MODIFIED_PAGE_BLOCK_BITMAP = 32,/* Start of the bitmap itself */
MODIFIED_PAGE_BLOCK_UNUSED_2 = MODIFIED_PAGE_BLOCK_SIZE - 8,
/* Unused in order to align the end of
bitmap at 8 byte boundary */
MODIFIED_PAGE_BLOCK_CHECKSUM = MODIFIED_PAGE_BLOCK_SIZE - 4
/* The checksum of the current block */
};
/** Length of the bitmap data in a block */
enum { MODIFIED_PAGE_BLOCK_BITMAP_LEN
= MODIFIED_PAGE_BLOCK_UNUSED_2 - MODIFIED_PAGE_BLOCK_BITMAP };
/** Length of the bitmap data in a block in page ids */
enum { MODIFIED_PAGE_BLOCK_ID_COUNT = MODIFIED_PAGE_BLOCK_BITMAP_LEN * 8 };
typedef ib_uint64_t bitmap_word_t;
/****************************************************************//**
Calculate a bitmap block checksum. Algorithm borrowed from
log_block_calc_checksum.
@return checksum */
UNIV_INLINE
ulint
log_online_calc_checksum(
/*=====================*/
const byte* block); /*!<in: bitmap block */
/****************************************************************//**
Provide a comparisson function for the RB-tree tree (space,
block_start_page) pairs. Actual implementation does not matter as
long as the ordering is full.
@return -1 if p1 < p2, 0 if p1 == p2, 1 if p1 > p2
*/
static
int
log_online_compare_bmp_keys(
/*========================*/
const void* p1, /*!<in: 1st key to compare */
const void* p2) /*!<in: 2nd key to compare */
{
const byte *k1 = (const byte *)p1;
const byte *k2 = (const byte *)p2;
ulint k1_space = mach_read_from_4(k1 + MODIFIED_PAGE_SPACE_ID);
ulint k2_space = mach_read_from_4(k2 + MODIFIED_PAGE_SPACE_ID);
if (k1_space == k2_space) {
ulint k1_start_page
= mach_read_from_4(k1 + MODIFIED_PAGE_1ST_PAGE_ID);
ulint k2_start_page
= mach_read_from_4(k2 + MODIFIED_PAGE_1ST_PAGE_ID);
return k1_start_page < k2_start_page
? -1 : k1_start_page > k2_start_page ? 1 : 0;
}
return k1_space < k2_space ? -1 : 1;
}
/****************************************************************//**
Calculate a bitmap block checksum. Algorithm borrowed from
log_block_calc_checksum.
@return checksum */
UNIV_INLINE
ulint
log_online_calc_checksum(
/*=====================*/
	const byte*	block)	/*!<in: bitmap block */
{
	/* Running byte sum with a rotating shift; covers every byte of
	the block that precedes the checksum field itself. */
	ulint	acc	= 1;
	ulint	shift	= 0;

	for (ulint i = 0; i < MODIFIED_PAGE_BLOCK_CHECKSUM; i++) {

		ulint	byte_val = block[i];

		/* Keep the accumulator within 31 bits before adding. */
		acc &= 0x7FFFFFFFUL;
		acc += byte_val + (byte_val << shift);

		if (++shift > 24) {
			shift = 0;
		}
	}

	return acc;
}
/****************************************************************//**
Read one bitmap data page and check it for corruption.
On a successful read the file offset is advanced by
MODIFIED_PAGE_BLOCK_SIZE; corruption is reported through *checksum_ok,
not through the return value.
@return TRUE if page read OK, FALSE if I/O error */
static
ibool
log_online_read_bitmap_page(
/*========================*/
	log_online_bitmap_file_t	*bitmap_file,	/*!<in/out: bitmap
							file */
	byte		*page,			/*!<out: read page. Must be at
						least MODIFIED_PAGE_BLOCK_SIZE
						bytes long */
	ibool		*checksum_ok)		/*!<out: TRUE if page
						checksum OK */
{
	ulint	checksum;
	ulint	actual_checksum;
	ibool	success;

	/* The caller must guarantee that a whole page remains readable
	at an aligned offset. */
	ut_a(bitmap_file->size >= MODIFIED_PAGE_BLOCK_SIZE);
	ut_a(bitmap_file->offset
	     <= bitmap_file->size - MODIFIED_PAGE_BLOCK_SIZE);
	ut_a(bitmap_file->offset % MODIFIED_PAGE_BLOCK_SIZE == 0);

	success = os_file_read(bitmap_file->file, page, bitmap_file->offset,
			       MODIFIED_PAGE_BLOCK_SIZE);

	if (UNIV_UNLIKELY(!success)) {

		/* The following call prints an error message */
		os_file_get_last_error(TRUE);
		msg("InnoDB: Warning: failed reading changed page bitmap "
		    "file \'%s\'\n", bitmap_file->name);
		return FALSE;
	}

	bitmap_file->offset += MODIFIED_PAGE_BLOCK_SIZE;
	ut_ad(bitmap_file->offset <= bitmap_file->size);

	/* Compare the stored checksum against a fresh recomputation. */
	checksum = mach_read_from_4(page + MODIFIED_PAGE_BLOCK_CHECKSUM);
	actual_checksum = log_online_calc_checksum(page);
	*checksum_ok = (checksum == actual_checksum);

	return TRUE;
}
/*********************************************************************//**
Check the name of a given file if it's a changed page bitmap file and
return file sequence and start LSN name components if it is. If is not,
the values of output parameters are undefined.
@return TRUE if a given file is a changed page bitmap file. */
static
ibool
log_online_is_bitmap_file(
/*======================*/
	const os_file_stat_t*	file_info,		/*!<in: file to
							check */
	ulong*			bitmap_file_seq_num,	/*!<out: bitmap file
							sequence number */
	lsn_t*			bitmap_file_start_lsn)	/*!<out: bitmap file
							start LSN */
{
	char	parsed_stem[FN_REFLEN];

	ut_ad (strlen(file_info->name) < OS_FILE_MAX_PATH);

	/* Only regular files and symlinks can be bitmap files. */
	if (file_info->type != OS_FILE_TYPE_FILE
	    && file_info->type != OS_FILE_TYPE_LINK) {

		return FALSE;
	}

	/* Expect "<stem><seq_num>_<start_lsn>.xdb"; all three parts
	must parse. */
	if (sscanf(file_info->name, "%[a-z_]%lu_" LSN_PF ".xdb", parsed_stem,
		   bitmap_file_seq_num, bitmap_file_start_lsn) != 3) {

		return FALSE;
	}

	/* Finally, the stem must match exactly. */
	return strcmp(parsed_stem, bmp_file_name_stem) == 0;
}
/*********************************************************************//**
List the bitmap files in srv_data_home and setup their range that contains the
specified LSN interval. This range, if non-empty, will start with a file that
has the greatest LSN equal to or less than the start LSN and will include all
the files up to the one with the greatest LSN less than the end LSN. Caller
must free bitmap_files->files when done if bitmap_files set to non-NULL and
this function returned TRUE. Field bitmap_files->count might be set to a
larger value than the actual count of the files, and space for the unused array
slots will be allocated but cleared to zeroes.
@return TRUE if succeeded
*/
static
ibool
log_online_setup_bitmap_file_range(
/*===============================*/
	log_online_bitmap_file_range_t	*bitmap_files,	/*!<in/out: bitmap file
							range */
	lsn_t				range_start,	/*!<in: start LSN */
	lsn_t				range_end)	/*!<in: end LSN */
{
	os_file_dir_t	bitmap_dir;
	os_file_stat_t	bitmap_dir_file_info;
	ulong		first_file_seq_num	= ULONG_MAX;
	ulong		last_file_seq_num	= 0;
	lsn_t		first_file_start_lsn	= LSN_MAX;

	xb_ad(range_end >= range_start);

	bitmap_files->count = 0;
	bitmap_files->files = NULL;

	/* 1st pass: find the first/last sequence numbers so the info
	array can be sized. */
	bitmap_dir = os_file_opendir(srv_data_home, FALSE);
	if (UNIV_UNLIKELY(!bitmap_dir)) {

		msg("InnoDB: Error: failed to open bitmap directory \'%s\'\n",
		    srv_data_home);
		return FALSE;
	}

	while (!os_file_readdir_next_file(srv_data_home, bitmap_dir,
					  &bitmap_dir_file_info)) {

		ulong	file_seq_num;
		lsn_t	file_start_lsn;

		/* Skip non-bitmap files and files whose data starts at
		or past the end of the wanted range. */
		if (!log_online_is_bitmap_file(&bitmap_dir_file_info,
					       &file_seq_num,
					       &file_start_lsn)
		    || file_start_lsn >= range_end) {

			continue;
		}

		if (file_seq_num > last_file_seq_num) {

			last_file_seq_num = file_seq_num;
		}

		/* Track the first file of the range: either a file that
		starts inside the range, or the one with the greatest
		start LSN still below range_start. */
		if (file_start_lsn >= range_start
		    || file_start_lsn == first_file_start_lsn
		    || first_file_start_lsn > range_start) {

			/* A file that falls into the range */
			if (file_start_lsn < first_file_start_lsn) {

				first_file_start_lsn = file_start_lsn;
			}
			if (file_seq_num < first_file_seq_num) {

				first_file_seq_num = file_seq_num;
			}
		} else if (file_start_lsn > first_file_start_lsn) {

			/* A file that has LSN closer to the range start
			but smaller than it, replacing another such file */
			first_file_start_lsn = file_start_lsn;
			first_file_seq_num = file_seq_num;
		}
	}

	if (UNIV_UNLIKELY(os_file_closedir(bitmap_dir))) {

		os_file_get_last_error(TRUE);
		msg("InnoDB: Error: cannot close \'%s\'\n",srv_data_home);
		return FALSE;
	}

	if (first_file_seq_num == ULONG_MAX && last_file_seq_num == 0) {

		/* No bitmap file overlaps the range: empty result. */
		bitmap_files->count = 0;
		return TRUE;
	}

	/* Sequence numbers serve as array indexes, so count may exceed
	the number of files actually present; gaps stay zero-filled. */
	bitmap_files->count = last_file_seq_num - first_file_seq_num + 1;

	/* 2nd pass: get the file names in the file_seq_num order */
	bitmap_dir = os_file_opendir(srv_data_home, FALSE);
	if (UNIV_UNLIKELY(!bitmap_dir)) {

		msg("InnoDB: Error: failed to open bitmap directory \'%s\'\n",
		    srv_data_home);
		return FALSE;
	}

	/* NOTE(review): allocated with ut_malloc() but released with free()
	here and by the callers -- confirm the two allocators are
	interchangeable in this build. */
	bitmap_files->files =
		static_cast<log_online_bitmap_file_range_t::files_t *>
		(ut_malloc(bitmap_files->count
			   * sizeof(bitmap_files->files[0])));
	memset(bitmap_files->files, 0,
	       bitmap_files->count * sizeof(bitmap_files->files[0]));

	while (!os_file_readdir_next_file(srv_data_home, bitmap_dir,
					  &bitmap_dir_file_info)) {

		ulong	file_seq_num;
		lsn_t	file_start_lsn;
		size_t	array_pos;

		if (!log_online_is_bitmap_file(&bitmap_dir_file_info,
					       &file_seq_num,
					       &file_start_lsn)
		    || file_start_lsn >= range_end
		    || file_start_lsn < first_file_start_lsn) {

			continue;
		}

		array_pos = file_seq_num - first_file_seq_num;
		if (UNIV_UNLIKELY(array_pos >= bitmap_files->count)) {

			msg("InnoDB: Error: inconsistent bitmap file "
			    "directory\n");
			/* Bug fix: the directory handle used to leak on
			this early-return path; close it before bailing. */
			os_file_closedir(bitmap_dir);
			free(bitmap_files->files);
			return FALSE;
		}

		/* Keep the entry with the greatest sequence number for
		each slot (slots start zeroed). */
		if (file_seq_num > bitmap_files->files[array_pos].seq_num) {

			bitmap_files->files[array_pos].seq_num = file_seq_num;
			strncpy(bitmap_files->files[array_pos].name,
				bitmap_dir_file_info.name, FN_REFLEN);
			bitmap_files->files[array_pos].name[FN_REFLEN - 1]
				= '\0';
			bitmap_files->files[array_pos].start_lsn
				= file_start_lsn;
		}
	}

	if (UNIV_UNLIKELY(os_file_closedir(bitmap_dir))) {

		os_file_get_last_error(TRUE);
		msg("InnoDB: Error: cannot close \'%s\'\n", srv_data_home);
		free(bitmap_files->files);
		return FALSE;
	}

#ifdef UNIV_DEBUG
	/* The filled prefix of the array must be sorted by both sequence
	number and start LSN. */
	ut_ad(bitmap_files->files[0].seq_num == first_file_seq_num);
	for (size_t i = 1; i < bitmap_files->count; i++) {
		if (!bitmap_files->files[i].seq_num) {
			break;
		}
		ut_ad(bitmap_files->files[i].seq_num
		      > bitmap_files->files[i - 1].seq_num);
		ut_ad(bitmap_files->files[i].start_lsn
		      >= bitmap_files->files[i - 1].start_lsn);
	}
#endif

	return TRUE;
}
/****************************************************************//**
Open a bitmap file for reading.
@return TRUE if opened successfully */
static
ibool
log_online_open_bitmap_file_read_only(
/*==================================*/
	const char*	name,	/*!<in: bitmap file
				name without directory,
				which is assumed to be
				srv_data_home */
	log_online_bitmap_file_t*	bitmap_file)	/*!<out: opened bitmap
							file */
{
	ibool	open_ok	= FALSE;

	xb_ad(name[0] != '\0');

	/* Full path = data home directory followed by the bare name. */
	ut_snprintf(bitmap_file->name, FN_REFLEN, "%s%s", srv_data_home, name);

	bitmap_file->file
		= os_file_create_simple_no_error_handling(0, bitmap_file->name,
							  OS_FILE_OPEN,
							  OS_FILE_READ_ONLY,
							  &open_ok);
	if (UNIV_UNLIKELY(!open_ok)) {

		/* Here and below assume that bitmap file names do not
		contain apostrophes, thus no need for ut_print_filename(). */
		msg("InnoDB: Warning: error opening the changed page "
		    "bitmap \'%s\'\n", bitmap_file->name);
		return FALSE;
	}

	bitmap_file->size = os_file_get_size(bitmap_file->file);
	bitmap_file->offset = 0;

#ifdef UNIV_LINUX
	/* The file is read strictly sequentially and never reused. */
	posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_SEQUENTIAL);
	posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_NOREUSE);
#endif

	return TRUE;
}
/****************************************************************//**
Diagnose one or both of the following situations if we read close to
the end of bitmap file:
1) Warn if the remainder of the file is less than one page.
2) Error if we cannot read any more full pages but the last read page
did not have the last-in-run flag set.
@return FALSE for the error */
static
ibool
log_online_diagnose_bitmap_eof(
/*===========================*/
	const log_online_bitmap_file_t*	bitmap_file,	/*!< in: bitmap file */
	ibool	last_page_in_run)	/*!< in: "last page in
					run" flag value in the
					last read page */
{
	/* Nothing to diagnose while at least one full page remains. */
	ibool	near_eof
		= (bitmap_file->size < MODIFIED_PAGE_BLOCK_SIZE)
		|| (bitmap_file->offset
		    > bitmap_file->size - MODIFIED_PAGE_BLOCK_SIZE);

	if (!near_eof) {

		return TRUE;
	}

	if (UNIV_UNLIKELY(bitmap_file->offset != bitmap_file->size)) {

		/* If we are not at EOF and we have less than one page
		to read, it's junk. This error is not fatal in
		itself. */
		msg("InnoDB: Warning: junk at the end of changed "
		    "page bitmap file \'%s\'.\n", bitmap_file->name);
	}

	if (UNIV_UNLIKELY(!last_page_in_run)) {

		/* We are at EOF but the last read page did not finish
		a run.  Reported as a "Warning" because it is not fatal
		for the whole server. */
		msg("InnoDB: Warning: changed page bitmap "
		    "file \'%s\' does not contain a complete run "
		    "at the end.\n", bitmap_file->name);

		return FALSE;
	}

	return TRUE;
}
/* End of copy-pasted definitions */
/** Iterator structure over changed page bitmap: walks the bits of the
bitmap pages that belong to one space id, in page-id order */
struct xb_page_bitmap_range_struct {
	const xb_page_bitmap	*bitmap;	/* Bitmap with data */
	ulint	space_id;			/* Space id for this
						iterator */
	ulint	bit_i;				/* Bit index of the iterator
						position in the current page */
	const ib_rbt_node_t	*bitmap_node;	/* Current bitmap tree node */
	const byte	*bitmap_page;		/* Current bitmap page */
	ulint	current_page_id;		/* Current page id;
						ULINT_UNDEFINED when the
						iterator is exhausted */
};
/****************************************************************//**
Print a diagnostic message on missing bitmap data for an LSN range. */
static
void
xb_msg_missing_lsn_data(
/*====================*/
	lsn_t	missing_interval_start,	/*!<in: interval start */
	lsn_t	missing_interval_end)	/*!<in: interval end */
{
	/* A single warning line; both bounds use the LSN_PF format. */
	msg("xtrabackup: warning: changed page data missing for LSNs between "
	    LSN_PF " and " LSN_PF "\n",
	    missing_interval_start, missing_interval_end);
}
/****************************************************************//**
Scan a bitmap file until data for a desired LSN or EOF is found and check that
the page before the starting one is not corrupted to ensure that the found page
indeed contains the very start of the desired LSN data. The caller must check
the page LSN values to determine if the bitmap file was scanned until the data
was found or until EOF. Page must be at least MODIFIED_PAGE_BLOCK_SIZE big.
@return TRUE if the scan successful without corruption detected
*/
static
ibool
xb_find_lsn_in_bitmap_file(
/*=======================*/
	log_online_bitmap_file_t	*bitmap_file,	/*!<in/out: bitmap
							file */
	byte		*page,		/*!<in/out: last read
					bitmap page */
	lsn_t		*page_end_lsn,	/*!<out: end LSN of the
					last read page */
	lsn_t		lsn)		/*!<in: LSN to find */
{
	ibool	last_page_ok = TRUE;
	ibool	next_to_last_page_ok = TRUE;

	xb_ad (bitmap_file->size >= MODIFIED_PAGE_BLOCK_SIZE);

	*page_end_lsn = 0;

	/* Read forward one page at a time until the last page read covers
	the target LSN or no full page remains.  Keep the checksum status
	of the last two pages read. */
	while ((*page_end_lsn <= lsn)
	       && (bitmap_file->offset
		   <= bitmap_file->size - MODIFIED_PAGE_BLOCK_SIZE)) {

		next_to_last_page_ok = last_page_ok;
		if (!log_online_read_bitmap_page(bitmap_file, page,
						 &last_page_ok)) {
			/* Hard I/O error, not mere corruption. */
			return FALSE;
		}

		*page_end_lsn = mach_read_from_8(page + MODIFIED_PAGE_END_LSN);
	}

	/* We check two pages here because the last read page already contains
	the required LSN data. If the next to the last one page is corrupted,
	then we have no way of telling if that page contained the required LSN
	range data too */
	return last_page_ok && next_to_last_page_ok;
}
/****************************************************************//**
Read the disk bitmap and build the changed page bitmap tree for the
LSN interval incremental_lsn to checkpoint_lsn_start.  The result is a
red-black tree of MODIFIED_PAGE_BLOCK_SIZE pages keyed by
(space id, first page id); pages with equal keys are OR-merged.
@return the built bitmap tree or NULL if unable to read the full interval for
any reason. */
xb_page_bitmap*
xb_page_bitmap_init(void)
/*=====================*/
{
	log_online_bitmap_file_t	bitmap_file;
	lsn_t		bmp_start_lsn	= incremental_lsn;
	lsn_t		bmp_end_lsn	= checkpoint_lsn_start;
	byte		page[MODIFIED_PAGE_BLOCK_SIZE];
	lsn_t		current_page_end_lsn;
	xb_page_bitmap	*result;
	ibool		last_page_in_run= FALSE;
	log_online_bitmap_file_range_t	bitmap_files;
	size_t		bmp_i;
	ibool		last_page_ok	= TRUE;

	if (UNIV_UNLIKELY(bmp_start_lsn > bmp_end_lsn)) {

		/* Bug fix: the message used to read "larger than than". */
		msg("xtrabackup: incremental backup LSN " LSN_PF
		    " is larger than the last checkpoint LSN " LSN_PF
		    "\n", bmp_start_lsn, bmp_end_lsn);
		return NULL;
	}

	if (!log_online_setup_bitmap_file_range(&bitmap_files, bmp_start_lsn,
						bmp_end_lsn)) {

		return NULL;
	}

	/* Only accept no bitmap files returned if start LSN == end LSN */
	if (bitmap_files.count == 0 && bmp_end_lsn != bmp_start_lsn) {

		return NULL;
	}

	result = rbt_create(MODIFIED_PAGE_BLOCK_SIZE,
			    log_online_compare_bmp_keys);

	if (bmp_start_lsn == bmp_end_lsn) {

		/* Empty range - empty bitmap */
		/* Bug fix: bitmap_files.files may have been allocated even
		for an empty LSN range; release it to avoid a memory leak.
		free(NULL) is a no-op when no files were found. */
		free(bitmap_files.files);
		return result;
	}

	bmp_i = 0;

	if (UNIV_UNLIKELY(bitmap_files.files[bmp_i].start_lsn
			  > bmp_start_lsn)) {

		/* The 1st file does not have the starting LSN data */
		xb_msg_missing_lsn_data(bmp_start_lsn,
					bitmap_files.files[bmp_i].start_lsn);
		rbt_free(result);
		free(bitmap_files.files);
		return NULL;
	}

	/* Skip any zero-sized files at the start */
	while ((bmp_i < bitmap_files.count - 1)
	       && (bitmap_files.files[bmp_i].start_lsn
		   == bitmap_files.files[bmp_i + 1].start_lsn)) {

		bmp_i++;
	}

	/* Is the 1st bitmap file missing? */
	if (UNIV_UNLIKELY(bitmap_files.files[bmp_i].name[0] == '\0')) {

		/* TODO: this is not the exact missing range */
		xb_msg_missing_lsn_data(bmp_start_lsn, bmp_end_lsn);
		rbt_free(result);
		free(bitmap_files.files);
		return NULL;
	}

	/* Open the 1st bitmap file */
	if (UNIV_UNLIKELY(!log_online_open_bitmap_file_read_only(
				  bitmap_files.files[bmp_i].name,
				  &bitmap_file))) {

		rbt_free(result);
		free(bitmap_files.files);
		return NULL;
	}

	/* If the 1st file is truncated, no data.  Not merged with the case
	below because zero-length file indicates not a corruption but missing
	subsequent files instead. */
	if (UNIV_UNLIKELY(bitmap_file.size < MODIFIED_PAGE_BLOCK_SIZE)) {

		xb_msg_missing_lsn_data(bmp_start_lsn, bmp_end_lsn);
		rbt_free(result);
		free(bitmap_files.files);
		os_file_close(bitmap_file.file);
		return NULL;
	}

	/* Find the start of the required LSN range in the file */
	if (UNIV_UNLIKELY(!xb_find_lsn_in_bitmap_file(&bitmap_file, page,
						      &current_page_end_lsn,
						      bmp_start_lsn))) {

		msg("xtrabackup: Warning: changed page bitmap file "
		    "\'%s\' corrupted\n", bitmap_file.name);
		rbt_free(result);
		free(bitmap_files.files);
		os_file_close(bitmap_file.file);
		return NULL;
	}

	last_page_in_run
		= mach_read_from_4(page + MODIFIED_PAGE_IS_LAST_BLOCK);

	if (UNIV_UNLIKELY(!log_online_diagnose_bitmap_eof(&bitmap_file,
							  last_page_in_run))) {

		rbt_free(result);
		free(bitmap_files.files);
		os_file_close(bitmap_file.file);
		return NULL;
	}

	if (UNIV_UNLIKELY(current_page_end_lsn < bmp_start_lsn)) {

		/* Scanned to EOF without reaching the start LSN. */
		xb_msg_missing_lsn_data(current_page_end_lsn, bmp_start_lsn);
		rbt_free(result);
		free(bitmap_files.files);
		os_file_close(bitmap_file.file);
		return NULL;
	}

	/* 1st bitmap page found, add it to the tree. */
	rbt_insert(result, page, page);

	/* Read next pages/files until all required data is read */
	while (last_page_ok
	       && (current_page_end_lsn < bmp_end_lsn
		   || (current_page_end_lsn == bmp_end_lsn
		       && !last_page_in_run))) {

		ib_rbt_bound_t	tree_search_pos;

		/* If EOF, advance the file skipping over any empty files */
		while (bitmap_file.size < MODIFIED_PAGE_BLOCK_SIZE
		       || (bitmap_file.offset
			   > bitmap_file.size - MODIFIED_PAGE_BLOCK_SIZE)) {

			os_file_close(bitmap_file.file);

			if (UNIV_UNLIKELY(
				    !log_online_diagnose_bitmap_eof(
					    &bitmap_file, last_page_in_run))) {

				rbt_free(result);
				free(bitmap_files.files);
				return NULL;
			}

			bmp_i++;

			if (UNIV_UNLIKELY(bmp_i == bitmap_files.count
					  || (bitmap_files.files[bmp_i].seq_num
					      == 0))) {

				xb_msg_missing_lsn_data(current_page_end_lsn,
							bmp_end_lsn);
				rbt_free(result);
				free(bitmap_files.files);
				return NULL;
			}

			/* Is the next file missing? */
			if (UNIV_UNLIKELY(bitmap_files.files[bmp_i].name[0]
					  == '\0')) {

				/* TODO: this is not the exact missing range */
				xb_msg_missing_lsn_data(bitmap_files.files
							[bmp_i - 1].start_lsn,
							bmp_end_lsn);
				rbt_free(result);
				free(bitmap_files.files);
				return NULL;
			}

			if (UNIV_UNLIKELY(
				    !log_online_open_bitmap_file_read_only(
					    bitmap_files.files[bmp_i].name,
					    &bitmap_file))) {

				rbt_free(result);
				free(bitmap_files.files);
				return NULL;
			}
		}

		if (UNIV_UNLIKELY(
			    !log_online_read_bitmap_page(&bitmap_file, page,
							 &last_page_ok))) {

			rbt_free(result);
			free(bitmap_files.files);
			os_file_close(bitmap_file.file);
			return NULL;
		}

		if (UNIV_UNLIKELY(!last_page_ok)) {

			msg("xtrabackup: warning: changed page bitmap file "
			    "\'%s\' corrupted.\n", bitmap_file.name);
			rbt_free(result);
			free(bitmap_files.files);
			os_file_close(bitmap_file.file);
			return NULL;
		}

		/* Merge the current page with an existing page or insert a new
		page into the tree */
		if (!rbt_search(result, &tree_search_pos, page)) {

			/* Merge the bitmap pages by OR-ing the bitmap
			words of the new page into the existing one. */
			byte	*existing_page
				= rbt_value(byte, tree_search_pos.last);
			bitmap_word_t	*bmp_word_1 = (bitmap_word_t *)
				(existing_page + MODIFIED_PAGE_BLOCK_BITMAP);
			bitmap_word_t	*bmp_end = (bitmap_word_t *)
				(existing_page + MODIFIED_PAGE_BLOCK_UNUSED_2);
			bitmap_word_t	*bmp_word_2 = (bitmap_word_t *)
				(page + MODIFIED_PAGE_BLOCK_BITMAP);

			while (bmp_word_1 < bmp_end) {

				*bmp_word_1++ |= *bmp_word_2++;
			}

			xb_a (bmp_word_1 == bmp_end);
		} else {

			/* Add a new page */
			rbt_add_node(result, &tree_search_pos, page);
		}

		current_page_end_lsn
			= mach_read_from_8(page + MODIFIED_PAGE_END_LSN);
		last_page_in_run
			= mach_read_from_4(page + MODIFIED_PAGE_IS_LAST_BLOCK);
	}

	xb_a (current_page_end_lsn >= bmp_end_lsn);

	free(bitmap_files.files);
	os_file_close(bitmap_file.file);

	return result;
}
/****************************************************************//**
Free the bitmap tree. */
void
xb_page_bitmap_deinit(
/*==================*/
	xb_page_bitmap*	bitmap)	/*!<in/out: bitmap tree */
{
	/* NULL is accepted and ignored. */
	if (bitmap == NULL) {

		return;
	}

	rbt_free(bitmap);
}
/****************************************************************//**
Advance to the next bitmap page or setup the first bitmap page for the
given bitmap range. Assumes that bitmap_range->bitmap_page has been
already found/bumped by rbt_search()/rbt_next().
@return FALSE if no more bitmap data for the range space ID */
static
ibool
xb_page_bitmap_setup_next_page(
/*===========================*/
	xb_page_bitmap_range*	bitmap_range)	/*!<in/out: the bitmap range */
{
	if (bitmap_range->bitmap_node == NULL) {

		/* Walked past the last node of the tree. */
		bitmap_range->current_page_id = ULINT_UNDEFINED;
		return FALSE;
	}

	bitmap_range->bitmap_page = rbt_value(byte, bitmap_range->bitmap_node);

	ulint	page_space
		= mach_read_from_4(bitmap_range->bitmap_page
				   + MODIFIED_PAGE_SPACE_ID);

	if (page_space != bitmap_range->space_id) {

		/* No more data for the iterated space id. */
		xb_a(page_space > bitmap_range->space_id);
		bitmap_range->current_page_id = ULINT_UNDEFINED;
		return FALSE;
	}

	ulint	first_page
		= mach_read_from_4(bitmap_range->bitmap_page
				   + MODIFIED_PAGE_1ST_PAGE_ID);

	/* Pages must come in non-decreasing page id order. */
	xb_a (first_page >= bitmap_range->current_page_id
	      || bitmap_range->current_page_id == ULINT_UNDEFINED);

	bitmap_range->current_page_id = first_page;
	bitmap_range->bit_i = 0;

	return TRUE;
}
/****************************************************************//**
Set up a new bitmap range iterator over a given space id changed
pages in a given bitmap.
@return bitmap range iterator */
xb_page_bitmap_range*
xb_page_bitmap_range_init(
/*======================*/
	xb_page_bitmap*	bitmap,		/*!< in: bitmap to iterate over */
	ulint		space_id)	/*!< in: space id */
{
	byte	search_page[MODIFIED_PAGE_BLOCK_SIZE];

	xb_page_bitmap_range*	range
		= static_cast<xb_page_bitmap_range *>
		(ut_malloc(sizeof(*range)));

	memset(range, 0, sizeof(*range));
	range->bitmap = bitmap;
	range->space_id = space_id;
	range->current_page_id = ULINT_UNDEFINED;

	/* Build a search key for the 1st page of the given space id.
	Zeroing the whole page also sets MODIFIED_PAGE_1ST_PAGE_ID to 0,
	which is what we want. */
	memset(search_page, 0, MODIFIED_PAGE_BLOCK_SIZE);
	mach_write_to_4(search_page + MODIFIED_PAGE_SPACE_ID, space_id);

	range->bitmap_node = rbt_lower_bound(range->bitmap, search_page);

	/* Position on the first page of the space, if any. */
	xb_page_bitmap_setup_next_page(range);

	return range;
}
/****************************************************************//**
Get the value of the bitmap->range->bit_i bitmap bit
@return the current bit value */
static inline
ibool
is_bit_set(
/*=======*/
	const xb_page_bitmap_range*	bitmap_range)	/*!< in: bitmap
							range */
{
	/* The bitmap is an array of 64-bit words: word index is
	bit_i / 64, the bit within the word is bit_i % 64. */
	const bitmap_word_t*	words
		= (const bitmap_word_t*) (bitmap_range->bitmap_page
					  + MODIFIED_PAGE_BLOCK_BITMAP);
	bitmap_word_t	word = words[bitmap_range->bit_i >> 6];
	bitmap_word_t	mask = 1ULL << (bitmap_range->bit_i & 0x3F);

	return (word & mask) ? TRUE : FALSE;
}
/****************************************************************//**
Get the next page id that has its bit set or cleared, i.e. equal to
bit_value.
@return page id */
ulint
xb_page_bitmap_range_get_next_bit(
/*==============================*/
	xb_page_bitmap_range*	bitmap_range,	/*!< in/out: bitmap range */
	ibool			bit_value)	/*!< in: bit value */
{
	if (UNIV_UNLIKELY(bitmap_range->current_page_id
			  == ULINT_UNDEFINED)) {

		/* The iterator is already exhausted. */
		return ULINT_UNDEFINED;
	}

	do {
		/* Scan the remaining bits of the current bitmap page. */
		while (bitmap_range->bit_i < MODIFIED_PAGE_BLOCK_ID_COUNT) {

			/* Skip bits that do not match bit_value.
			NOTE(review): is_bit_set() is evaluated before the
			bounds check, so when bit_i reaches
			MODIFIED_PAGE_BLOCK_ID_COUNT one bit from the unused
			tail of the page is read.  The read stays inside the
			page buffer and the value is discarded (the && bounds
			test then ends the loop), but the condition order
			looks accidental -- confirm. */
			while (is_bit_set(bitmap_range) != bit_value
			       && (bitmap_range->bit_i
				   < MODIFIED_PAGE_BLOCK_ID_COUNT)) {

				bitmap_range->current_page_id++;
				bitmap_range->bit_i++;
			}

			if (bitmap_range->bit_i
			    < MODIFIED_PAGE_BLOCK_ID_COUNT) {

				/* Matching bit found: return its page id and
				advance the iterator past it. */
				ulint result = bitmap_range->current_page_id;

				bitmap_range->current_page_id++;
				bitmap_range->bit_i++;

				return result;
			}
		}

		/* Current page exhausted; move on to the next tree node. */
		bitmap_range->bitmap_node
			= rbt_next(bitmap_range->bitmap,
				   bitmap_range->bitmap_node);

	} while (xb_page_bitmap_setup_next_page(bitmap_range));

	return ULINT_UNDEFINED;
}
/****************************************************************//**
Free the bitmap range iterator. */
void
xb_page_bitmap_range_deinit(
/*========================*/
	xb_page_bitmap_range*	bitmap_range)	/*!<in/out: bitmap range */
{
	/* The iterator was allocated with ut_malloc() in
	xb_page_bitmap_range_init(); the underlying bitmap tree stays
	owned by the caller and is not freed here. */
	ut_free(bitmap_range);
}
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2012 Percona Inc.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Changed page bitmap interface */
#ifndef XB_CHANGED_PAGE_BITMAP_H
#define XB_CHANGED_PAGE_BITMAP_H
#include <ut0rbt.h>
#include <fil0fil.h>
/* The changed page bitmap structure: a red-black tree of bitmap pages */
typedef ib_rbt_t xb_page_bitmap;
struct xb_page_bitmap_range_struct;
/* The bitmap range iterator over one space id */
typedef struct xb_page_bitmap_range_struct xb_page_bitmap_range;
/****************************************************************//**
Read the disk bitmap and build the changed page bitmap tree for the
LSN interval incremental_lsn to checkpoint_lsn_start.
@return the built bitmap tree, or NULL on failure */
xb_page_bitmap*
xb_page_bitmap_init(void);
/*=====================*/
/****************************************************************//**
Free the bitmap tree. */
void
xb_page_bitmap_deinit(
/*==================*/
	xb_page_bitmap*	bitmap);	/*!<in/out: bitmap tree */
/****************************************************************//**
Set up a new bitmap range iterator over a given space id changed
pages in a given bitmap.
@return bitmap range iterator */
xb_page_bitmap_range*
xb_page_bitmap_range_init(
/*======================*/
	xb_page_bitmap*	bitmap,		/*!< in: bitmap to iterate over */
	ulint		space_id);	/*!< in: space id */
/****************************************************************//**
Get the next page id that has its bit set or cleared, i.e. equal to
bit_value.
@return page id, or ULINT_UNDEFINED when the range is exhausted */
ulint
xb_page_bitmap_range_get_next_bit(
/*==============================*/
	xb_page_bitmap_range*	bitmap_range,	/*!< in/out: bitmap range */
	ibool			bit_value);	/*!< in: bit value */
/****************************************************************//**
Free the bitmap range iterator. */
void
xb_page_bitmap_range_deinit(
/*========================*/
	xb_page_bitmap_range*	bitmap_range);	/*!<in/out: bitmap range */
#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Common declarations for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XB_COMMON_H
#define XB_COMMON_H
#include <my_global.h>
#include <mysql_version.h>
#include <fcntl.h>
#include <stdarg.h>
/* Assertion that is always checked, also in release builds: prints the
failed expression with its source location and aborts.  Bug fix: the
expansion used to end in "while (0);" -- the trailing semicolon made
"if (x) xb_a(e); else ..." a syntax error.  The do/while(0) wrapper
must swallow the caller's semicolon instead. */
#define xb_a(expr)							\
	do {								\
		if (!(expr)) {						\
			msg("Assertion \"%s\" failed at %s:%lu\n",	\
			    #expr, __FILE__, (ulong) __LINE__);		\
			abort();					\
		}							\
	} while (0)
/* Debug-only assertion: compiles away entirely unless XB_DEBUG is set. */
#ifdef XB_DEBUG
#define xb_ad(expr) xb_a(expr)
#else
#define xb_ad(expr)
#endif
/* Suffix of the metadata file accompanying an incremental delta */
#define XB_DELTA_INFO_SUFFIX ".meta"
static inline int msg(const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
/* Print a printf-style message to stderr.
Returns the vfprintf() result (characters written, or negative on error). */
static inline int msg(const char *fmt, ...)
{
	va_list	ap;
	int	written;

	va_start(ap, fmt);
	written = vfprintf(stderr, fmt, ap);
	va_end(ap);

	return written;
}
static inline int msg_ts(const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
/* Print a printf-style message to stderr, prefixed with a
"YYMMDD HH:MM:SS" local-time timestamp.
Returns the number of characters printed, or -1 on formatting failure. */
static inline int msg_ts(const char *fmt, ...)
{
	char	timestamp[100];
	char*	formatted;
	va_list	ap;
	int	written;
	time_t	now = time(NULL);

	/* NOTE(review): localtime() is not thread-safe; assumes callers
	serialize logging -- confirm. */
	strftime(timestamp, sizeof(timestamp), "%y%m%d %H:%M:%S",
		 localtime(&now));

	va_start(ap, fmt);
	written = vasprintf(&formatted, fmt, ap);
	va_end(ap);

	if (written != -1) {
		written = fprintf(stderr, "%s %s", timestamp, formatted);
		free(formatted);
	}

	return written;
}
/* Use POSIX_FADV_NORMAL when available */
#ifdef POSIX_FADV_NORMAL
# define USE_POSIX_FADVISE
#else
/* Stub out posix_fadvise() on platforms that lack it.  The advice
constants expand to nothing: they are only ever consumed as arguments
of the no-op posix_fadvise() macro below.  Bug fix: POSIX_FADV_NOREUSE
was missing from this fallback list although the xtrabackup code passes
it to posix_fadvise(); define it too for consistency. */
# define POSIX_FADV_NORMAL
# define POSIX_FADV_SEQUENTIAL
# define POSIX_FADV_NOREUSE
# define POSIX_FADV_DONTNEED
# define posix_fadvise(a,b,c,d) do {} while(0)
#endif
/***********************************************************************
Computes bit shift for a given value. If the argument is not a power
of 2, returns 0.*/
static inline ulong
get_bit_shift(ulong value)
{
	if (value == 0) {

		return 0;
	}

	/* Count the trailing zero bits. */
	ulong	shift = 0;
	while (!(value & 1UL)) {
		value >>= 1;
		shift++;
	}

	/* After shifting out the trailing zeroes, a power of two leaves
	exactly the bit 1; anything else means "not a power of 2". */
	return (value >> 1) ? 0 : shift;
}
/****************************************************************************
Read 'len' bytes from 'fd'. It is identical to my_read(..., MYF(MY_FULL_IO)),
i.e. tries to combine partial reads into a single block of size 'len', except
that it bails out on EOF or error, and returns the number of successfully read
bytes instead. */
static inline size_t
xb_read_full(File fd, uchar *buf, size_t len)
{
	size_t	total = 0;

	while (total < len) {

		size_t	chunk = my_read(fd, buf, len - total, MYF(MY_WME));

		/* Stop at EOF (0) or on error; report what was read. */
		if (chunk == 0 || chunk == MY_FILE_ERROR) {
			break;
		}

		buf += chunk;
		total += chunk;
	}

	return total;
}
#endif
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2014 Percona LLC and/or its affiliates.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Compact backups implementation */
#include <my_base.h>
#include <table.h>
#include <univ.i>
#include <dict0mem.h>
#include <dict0priv.h>
#include <fsp0fsp.h>
#include <handler0alter.h>
#include <ibuf0ibuf.h>
#include <page0page.h>
#include <row0merge.h>
#include "common.h"
#include "write_filt.h"
#include "fil_cur.h"
#include "xtrabackup.h"
#include "ds_buffer.h"
#include "xb0xb.h"
/* Number of the first primary key (clustered index) page in an .ibd file.
Pages 0-2 hold the FSP header and other bookkeeping structures. */
#define XB_FIRST_CLUSTERED_INDEX_PAGE_NO 3

/* Suffix for page map files written next to each compacted data file */
#define XB_PAGE_MAP_SUFFIX ".pmap"
/* Suffix for the temporary file used while expanding a compacted file */
#define XB_TMPFILE_SUFFIX ".tmp"

/* Page range [from, to], endpoints inclusive, as stored in a page map file */
struct page_range_t {
	ulint	from;	/*!< range start */
	ulint	to;	/*!< range end */
};

/* Cursor in a page map file */
struct page_map_cursor_t {
	File		fd;	/*!< file descriptor */
	IO_CACHE	cache;	/*!< IO_CACHE associated with fd */
};

/* Table descriptor for the index rebuild operation */
struct index_rebuild_table_t {
	char*		name;		/* table name */
	ulint		space_id;	/* space ID */
	UT_LIST_NODE_T(index_rebuild_table_t) list;	/* list node */
};

/* Thread descriptor for the index rebuild operation */
struct index_rebuild_thread_t {
	ulint	num;	/* thread number */
	pthread_t id;	/* thread ID */
};

/* Empty page used to replace skipped pages in the data files */
static byte empty_page[UNIV_PAGE_SIZE_MAX];

/* Magic string stamped into replacement pages so they can be recognized
later via buf_page_is_compacted() */
static const char compacted_page_magic[] = "COMPACTP";
static const size_t compacted_page_magic_size =
	sizeof(compacted_page_magic) - 1;
static const ulint compacted_page_magic_offset = FIL_PAGE_DATA;

/* Mutex protecting table_list */
static pthread_mutex_t table_list_mutex;

/* List of tablespaces to process by the index rebuild operation */
static UT_LIST_BASE_NODE_T(index_rebuild_table_t) table_list;
/************************************************************************
Compact page filter: skips non-clustered index pages while copying a data
file and records the skipped page ranges in a ".pmap" file next to it. */
static my_bool wf_compact_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
			       xb_fil_cur_t *cursor);
static my_bool wf_compact_process(xb_write_filt_ctxt_t *ctxt,
				  ds_file_t *dstfile);
static my_bool wf_compact_finalize(xb_write_filt_ctxt_t *ctxt,
				   ds_file_t *dstfile);

/* Write filter interface table for the compact filter (no deinit hook) */
xb_write_filt_t wf_compact = {
	&wf_compact_init,
	&wf_compact_process,
	&wf_compact_finalize,
	NULL
};
/************************************************************************
Initialize the compact page filter.

Attaches the filter to 'cursor' and, unless the file belongs to the system
tablespace (which is never compacted), opens a buffered datasink for the
page map file "<dst_name>.pmap" that will receive the skipped page ranges.

Note: 'dst_name' was previously annotated __attribute__((unused)), which
was stale — it is used to build the page map file name below.

@return TRUE on success, FALSE on error. */
static my_bool
wf_compact_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
		xb_fil_cur_t *cursor)
{
	xb_wf_compact_ctxt_t	*cp = &(ctxt->u.wf_compact_ctxt);
	char			page_map_name[FN_REFLEN];
	MY_STAT			mystat;

	ctxt->cursor = cursor;
	cp->clustered_index_found = FALSE;
	cp->inside_skipped_range = FALSE;
	cp->free_limit = 0;

	/* Don't compact the system table space */
	cp->skip = cursor->is_system;
	if (cp->skip) {
		return(TRUE);
	}

	snprintf(page_map_name, sizeof(page_map_name), "%s%s", dst_name,
		 XB_PAGE_MAP_SUFFIX);

	cp->ds_buffer = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER);
	if (cp->ds_buffer == NULL) {
		return(FALSE);
	}
	ds_set_pipe(cp->ds_buffer, ds_meta);

	memset(&mystat, 0, sizeof(mystat));
	mystat.st_mtime = my_time(0);

	cp->buffer = ds_open(cp->ds_buffer, page_map_name, &mystat);
	if (cp->buffer == NULL) {
		msg("xtrabackup: Error: cannot open output stream for %s\n",
		    page_map_name);
		/* Don't leak the buffered datasink on failure; clear the
		pointer so wf_compact_finalize() cannot destroy it twice if
		it does get called. */
		ds_destroy(cp->ds_buffer);
		cp->ds_buffer = NULL;
		return(FALSE);
	}

	return(TRUE);
}
/************************************************************************
Check if the specified page should be skipped. We currently skip all
non-clustered index pages for compact backups.

'offset' is the page's position within the cursor's read buffer; the
absolute page number is cursor->buf_page_no + offset.

Side effects on 'cp': page 0 records the FSP free limit; the first
clustered index page records the clustered index ID, or sets cp->skip
for the whole file if that page is uninitialized.

@return TRUE if the page should be skipped. */
static my_bool
check_if_skip_page(xb_wf_compact_ctxt_t *cp, xb_fil_cur_t *cursor, ulint offset)
{
	byte		*page;
	ulint		page_no;
	ulint		page_type;
	index_id_t	index_id;

	xb_ad(cursor->is_system == FALSE);

	page = cursor->buf + cursor->page_size * offset;
	page_no = cursor->buf_page_no + offset;
	page_type = fil_page_get_type(page);

	if (UNIV_UNLIKELY(page_no == 0)) {

		/* FSP header page: remember the free limit so unused pages
		above it can be skipped. Never skipped itself. */
		cp->free_limit = mach_read_from_4(page + FSP_HEADER_OFFSET +
						  FSP_FREE_LIMIT);

	} else if (UNIV_UNLIKELY(page_no == XB_FIRST_CLUSTERED_INDEX_PAGE_NO)) {

		xb_ad(cp->clustered_index_found == FALSE);

		if (page_type != FIL_PAGE_INDEX) {

			/* Uninitialized clustered index root page, there's
			nothing we can do to compact the space.*/

			msg("[%02u] Uninitialized page type value (%lu) in the "
			    "clustered index root page of tablespace %s. "
			    "Will not be compacted.\n",
			    cursor->thread_n,
			    page_type, cursor->rel_path);

			cp->skip = TRUE;

			return(FALSE);
		}

		cp->clustered_index =
			mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID);
		cp->clustered_index_found = TRUE;

	} else if (UNIV_UNLIKELY(page_no >= cp->free_limit)) {

		/* Skip unused pages above free limit, if that value is set in
		the FSP header.*/

		return(cp->free_limit > 0);

	} else if (cp->clustered_index_found && page_type == FIL_PAGE_INDEX) {

		index_id = mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID);

		if (index_id != cp->clustered_index) {

			/* A secondary index page: candidate for skipping. */
			ulint	fseg_hdr_space =
				mach_read_from_4(page + PAGE_HEADER +
						 PAGE_BTR_SEG_TOP);
			ulint	fseg_hdr_page_no =
				mach_read_from_4(page + PAGE_HEADER +
						 PAGE_BTR_SEG_TOP + 4);
			ulint	fseg_hdr_offset =
				mach_read_from_2(page + PAGE_HEADER +
						 PAGE_BTR_SEG_TOP + 8);

			/* Don't skip root index pages, i.e. the ones where the
			above fields are defined. We need root index pages to be
			able to correctly drop the indexes later, as they
			contain fseg inode pointers. */
			return(fseg_hdr_space == 0 &&
			       fseg_hdr_page_no == 0 &&
			       fseg_hdr_offset == 0);
		}
	}

	return(FALSE);
}
/************************************************************************
Run the next batch of pages through the compact page filter.

Non-skipped pages are coalesced into contiguous runs, each written to
'dstfile' with a single ds_write(). Skipped pages are recorded in the page
map as pairs of 4-byte page numbers [from, to]; a range may stay open
across calls (cp->inside_skipped_range) and is closed here or in
wf_compact_finalize().

@return TRUE on success, FALSE on error. */
static my_bool
wf_compact_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
{
	xb_fil_cur_t		*cursor = ctxt->cursor;
	ulint			page_size = cursor->page_size;
	byte			*page;
	byte			*buf_end;
	byte			*write_from;
	xb_wf_compact_ctxt_t	*cp = &(ctxt->u.wf_compact_ctxt);
	ulint			i;
	ulint			page_no;
	byte			tmp[4];

	if (cp->skip) {
		/* Filtering disabled for this file: pass the whole read
		buffer through unchanged. */
		return(!ds_write(dstfile, cursor->buf, cursor->buf_read));
	}

	write_from = NULL;
	buf_end = cursor->buf + cursor->buf_read;
	for (i = 0, page = cursor->buf; page < buf_end;
	     i++, page += page_size) {

		page_no = cursor->buf_page_no + i;

		if (!check_if_skip_page(cp, cursor, i)) {
			/* Keep this page: start or extend the current run
			of pages to be written out. */
			if (write_from == NULL) {
				write_from = page;
			}
			if (cp->inside_skipped_range) {
				cp->inside_skipped_range = FALSE;

				/* Write the last range endpoint to the
				skipped pages map */
				xb_ad(page_no > 0);
				mach_write_to_4(tmp, page_no - 1);
				if (ds_write(cp->buffer, tmp, sizeof(tmp))) {
					return(FALSE);
				}
			}
			continue;
		}

		if (write_from != NULL) {
			/* The first skipped page in this block, write the
			non-skipped ones to the data file */
			if (ds_write(dstfile, write_from, page - write_from)) {
				return(FALSE);
			}
			write_from = NULL;
		}

		if (!cp->inside_skipped_range) {
			/* The first skipped page in range, write the first
			range endpoint to the skipped pages map */
			cp->inside_skipped_range = TRUE;
			mach_write_to_4(tmp, page_no);
			if (ds_write(cp->buffer, tmp, sizeof(tmp))) {
				return(FALSE);
			}
		}
	}

	/* Write the remaining pages in the buffer, if any */
	if (write_from != NULL &&
	    ds_write(dstfile, write_from, buf_end - write_from)) {
		return(FALSE);
	}

	return(TRUE);
}
/************************************************************************
Close the compact filter's page map stream, first closing any skipped
page range that was still open at the end of the file.
@return TRUE on success, FALSE on error. */
static my_bool
wf_compact_finalize(xb_write_filt_ctxt_t *ctxt,
		    ds_file_t *dstfile __attribute__((unused)))
{
	xb_fil_cur_t		*cursor = ctxt->cursor;
	xb_wf_compact_ctxt_t	*cp = &(ctxt->u.wf_compact_ctxt);
	my_bool			result = TRUE;

	/* If the tablespace ended inside a skipped range, emit the closing
	endpoint (the last page of the space) to the page map. */
	if (cp->inside_skipped_range) {
		byte	endpoint[4];

		mach_write_to_4(endpoint, cursor->space_size - 1);
		if (ds_write(cp->buffer, endpoint, sizeof(endpoint))) {
			return(FALSE);
		}
		cp->inside_skipped_range = FALSE;
	}

	if (cp->buffer != NULL && ds_close(cp->buffer)) {
		result = FALSE;
	}

	if (cp->ds_buffer != NULL) {
		ds_destroy(cp->ds_buffer);
	}

	return(result);
}
/************************************************************************
Open a page map file and return a cursor positioned at its start.
@return page map cursor, or NULL if the file doesn't exist. */
static page_map_cursor_t *
page_map_file_open(const char *path)
{
	MY_STAT			statinfo;
	page_map_cursor_t	*pmap_cur;
	int			rc;

	if (my_stat(path, &statinfo, MYF(0)) == NULL) {
		/* The file does not exist (or cannot be stat'ed) */
		return(NULL);
	}

	/* The maximum possible page map file corresponds to a 64 TB tablespace
	and the worst case when every other page was skipped. That is, 2^32/2
	page ranges = 16 GB. */
	xb_a(statinfo.st_size < (off_t) 16 * 1024 * 1024 * 1024);

	/* Must be a series of 8-byte tuples */
	xb_a(statinfo.st_size % 8 == 0);

	pmap_cur = (page_map_cursor_t *) my_malloc(sizeof(page_map_cursor_t),
						   MYF(MY_FAE));

	pmap_cur->fd = my_open(path, O_RDONLY, MYF(MY_WME));
	/* my_open() reports failure with a negative value; descriptor 0 is
	a valid (if unusual) file descriptor, so the old '!= 0' check was
	wrong on both counts. */
	xb_a(pmap_cur->fd >= 0);

	rc = init_io_cache(&pmap_cur->cache, pmap_cur->fd, 0, READ_CACHE,
			   0, 0, MYF(MY_WME));
	xb_a(rc == 0);

	return(pmap_cur);
}
/************************************************************************
Read the next [from, to] range from a page map file and advance the cursor.
@return TRUE on success, FALSE on end-of-file. */
static ibool
page_map_file_next(page_map_cursor_t *pmap_cur, page_range_t *range)
{
	byte	rec[8];

	xb_ad(pmap_cur != NULL);

	/* Each map record is two big-endian 4-byte page numbers. */
	if (my_b_read(&pmap_cur->cache, rec, sizeof(rec))) {
		/* End of file or read error */
		return(FALSE);
	}

	range->from = mach_read_from_4(rec);
	range->to = mach_read_from_4(rec + 4);

	return(TRUE);
}
/************************************************************************
Close the page map cursor and free its memory. */
static void
page_map_file_close(page_map_cursor_t *pmap_cur)
{
	int	err;

	xb_ad(pmap_cur != NULL);

	err = end_io_cache(&pmap_cur->cache);
	xb_a(err == 0);

	/* Drop the kernel page cache for the map; it will not be re-read. */
	posix_fadvise(pmap_cur->fd, 0, 0, POSIX_FADV_DONTNEED);

	err = my_close(pmap_cur->fd, MY_WME);
	xb_a(err == 0);

	my_free(pmap_cur);
}
/****************************************************************************
Expand a single data file according to the skipped pages maps created by
--compact.

Reads "<node->name>.pmap", streams the compacted data file and writes an
expanded copy to "<node->name>.tmp", substituting a magic-marked empty page
for every page number covered by the map. On success the temporary file is
renamed over the original and the .pmap file is deleted.

NOTE(review): when a file has no .pmap, FALSE is returned ("Not expanding"),
which the caller xb_expand_datafiles() treats the same as a hard error —
confirm this is intended.

@return TRUE on success, FALSE on failure. */
static my_bool
xb_expand_file(fil_node_t *node)
{
	char			pmapfile_path[FN_REFLEN];
	char			tmpfile_path[FN_REFLEN];
	xb_fil_cur_t		cursor;
	xb_fil_cur_result_t	res;
	ds_ctxt_t		*ds_local;
	ds_ctxt_t		*ds_buffer;
	ds_file_t		*tmpfile;
	my_bool			success = FALSE;
	ulint			i;
	byte			*page;
	ulint			page_expected_no;
	page_map_cursor_t	*pmap_cur;
	ibool			have_next_range;
	page_range_t		pmap_range;

	xb_ad(trx_sys_sys_space(node->space->id) == FALSE);

	snprintf(pmapfile_path, sizeof(pmapfile_path), "%s%s",
		 node->name, XB_PAGE_MAP_SUFFIX);

	/* Skip files that don't have a corresponding page map file */
	if (!(pmap_cur = page_map_file_open(pmapfile_path))) {
		msg("Not expanding %s\n", node->name);
		return(FALSE);
	}
	msg("Expanding %s\n", node->name);

	/* Buffer writes in extent-sized chunks before they reach the local
	destination file. */
	ds_local = ds_create(".", DS_TYPE_LOCAL);
	ds_buffer = ds_create(".", DS_TYPE_BUFFER);
	xb_a(ds_local != NULL && ds_buffer != NULL);
	ds_buffer_set_size(ds_buffer, FSP_EXTENT_SIZE * UNIV_PAGE_SIZE_MAX);
	ds_set_pipe(ds_buffer, ds_local);

	res = xb_fil_cur_open(&cursor, &rf_pass_through, node, 1);
	xb_a(res == XB_FIL_CUR_SUCCESS);

	snprintf(tmpfile_path, sizeof(tmpfile_path), "%s%s",
		 node->name, XB_TMPFILE_SUFFIX);
	tmpfile = ds_open(ds_buffer, tmpfile_path, &cursor.statinfo);
	if (tmpfile == NULL) {
		msg("Could not open temporary file '%s'\n", tmpfile_path);
		goto error;
	}

	have_next_range = page_map_file_next(pmap_cur, &pmap_range);
	page_expected_no = 0;

	/* Initialize and mark the empty page which is used to replace
	skipped pages. */
	memset(empty_page, 0, cursor.page_size);
	memcpy(empty_page + compacted_page_magic_offset,
	       compacted_page_magic, compacted_page_magic_size);
	mach_write_to_4(empty_page + FIL_PAGE_SPACE_OR_CHKSUM,
			BUF_NO_CHECKSUM_MAGIC);
	mach_write_to_4(empty_page + cursor.page_size -
			FIL_PAGE_END_LSN_OLD_CHKSUM,
			BUF_NO_CHECKSUM_MAGIC);

	/* Main copy loop */
	while ((res = xb_fil_cur_read(&cursor)) == XB_FIL_CUR_SUCCESS) {

		for (i = 0, page = cursor.buf; i < cursor.buf_npages;
		     i++, page += cursor.page_size) {
			ulint	page_read_no;

			page_read_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
			/* Uninitialized pages read as 0; otherwise page
			numbers must not go backwards. */
			xb_a(!page_read_no || page_expected_no <= page_read_no);

			if (have_next_range &&
			    page_expected_no == pmap_range.from) {
				xb_a(pmap_range.from <= pmap_range.to);

				/* Write empty pages instead of skipped ones, if
				necessary. */
				while (page_expected_no <= pmap_range.to) {
					if (ds_write(tmpfile, empty_page,
						     cursor.page_size)) {
						goto write_error;
					}
					page_expected_no++;
				}

				have_next_range =
					page_map_file_next(pmap_cur,
							   &pmap_range);
			}

			/* Write the current page */
			if (ds_write(tmpfile, page, cursor.page_size)) {
				goto write_error;
			}
			page_expected_no++;
		}
	}
	if (res != XB_FIL_CUR_EOF) {
		goto error;
	}

	/* Write empty pages instead of trailing skipped ones, if any */
	if (have_next_range) {
		xb_a(page_expected_no == pmap_range.from);
		xb_a(pmap_range.from <= pmap_range.to);

		while (page_expected_no <= pmap_range.to) {
			if (ds_write(tmpfile, empty_page,
				     cursor.page_size)) {
				goto write_error;
			}
			page_expected_no++;
		}

		/* The page map must now be fully consumed */
		xb_a(!page_map_file_next(pmap_cur, &pmap_range));
	}

	/* Replace the original .ibd file with the expanded file */
	if (my_rename(tmpfile_path, node->name, MYF(MY_WME))) {
		msg("Failed to rename '%s' to '%s'\n",
		    tmpfile_path, node->name);
		goto error;
	}
	my_delete(pmapfile_path, MYF(MY_WME));

	/* ds_close() flushes the buffered writes; only report success when
	that flush succeeds (ds_close() returns 0 on success). */
	if (!ds_close(tmpfile)) {
		success = TRUE;
	}
	tmpfile = NULL;
	goto end;

write_error:
	msg("Write to '%s' failed\n", tmpfile_path);

error:
	/* Failure: discard the partially written temporary file. */
	if (tmpfile != NULL) {
		ds_close(tmpfile);
		my_delete(tmpfile_path, MYF(MY_WME));
	}

end:
	ds_destroy(ds_buffer);
	ds_destroy(ds_local);
	xb_fil_cur_close(&cursor);
	page_map_file_close(pmap_cur);

	return(success);
}
/******************************************************************************
Expand the data files according to the skipped pages maps created by --compact.
Iterates over all user tablespaces and expands each via xb_expand_file().
@return TRUE on success, FALSE on failure. */
my_bool
xb_expand_datafiles(void)
/*=====================*/
{
	datafiles_iter_t	*iter;
	fil_node_t		*node;
	my_bool			ok = TRUE;

	msg("Starting to expand compacted .ibd files.\n");

	/* Initialize the tablespace cache */
	if (xb_data_files_init() != DB_SUCCESS) {
		return(FALSE);
	}

	xb_a(UT_LIST_GET_LEN(fil_system->space_list) > 0);

	iter = datafiles_iter_new(fil_system);
	if (iter == NULL) {
		msg("xtrabackup: error: datafiles_iter_new() failed.\n");
		ok = FALSE;
	} else {
		while ((node = datafiles_iter_next(iter)) != NULL) {

			/* System tablespace cannot be compacted */
			if (!fil_is_user_tablespace_id(node->space->id)) {
				continue;
			}

			if (!xb_expand_file(node)) {
				ok = FALSE;
				break;
			}
		}

		datafiles_iter_free(iter);
	}

	xb_data_files_close();

	return(ok);
}
/******************************************************************************
Callback used in buf_page_io_complete() to detect compacted pages.
@return TRUE if the page is marked as compacted, FALSE otherwise. */
ibool
buf_page_is_compacted(
/*==================*/
	const byte*	page)	/*!< in: a database page */
{
	/* A page written as a skipped-page placeholder carries the magic
	string at a fixed offset in its body. */
	return(memcmp(page + compacted_page_magic_offset,
		      compacted_page_magic,
		      compacted_page_magic_size) == 0);
}
/*****************************************************************************
Builds an index definition corresponding to an index object. It is roughly
similar to innobase_create_index_def() / innobase_create_index_field_def() and
the opposite to dict_mem_index_create() / dict_mem_index_add_field().

All output memory (the name copy and the field array) is allocated from
'heap'; 'index_def' is fully overwritten. */
static
void
xb_build_index_def(
/*=======================*/
	mem_heap_t*		heap,		/*!< in: heap */
	const dict_index_t*	index,		/*!< in: index */
	index_def_t*		index_def)	/*!< out: index definition */
{
	index_field_t*	fields;
	ulint		n_fields;
	ulint		i;

	ut_a(index->n_fields);
	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);

	/* Use n_user_defined_cols instead of n_fields, as the index will
	contain a part of the primary key after n_user_defined_cols, and those
	columns will be created automatically in
	dict_index_build_internal_clust(). */
	n_fields = index->n_user_defined_cols;

	memset(index_def, 0, sizeof(*index_def));

	index_def->name = mem_heap_strdup(heap, index->name);
	index_def->ind_type = index->type;

	fields = static_cast<index_field_t *>
		(mem_heap_alloc(heap, n_fields * sizeof(*fields)));

	/* Copy column number and prefix length of each user-defined field */
	for (i = 0; i < n_fields; i++) {
		dict_field_t*	field;

		field = dict_index_get_nth_field(index, i);
		fields[i].col_no = dict_col_get_no(field->col);
		fields[i].prefix_len = field->prefix_len;
	}

	index_def->fields = fields;
	index_def->n_fields = n_fields;
}
/* A dummy auto_inc sequence for row_merge_build_indexes(). */
static ib_sequence_t null_seq(NULL, 0, 0);

/* A dummy table share and table for row_merge_build_indexes() error reporting.
Assumes that no errors are going to be reported. */
static struct TABLE_SHARE dummy_table_share;
static struct TABLE dummy_table;
/********************************************************************//**
Rebuild secondary indexes for a given table.

Saves the definitions of all secondary indexes, drops them (both from the
data dictionary and from the dictionary cache), then recreates and rebuilds
them with the row_merge machinery. The clustered index is left untouched.
Expects to be called without the dictionary mutex held and with 'trx' in an
active DDL state; returns with a fresh DDL transaction started on 'trx'. */
static
void
xb_rebuild_indexes_for_table(
/*=========================*/
	dict_table_t*	table,		/*!< in: table */
	trx_t*		trx,		/*!< in: transaction handle */
	ulint		thread_n)	/*!< in: thread number */
{
	dict_index_t*	index;
	dict_index_t**	indexes;
	ulint		n_indexes;
	index_def_t*	index_defs;
	ulint		i;
	mem_heap_t*	heap;
	ulint		error;
	ulint*		add_key_nums;

	ut_ad(!mutex_own(&(dict_sys->mutex)));

	ut_ad(table);

	ut_a(UT_LIST_GET_LEN(table->indexes) > 0);

	/* Every index except the clustered one is to be rebuilt. */
	n_indexes = UT_LIST_GET_LEN(table->indexes) - 1;
	if (!n_indexes) {
		/* Only the primary key, nothing to do. */
		return;
	}

	heap = mem_heap_create(1024);
	indexes = (dict_index_t**) mem_heap_alloc(heap,
						  n_indexes * sizeof(*indexes));
	index_defs = (index_def_t*) mem_heap_alloc(heap, n_indexes *
						   sizeof(*index_defs));
	add_key_nums = static_cast<ulint *>
		(mem_heap_alloc(heap, n_indexes * sizeof(*add_key_nums)));

	/* Skip the primary key. */
	index = dict_table_get_first_index(table);
	ut_a(dict_index_is_clust(index));

	row_mysql_lock_data_dictionary(trx);

	/* Save each secondary index definition, then mark the index for
	dropping. */
	for (i = 0; (index = dict_table_get_next_index(index)); i++) {

		msg("[%02lu] Found index %s\n", thread_n, index->name);

		/* Pretend that it's the current trx that created this index.
		Required to avoid 5.6+ debug assertions. */
		index->trx_id = trx->id;

		xb_build_index_def(heap, index, &index_defs[i]);

		/* In 5.6+, row_merge_drop_indexes() drops all the indexes on
		the table that have the temp index prefix. It does not accept
		an array of indexes to drop as in 5.5-. */
		row_merge_rename_index_to_drop(trx, table->id, index->id);
	}

	ut_ad(i == n_indexes);

	row_merge_drop_indexes(trx, table, TRUE);

	index = dict_table_get_first_index(table);
	ut_a(dict_index_is_clust(index));
	index = dict_table_get_next_index(index);
	while (index) {

		/* In 5.6+, row_merge_drop_indexes() does not remove the
		indexes from the dictionary cache nor from any foreign key
		list. This may cause invalid dereferences as we try to access
		the dropped indexes from other tables as FKs. */

		dict_index_t*	next_index = dict_table_get_next_index(index);
		index->to_be_dropped = 1;

		/* Patch up any FK referencing this index with NULL */
		dict_foreign_replace_index(table, NULL, index);

		dict_index_remove_from_cache(table, index);

		index = next_index;
	}

	msg("[%02lu] Rebuilding %lu index(es).\n", thread_n, n_indexes);

	error = row_merge_lock_table(trx, table, LOCK_X);
	xb_a(error == DB_SUCCESS);

	/* Recreate the index skeletons from the saved definitions. */
	for (i = 0; i < n_indexes; i++) {
		indexes[i] = row_merge_create_index(trx, table,
						    &index_defs[i]);
		add_key_nums[i] = index_defs[i].key_number;
	}

	/* Commit trx to release latches on system tables */
	trx_commit_for_mysql(trx);
	trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);

	row_mysql_unlock_data_dictionary(trx);

	/* Reacquire table lock for row_merge_build_indexes() */
	error = row_merge_lock_table(trx, table, LOCK_X);
	xb_a(error == DB_SUCCESS);

	error = row_merge_build_indexes(trx, table, table, FALSE, indexes,
					add_key_nums, n_indexes, &dummy_table,
					NULL, NULL, ULINT_UNDEFINED, null_seq);
	ut_a(error == DB_SUCCESS);

	mem_heap_free(heap);

	trx_commit_for_mysql(trx);

	trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
}
/**************************************************************************
Worker thread function for index rebuild.

Pops tables from the shared table_list queue (protected by
table_list_mutex) and rebuilds their secondary indexes until the queue is
empty. Each worker runs its own DDL transaction with foreign key checks
suppressed. 'arg' points to this thread's index_rebuild_thread_t. */
static
void *
xb_rebuild_indexes_thread_func(
/*===========================*/
	void*	arg)	/* thread context */
{
	dict_table_t*		table;
	index_rebuild_table_t*	rebuild_table;
	index_rebuild_thread_t*	thread;
	trx_t*			trx;

	thread = (index_rebuild_thread_t *) arg;

	trx = trx_allocate_for_mysql();

	/* Suppress foreign key checks, as we are going to drop and recreate all
	secondary keys. */
	trx->check_foreigns = FALSE;
	trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);

	/* Loop until there are no more tables in tables list */
	for (;;) {
		pthread_mutex_lock(&table_list_mutex);

		rebuild_table = UT_LIST_GET_FIRST(table_list);

		if (rebuild_table == NULL) {
			pthread_mutex_unlock(&table_list_mutex);
			break;
		}

		UT_LIST_REMOVE(list, table_list, rebuild_table);

		pthread_mutex_unlock(&table_list_mutex);

		ut_ad(rebuild_table->name);
		ut_ad(fil_is_user_tablespace_id(rebuild_table->space_id));

		row_mysql_lock_data_dictionary(trx);

		table = dict_table_get_low(rebuild_table->name);

		/* Check the lookup result BEFORE the debug-build reference
		count bump below dereferences the pointer. Previously this
		assertion came after the dereference, making it useless on
		debug builds. */
		ut_a(table != NULL);

		ut_d(table->n_ref_count++);

		row_mysql_unlock_data_dictionary(trx);

		ut_a(table->space == rebuild_table->space_id);

		/* Discard change buffer entries for this space */
		ibuf_delete_for_discarded_space(rebuild_table->space_id);

		msg("[%02lu] Checking if there are indexes to rebuild in table "
		    "%s (space id: %lu)\n",
		    thread->num,
		    rebuild_table->name, rebuild_table->space_id);

		xb_rebuild_indexes_for_table(table, trx, thread->num);

		ut_d(table->n_ref_count--);

		mem_free(rebuild_table->name);
		mem_free(rebuild_table);
	}

	trx_commit_for_mysql(trx);
	trx_free_for_mysql(trx);

	return(NULL);
}
/******************************************************************************
Rebuild all secondary indexes in all tables in separate spaces. Called from
innobase_start_or_create_for_mysql().

Scans SYS_TABLES for tables outside the system tablespace, queues them on
table_list, then starts xtrabackup_rebuild_threads worker threads
(xb_rebuild_indexes_thread_func) that drain the queue and rebuild each
table's secondary indexes, and waits for them to finish. */
void
xb_compact_rebuild_indexes(void)
/*=============================*/
{
	dict_table_t*		sys_tables;
	dict_index_t*		sys_index;
	btr_pcur_t		pcur;
	const rec_t*		rec;
	mtr_t			mtr;
	const byte*		field;
	ulint			len;
	ulint			space_id;
	trx_t*			trx;
	index_rebuild_table_t*	rebuild_table;
	index_rebuild_thread_t*	threads;
	ulint			i;

	/* Set up the dummy table for the index rebuild error reporting */
	dummy_table_share.fields = 0;
	dummy_table.s = &dummy_table_share;

	/* Iterate all tables that are not in the system tablespace and add them
	to the list of tables to be rebuilt later. */

	trx = trx_allocate_for_mysql();
	trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);

	row_mysql_lock_data_dictionary(trx);

	/* Enlarge the fatal lock wait timeout during index rebuild
	operation. */
	os_increment_counter_by_amount(server_mutex,
				       srv_fatal_semaphore_wait_threshold,
				       7200);

	mtr_start(&mtr);

	sys_tables = dict_table_get_low("SYS_TABLES");
	sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
	ut_a(!dict_table_is_comp(sys_tables));

	pthread_mutex_init(&table_list_mutex, NULL);
	UT_LIST_INIT(table_list);

	/* Walk the SYS_TABLES clustered index from its start. */
	btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
				    TRUE, 0, &mtr);
	for (;;) {
		btr_pcur_move_to_next_user_rec(&pcur, &mtr);

		rec = btr_pcur_get_rec(&pcur);

		if (!btr_pcur_is_on_user_rec(&pcur)) {
			/* end of index */
			break;
		}

		if (rec_get_deleted_flag(rec, 0)) {
			continue;
		}

		/* Field 9 of a SYS_TABLES record holds the tablespace ID */
		field = rec_get_nth_field_old(rec, 9, &len);
		ut_a(len == 4);

		space_id = mach_read_from_4(field);

		/* Don't touch tables in the system tablespace */
		if (!fil_is_user_tablespace_id(space_id)) {
			continue;
		}

		/* Field 0 is the table name; queue the table for rebuild. */
		field = rec_get_nth_field_old(rec, 0, &len);

		rebuild_table = static_cast<index_rebuild_table_t *>
			(mem_alloc(sizeof(*rebuild_table)));
		rebuild_table->name = mem_strdupl((char*) field, len);
		rebuild_table->space_id = space_id;

		UT_LIST_ADD_LAST(list, table_list, rebuild_table);
	}

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);

	row_mysql_unlock_data_dictionary(trx);

	trx_commit_for_mysql(trx);

	trx_free_for_mysql(trx);

	/* Start worker threads for the index rebuild operation */
	ut_ad(xtrabackup_rebuild_threads > 0);

	if (xtrabackup_rebuild_threads > 1) {
		msg("Starting %lu threads to rebuild indexes.\n",
		    xtrabackup_rebuild_threads);
	}

	threads = (index_rebuild_thread_t *)
		mem_alloc(sizeof(*threads) *
			  xtrabackup_rebuild_threads);

	for (i = 0; i < xtrabackup_rebuild_threads; i++) {
		threads[i].num = i+1;
		if (pthread_create(&threads[i].id, NULL,
				   xb_rebuild_indexes_thread_func,
				   &threads[i])) {
			msg("error: pthread_create() failed: errno = %d\n",
			    errno);
			ut_a(0);
		}
	}

	/* Wait for worker threads to finish */
	for (i = 0; i < xtrabackup_rebuild_threads; i++) {
		pthread_join(threads[i].id, NULL);
	}

	mem_free(threads);
}
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2013 Percona LLC and/or its affiliates.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XB_COMPACT_H
#define XB_COMPACT_H
#include "write_filt.h"
/* Compact page filter context */
typedef struct {
	my_bool		skip;		/* copy the file verbatim: system
					tablespace, or no usable clustered
					index root page */
	ds_ctxt_t	*ds_buffer;	/* buffered datasink for the page map */
	ds_file_t	*buffer;	/* page map (".pmap") output stream */
	index_id_t	clustered_index; /* ID of the clustered index; valid
					only when clustered_index_found */
	my_bool		clustered_index_found; /* clustered index root page
					has been seen */
	my_bool		inside_skipped_range; /* a skipped range is open and
					its end has not been written yet */
	ulint		free_limit;	/* FSP_FREE_LIMIT read from page 0;
					0 until that page is processed */
} xb_wf_compact_ctxt_t;
/******************************************************************************
Expand the data files according to the skipped pages maps created by --compact.
@return TRUE on success, FALSE on failure. */
my_bool xb_expand_datafiles(void);
#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Data sink interface.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_base.h>
#include "common.h"
#include "datasink.h"
#include "ds_compress.h"
#include "ds_archive.h"
#include "ds_xbstream.h"
#include "ds_local.h"
#include "ds_stdout.h"
#include "ds_tmpfile.h"
#include "ds_encrypt.h"
#include "ds_buffer.h"
/************************************************************************
Create a datasink of the specified type. Maps 'type' to the corresponding
datasink implementation, initializes it with 'root' and returns the new
context. Exits the process if the implementation fails to initialize. */
ds_ctxt_t *
ds_create(const char *root, ds_type_t type)
{
	datasink_t	*sink = NULL;
	ds_ctxt_t	*ctxt;

	/* Select the implementation's operations table. */
	switch (type) {
	case DS_TYPE_STDOUT:	sink = &datasink_stdout;	break;
	case DS_TYPE_LOCAL:	sink = &datasink_local;		break;
	case DS_TYPE_ARCHIVE:	sink = &datasink_archive;	break;
	case DS_TYPE_XBSTREAM:	sink = &datasink_xbstream;	break;
	case DS_TYPE_COMPRESS:	sink = &datasink_compress;	break;
	case DS_TYPE_ENCRYPT:	sink = &datasink_encrypt;	break;
	case DS_TYPE_TMPFILE:	sink = &datasink_tmpfile;	break;
	case DS_TYPE_BUFFER:	sink = &datasink_buffer;	break;
	default:
		msg("Unknown datasink type: %d\n", type);
		xb_ad(0);
		return NULL;
	}

	ctxt = sink->init(root);
	if (ctxt == NULL) {
		msg("Error: failed to initialize datasink.\n");
		exit(EXIT_FAILURE);
	}

	ctxt->datasink = sink;

	return ctxt;
}
/************************************************************************
Open a file in the given datasink. On success the file remembers its
datasink's operations table for the ds_write()/ds_close() dispatchers.
@return the new file handle, or NULL on failure. */
ds_file_t *
ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat)
{
	ds_file_t	*file = ctxt->datasink->open(ctxt, path, stat);

	if (file == NULL) {
		return NULL;
	}

	file->datasink = ctxt->datasink;

	return file;
}
/************************************************************************
Write 'len' bytes from 'buf' to a datasink file, dispatching to the
implementation that created the file.
@return 0 on success, 1 on error. */
int
ds_write(ds_file_t *file, const void *buf, size_t len)
{
	datasink_t	*sink = file->datasink;

	return sink->write(file, buf, len);
}
/************************************************************************
Close a datasink file, dispatching to the implementation that created it.
@return 0 on success, 1, on error. */
int
ds_close(ds_file_t *file)
{
	datasink_t	*sink = file->datasink;

	return sink->close(file);
}
/************************************************************************
Destroy a datasink handle by delegating to its implementation's deinit
hook; the context must not be used afterwards. */
void
ds_destroy(ds_ctxt_t *ctxt)
{
	ctxt->datasink->deinit(ctxt);
}
/************************************************************************
Set the destination pipe for a datasink (only makes sense for filter-type
sinks such as compress and tmpfile); the implementation reads pipe_ctxt
when forwarding data downstream. */
void ds_set_pipe(ds_ctxt_t *ctxt, ds_ctxt_t *pipe_ctxt)
{
	ctxt->pipe_ctxt = pipe_ctxt;
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Data sink interface.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XB_DATASINK_H
#define XB_DATASINK_H
#include <my_global.h>
#include <my_dir.h>
#ifdef __cplusplus
extern "C" {
#endif
struct datasink_struct;
typedef struct datasink_struct datasink_t;

/* A datasink instance; one is created per ds_create() call */
typedef struct ds_ctxt {
	datasink_t	*datasink;	/* operations table, set by ds_create() */
	char		*root;		/* root path for the sink */
	void		*ptr;		/* implementation-private state */
	struct ds_ctxt	*pipe_ctxt;	/* downstream sink, see ds_set_pipe() */
} ds_ctxt_t;

/* An open datasink file; created by ds_open() */
typedef struct {
	void		*ptr;		/* implementation-private state */
	char		*path;		/* file path */
	datasink_t	*datasink;	/* operations table, copied from the
					creating context by ds_open() */
} ds_file_t;

/* Operations table implemented by each datasink type */
struct datasink_struct {
	ds_ctxt_t *(*init)(const char *root);
	ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
	int (*write)(ds_file_t *file, const void *buf, size_t len);
	int (*close)(ds_file_t *file);
	void (*deinit)(ds_ctxt_t *ctxt);
};

/* Supported datasink types; each maps to a datasink_* operations table
in ds_create() */
typedef enum {
	DS_TYPE_STDOUT,		/* datasink_stdout */
	DS_TYPE_LOCAL,		/* datasink_local */
	DS_TYPE_ARCHIVE,	/* datasink_archive */
	DS_TYPE_XBSTREAM,	/* datasink_xbstream */
	DS_TYPE_COMPRESS,	/* datasink_compress */
	DS_TYPE_ENCRYPT,	/* datasink_encrypt */
	DS_TYPE_TMPFILE,	/* datasink_tmpfile */
	DS_TYPE_BUFFER		/* datasink_buffer */
} ds_type_t;

/************************************************************************
Create a datasink of the specified type */
ds_ctxt_t *ds_create(const char *root, ds_type_t type);

/************************************************************************
Open a datasink file */
ds_file_t *ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);

/************************************************************************
Write to a datasink file.
@return 0 on success, 1 on error. */
int ds_write(ds_file_t *file, const void *buf, size_t len);

/************************************************************************
Close a datasink file.
@return 0 on success, 1, on error. */
int ds_close(ds_file_t *file);

/************************************************************************
Destroy a datasink handle */
void ds_destroy(ds_ctxt_t *ctxt);

/************************************************************************
Set the destination pipe for a datasink (only makes sense for compress and
tmpfile). */
void ds_set_pipe(ds_ctxt_t *ctxt, ds_ctxt_t *pipe_ctxt);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* XB_DATASINK_H */
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Streaming implementation for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_base.h>
#include <archive.h>
#include <archive_entry.h>
#include "common.h"
#include "datasink.h"
/* Per-context state of the archive datasink: a single libarchive writer
whose output is streamed into one shared destination file. */
typedef struct {
	struct archive	*archive;	/* libarchive writer handle */
	ds_file_t	*dest_file;	/* shared output stream, opened
					lazily in archive_open() */
	pthread_mutex_t	mutex;		/* guards lazy creation of dest_file */
} ds_archive_ctxt_t;

/* Per-file state: one entry inside the shared archive. */
typedef struct {
	struct archive_entry	*entry;		/* libarchive entry header */
	ds_archive_ctxt_t	*archive_ctxt;	/* owning context */
} ds_archive_file_t;
/***********************************************************************
General archive interface */

static ds_ctxt_t *archive_init(const char *root);
static ds_file_t *archive_open(ds_ctxt_t *ctxt, const char *path,
			       MY_STAT *mystat);
static int archive_write(ds_file_t *file, const void *buf, size_t len);
static int archive_close(ds_file_t *file);
static void archive_deinit(ds_ctxt_t *ctxt);

/* Callback table exported to the datasink framework (see datasink.h). */
datasink_t datasink_archive = {
	&archive_init,
	&archive_open,
	&archive_write,
	&archive_close,
	&archive_deinit
};
/* libarchive open callback: nothing to do here, the destination stream
is opened lazily in archive_open(). Always returns ARCHIVE_OK. */
static
int
my_archive_open_callback(struct archive *a __attribute__((unused)),
			 void *data __attribute__((unused)))
{
	return ARCHIVE_OK;
}
static
ssize_t
my_archive_write_callback(struct archive *a __attribute__((unused)),
void *data, const void *buffer, size_t length)
{
ds_archive_ctxt_t *archive_ctxt;
archive_ctxt = (ds_archive_ctxt_t *) data;
xb_ad(archive_ctxt != NULL);
xb_ad(archive_ctxt->dest_file != NULL);
if (!ds_write(archive_ctxt->dest_file, buffer, length)) {
return length;
}
return -1;
}
/* libarchive close callback: nothing to do here, the destination stream
is closed in archive_deinit(). Always returns ARCHIVE_OK. */
static
int
my_archive_close_callback(struct archive *a __attribute__((unused)),
			  void *data __attribute__((unused)))
{
	return ARCHIVE_OK;
}
/* Create an archive datasink context. Configures a libarchive writer
producing an uncompressed pax-restricted stream whose output is forwarded
through my_archive_write_callback() to the destination datasink (set
later with ds_set_pipe()).
@return the new context, or NULL on error. */
static
ds_ctxt_t *
archive_init(const char *root __attribute__((unused)))
{
	ds_ctxt_t		*ctxt;
	ds_archive_ctxt_t	*archive_ctxt;
	struct archive		*a;

	/* Single allocation: the generic context is followed immediately
	by the archive-specific state. */
	ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_archive_ctxt_t),
			 MYF(MY_FAE));
	archive_ctxt = (ds_archive_ctxt_t *)(ctxt + 1);

	if (pthread_mutex_init(&archive_ctxt->mutex, NULL)) {
		msg("archive_init: pthread_mutex_init() failed.\n");
		goto err;
	}

	a = archive_write_new();
	if (a == NULL) {
		msg("archive_write_new() failed.\n");
		goto err_mutex;
	}

	archive_ctxt->archive = a;
	archive_ctxt->dest_file = NULL;

	if (archive_write_set_compression_none(a) != ARCHIVE_OK ||
	    archive_write_set_format_pax_restricted(a) != ARCHIVE_OK ||
	    /* disable internal buffering so we don't have to flush the
	    output in xtrabackup */
	    archive_write_set_bytes_per_block(a, 0) != ARCHIVE_OK) {
		msg("failed to set libarchive archive options: %s\n",
		    archive_error_string(a));
		archive_write_finish(a);
		goto err_mutex;
	}

	if (archive_write_open(a, archive_ctxt, my_archive_open_callback,
			       my_archive_write_callback,
			       my_archive_close_callback) != ARCHIVE_OK) {
		/* Bug fix: this path used to 'return NULL' directly,
		leaking ctxt, the mutex and the archive handle. */
		msg("cannot open output archive.\n");
		archive_write_finish(a);
		goto err_mutex;
	}

	ctxt->ptr = archive_ctxt;

	return ctxt;

err_mutex:
	pthread_mutex_destroy(&archive_ctxt->mutex);
err:
	my_free(ctxt);
	return NULL;
}
/* Start a new file entry in the archive. On the first call the shared
destination stream is opened lazily on the piped datasink; subsequent
entries reuse it. Writes the entry header before returning.
@return a new datasink file handle, or NULL on error. */
static
ds_file_t *
archive_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
{
	ds_archive_ctxt_t	*archive_ctxt;
	ds_ctxt_t		*dest_ctxt;
	ds_file_t		*file;
	ds_archive_file_t	*archive_file;
	struct archive		*a;
	struct archive_entry	*entry;

	xb_ad(ctxt->pipe_ctxt != NULL);
	dest_ctxt = ctxt->pipe_ctxt;

	archive_ctxt = (ds_archive_ctxt_t *) ctxt->ptr;

	/* The destination stream is shared between all entries; the mutex
	serializes its lazy creation. */
	pthread_mutex_lock(&archive_ctxt->mutex);
	if (archive_ctxt->dest_file == NULL) {
		archive_ctxt->dest_file = ds_open(dest_ctxt, path, mystat);
		if (archive_ctxt->dest_file == NULL) {
			/* Bug fix: the original code returned here with
			archive_ctxt->mutex still locked. */
			pthread_mutex_unlock(&archive_ctxt->mutex);
			return NULL;
		}
	}
	pthread_mutex_unlock(&archive_ctxt->mutex);

	/* Single allocation: file handle followed by per-entry state. */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_archive_file_t),
				       MYF(MY_FAE));
	archive_file = (ds_archive_file_t *) (file + 1);

	a = archive_ctxt->archive;

	entry = archive_entry_new();
	if (entry == NULL) {
		msg("archive_entry_new() failed.\n");
		goto err;
	}

	/* Describe the entry: regular file with a fixed 0660 mode; size
	and mtime come from the source file's stat info. */
	archive_entry_set_size(entry, mystat->st_size);
	archive_entry_set_mode(entry, 0660);
	archive_entry_set_filetype(entry, AE_IFREG);
	archive_entry_set_pathname(entry, path);
	archive_entry_set_mtime(entry, mystat->st_mtime, 0);

	archive_file->entry = entry;
	archive_file->archive_ctxt = archive_ctxt;

	if (archive_write_header(a, entry) != ARCHIVE_OK) {
		msg("archive_write_header() failed.\n");
		archive_entry_free(entry);
		goto err;
	}

	file->ptr = archive_file;
	file->path = archive_ctxt->dest_file->path;

	return file;

err:
	if (archive_ctxt->dest_file) {
		ds_close(archive_ctxt->dest_file);
		archive_ctxt->dest_file = NULL;
	}
	my_free(file);

	return NULL;
}
/* Append a chunk of data to the current archive entry.
@return 0 on success, 1 on error. */
static
int
archive_write(ds_file_t *file, const void *buf, size_t len)
{
	ds_archive_file_t	*archive_file =
		(ds_archive_file_t *) file->ptr;
	struct archive		*a = archive_file->archive_ctxt->archive;

	xb_ad(archive_file->archive_ctxt->dest_file != NULL);

	/* archive_write_data() returns a negative value on failure. */
	if (archive_write_data(a, buf, len) < 0) {
		msg("archive_write_data() failed: %s (errno = %d)\n",
		    archive_error_string(a), archive_errno(a));
		return 1;
	}

	return 0;
}
/* Finish an archive entry: release the libarchive entry object and the
file handle. The archive itself and the shared destination stream stay
open until archive_deinit(). Always returns 0. */
static
int
archive_close(ds_file_t *file)
{
	ds_archive_file_t *archive_file =
		(ds_archive_file_t *) file->ptr;

	archive_entry_free(archive_file->entry);
	my_free(file);

	return 0;
}
/* Destroy the archive datasink: close the libarchive writer (flushing
the archive trailer through the write callback), then close the shared
destination file and release the context. */
static
void
archive_deinit(ds_ctxt_t *ctxt)
{
	struct archive *a;
	ds_archive_ctxt_t	*archive_ctxt;

	archive_ctxt = (ds_archive_ctxt_t *) ctxt->ptr;

	a = archive_ctxt->archive;

	/* Must happen before closing dest_file: archive_write_close()
	still writes the trailer via my_archive_write_callback(). */
	if (archive_write_close(a) != ARCHIVE_OK) {
		msg("archive_write_close() failed.\n");
	}
	archive_write_finish(a);

	if (archive_ctxt->dest_file) {
		ds_close(archive_ctxt->dest_file);
		archive_ctxt->dest_file = NULL;
	}

	pthread_mutex_destroy(&archive_ctxt->mutex);

	/* archive_ctxt was allocated in the same block as ctxt. */
	my_free(ctxt);
}
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Streaming interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_ARCHIVE_H
#define DS_ARCHIVE_H

#include "datasink.h"

/* Datasink that streams files into a libarchive-produced archive
(implemented in ds_archive.c). */
extern datasink_t datasink_archive;

#endif
/******************************************************
Copyright (c) 2012-2013 Percona LLC and/or its affiliates.
buffer datasink for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Does buffered output to a destination datasink set with ds_set_pipe().
Writes to the destination datasink are guaranteed to not be smaller than a
specified buffer size (DS_DEFAULT_BUFFER_SIZE by default), with the only
exception for the last write for a file. */
#include <mysql_version.h>
#include <my_base.h>
#include "ds_buffer.h"
#include "common.h"
#include "datasink.h"
/* Default minimum write size; overridable per context with
ds_buffer_set_size(). */
#define DS_DEFAULT_BUFFER_SIZE (64 * 1024)

/* Per-file state of the buffer datasink. */
typedef struct {
	ds_file_t	*dst_file;	/* destination (piped) file */
	char		*buf;		/* accumulation buffer */
	size_t		pos;		/* bytes currently buffered */
	size_t		size;		/* capacity of buf */
} ds_buffer_file_t;

/* Per-context state: just the configured buffer size. */
typedef struct {
	size_t	buffer_size;
} ds_buffer_ctxt_t;
/* Datasink interface implementation (see datasink.h). */
static ds_ctxt_t *buffer_init(const char *root);
static ds_file_t *buffer_open(ds_ctxt_t *ctxt, const char *path,
			      MY_STAT *mystat);
static int buffer_write(ds_file_t *file, const void *buf, size_t len);
static int buffer_close(ds_file_t *file);
static void buffer_deinit(ds_ctxt_t *ctxt);

/* Callback table exported to the datasink framework. */
datasink_t datasink_buffer = {
	&buffer_init,
	&buffer_open,
	&buffer_write,
	&buffer_close,
	&buffer_deinit
};
/* Change the buffer size used by subsequent buffer_open() calls on this
context (defaults to DS_DEFAULT_BUFFER_SIZE). */
void ds_buffer_set_size(ds_ctxt_t *ctxt, size_t size)
{
	((ds_buffer_ctxt_t *) ctxt->ptr)->buffer_size = size;
}
/* Create a buffer datasink context with the default buffer size.
@return the new context (allocation failures abort via MY_FAE). */
static ds_ctxt_t *
buffer_init(const char *root)
{
	ds_buffer_ctxt_t	*buffer_ctxt;
	ds_ctxt_t		*ctxt;

	/* Single allocation: generic context followed by buffer state. */
	ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_buffer_ctxt_t),
			 MYF(MY_FAE));
	buffer_ctxt = (ds_buffer_ctxt_t *) (ctxt + 1);
	buffer_ctxt->buffer_size = DS_DEFAULT_BUFFER_SIZE;

	ctxt->root = my_strdup(root, MYF(MY_FAE));
	ctxt->ptr = buffer_ctxt;

	return ctxt;
}
/* Open a buffered file: open the destination file on the piped datasink
and allocate the handle, the per-file state and the data buffer in a
single block.
NOTE(review): a ds_open() failure terminates the process here instead of
returning NULL like the other datasinks — confirm this is intentional. */
static ds_file_t *
buffer_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
{
	ds_buffer_ctxt_t	*buffer_ctxt;
	ds_ctxt_t		*pipe_ctxt;
	ds_file_t		*dst_file;
	ds_file_t		*file;
	ds_buffer_file_t	*buffer_file;

	pipe_ctxt = ctxt->pipe_ctxt;
	xb_a(pipe_ctxt != NULL);

	dst_file = ds_open(pipe_ctxt, path, mystat);
	if (dst_file == NULL) {
		exit(EXIT_FAILURE);
	}

	buffer_ctxt = (ds_buffer_ctxt_t *) ctxt->ptr;

	/* Single allocation: handle, state, then the buffer itself. */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_buffer_file_t) +
				       buffer_ctxt->buffer_size,
				       MYF(MY_FAE));
	buffer_file = (ds_buffer_file_t *) (file + 1);
	buffer_file->dst_file = dst_file;
	buffer_file->buf = (char *) (buffer_file + 1);
	buffer_file->size = buffer_ctxt->buffer_size;
	buffer_file->pos = 0;

	file->path = dst_file->path;
	file->ptr = buffer_file;

	return file;
}
/* Buffered write: accumulate input until the buffer holds exactly
buffer_file->size bytes, then flush it to the destination. Input that
arrives while the buffer is empty and doesn't fit is forwarded directly
without copying, so destination writes are never smaller than the buffer
size (except the final flush in buffer_close()).
@return 0 on success, 1 on a destination write error. */
static int
buffer_write(ds_file_t *file, const void *buf, size_t len)
{
	ds_buffer_file_t	*buffer_file;

	buffer_file = (ds_buffer_file_t *) file->ptr;

	while (len > 0) {
		if (buffer_file->pos + len > buffer_file->size) {
			if (buffer_file->pos > 0) {
				size_t bytes;

				/* Top the buffer up to exactly 'size'
				bytes and flush it. */
				bytes = buffer_file->size - buffer_file->pos;
				memcpy(buffer_file->buf + buffer_file->pos, buf,
				       bytes);

				if (ds_write(buffer_file->dst_file,
					     buffer_file->buf,
					     buffer_file->size)) {
					return 1;
				}

				buffer_file->pos = 0;
				/* Loop again with the remaining input. */
				buf = (const char *) buf + bytes;
				len -= bytes;
			} else {
				/* We don't have any buffered bytes, just write
				the entire source buffer */
				if (ds_write(buffer_file->dst_file, buf, len)) {
					return 1;
				}
				break;
			}
		} else {
			/* Input fits in the buffer: just accumulate. */
			memcpy(buffer_file->buf + buffer_file->pos, buf, len);
			buffer_file->pos += len;
			break;
		}
	}

	return 0;
}
/* Flush any remaining buffered bytes and close the destination file.
This final flush is the only destination write allowed to be smaller
than the configured buffer size.
@return 0 on success, 1 if either the flush or the close failed. */
static int
buffer_close(ds_file_t *file)
{
	ds_buffer_file_t	*buffer_file;
	int			ret = 0;

	buffer_file = (ds_buffer_file_t *) file->ptr;

	if (buffer_file->pos > 0 &&
	    ds_write(buffer_file->dst_file, buffer_file->buf,
		     buffer_file->pos)) {
		/* Bug fix: the original code ignored a failed flush and
		could report success for a truncated file. */
		ret = 1;
	}

	if (ds_close(buffer_file->dst_file)) {
		ret = 1;
	}

	my_free(file);

	return ret;
}
/* Destroy a buffer datasink context. */
static void
buffer_deinit(ds_ctxt_t *ctxt)
{
	/* buffer_ctxt was allocated in the same block as ctxt in
	buffer_init(), so one my_free() releases both. */
	my_free(ctxt->root);
	my_free(ctxt);
}
/******************************************************
Copyright (c) 2012-2013 Percona LLC and/or its affiliates.
buffer datasink for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_BUFFER_H
#define DS_BUFFER_H

#include "datasink.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Buffering filter datasink: guarantees a minimum write size to the
piped destination (implemented in ds_buffer.c). */
extern datasink_t datasink_buffer;

/* Change the default buffer size */
void ds_buffer_set_size(ds_ctxt_t *ctxt, size_t size);

#ifdef __cplusplus
}
#endif

#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Compressing datasink implementation for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include <quicklz.h>
#include <zlib.h>
#include "common.h"
#include "datasink.h"
/* Size of the chunks handed to worker threads (runtime-configurable). */
#define COMPRESS_CHUNK_SIZE ((size_t) (xtrabackup_compress_chunk_size))
/* Extra space reserved in the output buffer for incompressible input
(presumably QuickLZ's documented worst-case expansion — TODO confirm). */
#define MY_QLZ_COMPRESS_OVERHEAD 400

/* Per-worker-thread state, shared between the producer
(compress_write) and the worker (compress_worker_thread_func). */
typedef struct {
	pthread_t		id;		/* worker thread handle */
	uint			num;		/* worker number, 1-based */
	pthread_mutex_t		ctrl_mutex;	/* serializes producers and
						the startup handshake */
	pthread_cond_t		ctrl_cond;	/* signals 'started' */
	pthread_mutex_t		data_mutex;	/* guards the fields below */
	pthread_cond_t		data_cond;	/* input ready / result ready */
	my_bool			started;	/* worker is running */
	my_bool			data_avail;	/* input chunk pending */
	my_bool			cancelled;	/* shutdown requested */
	const char		*from;		/* input chunk */
	size_t			from_len;	/* input chunk length */
	char			*to;		/* compressed output buffer */
	size_t			to_len;		/* compressed length */
	qlz_state_compress	state;		/* QuickLZ scratch state */
	ulong			adler;		/* qpress-style Adler-32 of to */
} comp_thread_ctxt_t;

/* Per-context state: the worker thread pool. */
typedef struct {
	comp_thread_ctxt_t	*threads;
	uint			nthreads;
} ds_compress_ctxt_t;

/* Per-file state. */
typedef struct {
	ds_file_t		*dest_file;	/* destination (piped) file */
	ds_compress_ctxt_t	*comp_ctxt;	/* owning context */
	size_t			bytes_processed; /* uncompressed bytes written
						so far; used as the chunk
						offset in the qpress stream */
} ds_compress_file_t;
/* Compression options (defined by the xtrabackup option parser). */
extern char		*xtrabackup_compress_alg;
extern uint		xtrabackup_compress_threads;
extern ulonglong	xtrabackup_compress_chunk_size;

/* Datasink interface implementation (see datasink.h). */
static ds_ctxt_t *compress_init(const char *root);
static ds_file_t *compress_open(ds_ctxt_t *ctxt, const char *path,
				MY_STAT *mystat);
static int compress_write(ds_file_t *file, const void *buf, size_t len);
static int compress_close(ds_file_t *file);
static void compress_deinit(ds_ctxt_t *ctxt);

/* Callback table exported to the datasink framework. */
datasink_t datasink_compress = {
	&compress_init,
	&compress_open,
	&compress_write,
	&compress_close,
	&compress_deinit
};

/* Internal helpers. */
static inline int write_uint32_le(ds_file_t *file, ulong n);
static inline int write_uint64_le(ds_file_t *file, ulonglong n);

static comp_thread_ctxt_t *create_worker_threads(uint n);
static void destroy_worker_threads(comp_thread_ctxt_t *threads, uint n);
static void *compress_worker_thread_func(void *arg);
static
ds_ctxt_t *
compress_init(const char *root)
{
ds_ctxt_t *ctxt;
ds_compress_ctxt_t *compress_ctxt;
comp_thread_ctxt_t *threads;
/* Create and initialize the worker threads */
threads = create_worker_threads(xtrabackup_compress_threads);
if (threads == NULL) {
msg("compress: failed to create worker threads.\n");
return NULL;
}
ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) +
sizeof(ds_compress_ctxt_t),
MYF(MY_FAE));
compress_ctxt = (ds_compress_ctxt_t *) (ctxt + 1);
compress_ctxt->threads = threads;
compress_ctxt->nthreads = xtrabackup_compress_threads;
ctxt->ptr = compress_ctxt;
ctxt->root = my_strdup(root, MYF(MY_FAE));
return ctxt;
}
/* Open a compressed file: open "<path>.qp" on the piped datasink and
write the qpress archive header plus the qpress file header for a
one-file "flat" archive.
@return a new datasink file handle, or NULL on error. */
static
ds_file_t *
compress_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
{
	ds_compress_ctxt_t	*comp_ctxt;
	ds_ctxt_t		*dest_ctxt;
	ds_file_t		*dest_file;
	char			new_name[FN_REFLEN];
	size_t			name_len;
	ds_file_t		*file;
	ds_compress_file_t	*comp_file;

	xb_ad(ctxt->pipe_ctxt != NULL);
	dest_ctxt = ctxt->pipe_ctxt;

	comp_ctxt = (ds_compress_ctxt_t *) ctxt->ptr;

	/* Append the .qp extension to the filename */
	fn_format(new_name, path, "", ".qp", MYF(MY_APPEND_EXT));

	dest_file = ds_open(dest_ctxt, new_name, mystat);
	if (dest_file == NULL) {
		return NULL;
	}

	/* Write the qpress archive header: magic + chunk size. */
	if (ds_write(dest_file, "qpress10", 8) ||
	    write_uint64_le(dest_file, COMPRESS_CHUNK_SIZE)) {
		goto err;
	}

	/* We are going to create a one-file "flat" (i.e. with no
	subdirectories) archive. So strip the directory part from the path and
	remove the '.qp' suffix. */
	fn_format(new_name, path, "", "", MYF(MY_REPLACE_DIR));

	/* Write the qpress file header: 'F', name length, name. */
	name_len = strlen(new_name);
	if (ds_write(dest_file, "F", 1) ||
	    write_uint32_le(dest_file, name_len) ||
	    /* we want to write the terminating \0 as well */
	    ds_write(dest_file, new_name, name_len + 1)) {
		goto err;
	}

	/* Single allocation: handle followed by per-file state. */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_compress_file_t),
				       MYF(MY_FAE));
	comp_file = (ds_compress_file_t *) (file + 1);
	comp_file->dest_file = dest_file;
	comp_file->comp_ctxt = comp_ctxt;
	comp_file->bytes_processed = 0;

	file->ptr = comp_file;
	file->path = dest_file->path;

	return file;

err:
	ds_close(dest_file);
	return NULL;
}
/* Compress and stream a buffer. The input is split into chunks of at
most COMPRESS_CHUNK_SIZE, handed to the worker threads in order, then
reaped in the same order so the output preserves input ordering.
Per-chunk output: "NEWBNEWB" magic, 64-bit LE offset of the chunk in
the uncompressed stream, 32-bit LE qpress-style Adler-32, then the
compressed bytes.
NOTE(review): the 'return 1' error paths leave data_mutex/ctrl_mutex of
the pending threads locked; presumably tolerable because the caller
aborts the backup on error — confirm.
@return 0 on success, 1 on error. */
static
int
compress_write(ds_file_t *file, const void *buf, size_t len)
{
	ds_compress_file_t	*comp_file;
	ds_compress_ctxt_t	*comp_ctxt;
	comp_thread_ctxt_t	*threads;
	comp_thread_ctxt_t	*thd;
	uint			nthreads;
	uint			i;
	const char		*ptr;
	ds_file_t		*dest_file;

	comp_file = (ds_compress_file_t *) file->ptr;
	comp_ctxt = comp_file->comp_ctxt;
	dest_file = comp_file->dest_file;

	threads = comp_ctxt->threads;
	nthreads = comp_ctxt->nthreads;

	ptr = (const char *) buf;

	while (len > 0) {
		uint max_thread;

		/* Send data to worker threads for compression */
		for (i = 0; i < nthreads; i++) {
			size_t chunk_len;

			thd = threads + i;

			/* ctrl_mutex is held until the result has been
			reaped below, keeping this producer exclusive. */
			pthread_mutex_lock(&thd->ctrl_mutex);

			chunk_len = (len > COMPRESS_CHUNK_SIZE) ?
				COMPRESS_CHUNK_SIZE : len;
			thd->from = ptr;
			thd->from_len = chunk_len;

			/* Hand the chunk over and wake the worker. */
			pthread_mutex_lock(&thd->data_mutex);
			thd->data_avail = TRUE;
			pthread_cond_signal(&thd->data_cond);
			pthread_mutex_unlock(&thd->data_mutex);

			len -= chunk_len;
			if (len == 0) {
				break;
			}
			ptr += chunk_len;
		}

		/* Index of the last thread that actually got a chunk. */
		max_thread = (i < nthreads) ? i : nthreads - 1;

		/* Reap and stream the compressed data */
		for (i = 0; i <= max_thread; i++) {
			thd = threads + i;

			/* Wait until the worker clears data_avail, i.e.
			the result is in thd->to / thd->to_len. */
			pthread_mutex_lock(&thd->data_mutex);
			while (thd->data_avail == TRUE) {
				pthread_cond_wait(&thd->data_cond,
						  &thd->data_mutex);
			}

			xb_a(threads[i].to_len > 0);

			if (ds_write(dest_file, "NEWBNEWB", 8) ||
			    write_uint64_le(dest_file,
					    comp_file->bytes_processed)) {
				msg("compress: write to the destination stream "
				    "failed.\n");
				return 1;
			}

			comp_file->bytes_processed += threads[i].from_len;

			if (write_uint32_le(dest_file, threads[i].adler) ||
			    ds_write(dest_file, threads[i].to,
				     threads[i].to_len)) {
				msg("compress: write to the destination stream "
				    "failed.\n");
				return 1;
			}

			pthread_mutex_unlock(&threads[i].data_mutex);
			pthread_mutex_unlock(&threads[i].ctrl_mutex);
		}
	}

	return 0;
}
/* Write the qpress archive trailer and close the destination file.
@return 0 on success, 1 on error. */
static
int
compress_close(ds_file_t *file)
{
	ds_compress_file_t	*comp_file;
	ds_file_t		*dest_file;
	int			rc = 0;

	comp_file = (ds_compress_file_t *) file->ptr;
	dest_file = comp_file->dest_file;

	/* Write the qpress file trailer. Supposedly the number of written
	bytes should be written as "recovery information" here, but in
	reality qpress always writes 8 zeros, so we do the same.
	Bug fix: the original code ignored failures of these writes and
	could report success for a truncated archive. */
	if (ds_write(dest_file, "ENDSENDS", 8) ||
	    write_uint64_le(dest_file, 0)) {
		rc = 1;
	}

	if (ds_close(dest_file)) {
		rc = 1;
	}

	my_free(file);

	return rc;
}
/* Destroy the compress datasink: shut down the worker pool and free the
context (the compress state was allocated in the same block). */
static
void
compress_deinit(ds_ctxt_t *ctxt)
{
	ds_compress_ctxt_t *comp;

	xb_ad(ctxt->pipe_ctxt != NULL);

	comp = (ds_compress_ctxt_t *) ctxt->ptr;
	destroy_worker_threads(comp->threads, comp->nthreads);

	my_free(ctxt->root);
	my_free(ctxt);
}
/* Serialize n as a 4-byte little-endian integer into the datasink.
@return 0 on success, 1 on error (as reported by ds_write()). */
static inline
int
write_uint32_le(ds_file_t *file, ulong n)
{
	char	le_buf[4];

	int4store(le_buf, n);

	return ds_write(file, le_buf, sizeof(le_buf));
}
/* Serialize n as an 8-byte little-endian integer into the datasink.
@return 0 on success, 1 on error (as reported by ds_write()). */
static inline
int
write_uint64_le(ds_file_t *file, ulonglong n)
{
	char	le_buf[8];

	int8store(le_buf, n);

	return ds_write(file, le_buf, sizeof(le_buf));
}
/* Allocate and start n compression worker threads. Each worker gets an
output buffer sized for the worst case (chunk size plus QuickLZ
overhead). On return every worker has completed its startup handshake
and its ctrl_mutex is unlocked.
NOTE(review): on failure this returns NULL without freeing the threads
array, the per-thread 'to' buffers, the mutexes/condvars already
initialized, or joining threads already created; callers treat this as
a fatal startup error, but it is still a leak — confirm upstream.
@return the thread array, or NULL on error. */
static
comp_thread_ctxt_t *
create_worker_threads(uint n)
{
	comp_thread_ctxt_t	*threads;
	uint			i;

	threads = (comp_thread_ctxt_t *)
		my_malloc(sizeof(comp_thread_ctxt_t) * n, MYF(MY_FAE));

	for (i = 0; i < n; i++) {
		comp_thread_ctxt_t *thd = threads + i;

		thd->num = i + 1;

		thd->started = FALSE;
		thd->cancelled = FALSE;
		thd->data_avail = FALSE;

		/* Worst-case output size for an incompressible chunk. */
		thd->to = (char *) my_malloc(COMPRESS_CHUNK_SIZE +
					     MY_QLZ_COMPRESS_OVERHEAD,
					     MYF(MY_FAE));

		/* Initialize the control mutex and condition var */
		if (pthread_mutex_init(&thd->ctrl_mutex, NULL) ||
		    pthread_cond_init(&thd->ctrl_cond, NULL)) {
			goto err;
		}

		/* Initialize and data mutex and condition var */
		if (pthread_mutex_init(&thd->data_mutex, NULL) ||
		    pthread_cond_init(&thd->data_cond, NULL)) {
			goto err;
		}

		/* Locked here; released below once the worker signals
		'started'. Afterwards compress_write() uses it to
		serialize producer access to this worker. */
		pthread_mutex_lock(&thd->ctrl_mutex);
		if (pthread_create(&thd->id, NULL, compress_worker_thread_func,
				   thd)) {
			msg("compress: pthread_create() failed: "
			    "errno = %d\n", errno);
			goto err;
		}
	}

	/* Wait for the threads to start */
	for (i = 0; i < n; i++) {
		comp_thread_ctxt_t *thd = threads + i;

		while (thd->started == FALSE)
			pthread_cond_wait(&thd->ctrl_cond, &thd->ctrl_mutex);
		pthread_mutex_unlock(&thd->ctrl_mutex);
	}

	return threads;

err:
	return NULL;
}
/* Stop and tear down the worker pool created by create_worker_threads():
signal cancellation to each worker, join it, then destroy its
synchronization objects and free its output buffer. */
static
void
destroy_worker_threads(comp_thread_ctxt_t *threads, uint n)
{
	uint i;

	for (i = 0; i < n; i++) {
		comp_thread_ctxt_t *thd = &threads[i];

		/* Wake the worker so it observes the cancel flag. */
		pthread_mutex_lock(&thd->data_mutex);
		thd->cancelled = TRUE;
		pthread_cond_signal(&thd->data_cond);
		pthread_mutex_unlock(&thd->data_mutex);

		pthread_join(thd->id, NULL);

		pthread_cond_destroy(&thd->data_cond);
		pthread_mutex_destroy(&thd->data_mutex);
		pthread_cond_destroy(&thd->ctrl_cond);
		pthread_mutex_destroy(&thd->ctrl_mutex);

		my_free(thd->to);
	}

	my_free(threads);
}
/* Worker thread main loop. Protocol with compress_write():
- at startup, set 'started' and signal ctrl_cond under ctrl_mutex so
  create_worker_threads() can proceed;
- then, holding data_mutex, clear data_avail (signalling "result
  ready"/"idle"), wait for the producer to set data_avail (or for
  cancellation), compress the chunk and compute its checksum, and loop.
The producer observes the cleared data_avail flag under data_mutex to
know the result in 'to'/'to_len'/'adler' is complete. */
static
void *
compress_worker_thread_func(void *arg)
{
	comp_thread_ctxt_t *thd = (comp_thread_ctxt_t *) arg;

	pthread_mutex_lock(&thd->ctrl_mutex);

	pthread_mutex_lock(&thd->data_mutex);

	/* Startup handshake: tell the creator we are running. */
	thd->started = TRUE;
	pthread_cond_signal(&thd->ctrl_cond);

	pthread_mutex_unlock(&thd->ctrl_mutex);

	while (1) {
		thd->data_avail = FALSE;
		pthread_cond_signal(&thd->data_cond);

		while (!thd->data_avail && !thd->cancelled) {
			pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
		}

		if (thd->cancelled)
			break;

		thd->to_len = qlz_compress(thd->from, thd->to, thd->from_len,
					   &thd->state);

		/* qpress uses 0x00010000 as the initial value, but its own
		Adler-32 implementation treats the value differently:
		1. higher order bits are the sum of all bytes in the sequence
		2. lower order bits are the sum of resulting values at every
		step.
		So it's the other way around as compared to zlib's adler32().
		That's why 0x00000001 is being passed here to be compatible
		with qpress implementation. */

		thd->adler = adler32(0x00000001, (uchar *) thd->to,
				     thd->to_len);
	}

	pthread_mutex_unlock(&thd->data_mutex);

	return NULL;
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Compression interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_COMPRESS_H
#define DS_COMPRESS_H

#include "datasink.h"

/* Datasink that compresses files into a qpress-compatible stream
(implemented in ds_compress.c). */
extern datasink_t datasink_compress;

#endif
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Encryption datasink implementation for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_base.h>
#include "common.h"
#include "datasink.h"
#if GCC_VERSION >= 4002
/* Workaround to avoid "gcry_ac_* is deprecated" warnings in gcrypt.h */
# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#include <gcrypt.h>
#if GCC_VERSION >= 4002
# pragma GCC diagnostic warning "-Wdeprecated-declarations"
#endif
#include "xbcrypt.h"
#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
GCRY_THREAD_OPTION_PTHREAD_IMPL;
#endif
/* Size of the chunks handed to encryption workers (runtime-configurable). */
#define XB_CRYPT_CHUNK_SIZE ((size_t) (xtrabackup_encrypt_chunk_size))

/* Per-worker-thread state, shared between the producer (encrypt_write)
and the worker thread. Mirrors comp_thread_ctxt_t in ds_compress.c. */
typedef struct {
	pthread_t		id;		/* worker thread handle */
	uint			num;		/* worker number, 1-based */
	pthread_mutex_t		ctrl_mutex;	/* serializes producers and
						the startup handshake */
	pthread_cond_t		ctrl_cond;	/* signals 'started' */
	pthread_mutex_t		data_mutex;	/* guards the fields below */
	pthread_cond_t		data_cond;	/* input ready / result ready */
	my_bool			started;	/* worker is running */
	my_bool			data_avail;	/* input chunk pending */
	my_bool			cancelled;	/* shutdown requested */
	const char		*from;		/* input chunk */
	size_t			from_len;	/* input chunk length */
	char			*to;		/* encrypted output buffer */
	char			*iv;		/* IV used for this chunk */
	size_t			to_len;		/* encrypted length */
	gcry_cipher_hd_t	cipher_handle;	/* per-thread gcrypt cipher */
} crypt_thread_ctxt_t;

/* Per-context state: the worker thread pool. */
typedef struct {
	crypt_thread_ctxt_t	*threads;
	uint			nthreads;
} ds_encrypt_ctxt_t;

/* Per-file state. */
typedef struct {
	xb_wcrypt_t		*xbcrypt_file;	/* xbcrypt stream writer */
	ds_encrypt_ctxt_t	*crypt_ctxt;	/* owning context */
	size_t			bytes_processed; /* plaintext bytes written */
	ds_file_t		*dest_file;	/* destination (piped) file */
} ds_encrypt_file_t;
/* Encryption options (defined by the xtrabackup option parser). */
extern ulong		xtrabackup_encrypt_algo;
extern char		*xtrabackup_encrypt_key;
extern char		*xtrabackup_encrypt_key_file;
extern uint		xtrabackup_encrypt_threads;
extern ulonglong	xtrabackup_encrypt_chunk_size;

/* Datasink interface implementation (see datasink.h). */
static ds_ctxt_t *encrypt_init(const char *root);
static ds_file_t *encrypt_open(ds_ctxt_t *ctxt, const char *path,
			       MY_STAT *mystat);
static int encrypt_write(ds_file_t *file, const void *buf, size_t len);
static int encrypt_close(ds_file_t *file);
static void encrypt_deinit(ds_ctxt_t *ctxt);

/* Callback table exported to the datasink framework. */
datasink_t datasink_encrypt = {
	&encrypt_init,
	&encrypt_open,
	&encrypt_write,
	&encrypt_close,
	&encrypt_deinit
};

static crypt_thread_ctxt_t *create_worker_threads(uint n);
static void destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n);
static void *encrypt_worker_thread_func(void *arg);

/* Maps the --encrypt-algo option value to a libgcrypt cipher id. */
static uint encrypt_algos[] = { GCRY_CIPHER_NONE, GCRY_CIPHER_AES128,
				GCRY_CIPHER_AES192, GCRY_CIPHER_AES256 };
static uint encrypt_algo;		/* selected cipher, set in encrypt_init() */
static const uint encrypt_mode = GCRY_CIPHER_MODE_CTR;
static uint encrypt_key_len = 0;	/* set in encrypt_init() */
static size_t encrypt_iv_len = 0;	/* cipher block length, set in
					encrypt_init() */
/* xbcrypt write callback: forward an encrypted block to the destination
datasink file. Returns the number of bytes written on success or -1 on
error, as the xbcrypt interface expects. */
static
ssize_t
my_xb_crypt_write_callback(void *userdata, const void *buf, size_t len)
{
	ds_encrypt_file_t *encrypt_file = (ds_encrypt_file_t *) userdata;

	xb_ad(encrypt_file != NULL);
	xb_ad(encrypt_file->dest_file != NULL);

	/* ds_write() returns non-zero on failure. */
	if (ds_write(encrypt_file->dest_file, buf, len)) {
		return -1;
	}

	return len;
}
/* Create an encrypt datasink context. Initializes libgcrypt (thread
callbacks first, then version check, secure memory and finalization —
the call order is mandated by the gcrypt documentation), resolves the
cipher and IV length, loads the encryption key from the option or the
key file, and starts the worker thread pool.
@return the new context, or NULL on error. */
static
ds_ctxt_t *
encrypt_init(const char *root)
{
	ds_ctxt_t		*ctxt;
	ds_encrypt_ctxt_t	*encrypt_ctxt;
	crypt_thread_ctxt_t	*threads;
	gcry_error_t		gcry_error;

	/* Acording to gcrypt docs (and my testing), setting up the threading
	callbacks must be done first, so, lets give it a shot */
#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
	gcry_error = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
	if (gcry_error) {
		msg("encrypt: unable to set libgcrypt thread cbs - "
		    "%s : %s\n",
		    gcry_strsource(gcry_error),
		    gcry_strerror(gcry_error));
		return NULL;
	}
#endif

	/* Version check should be the very next call because it
	makes sure that important subsystems are intialized. */
	if (!gcry_control(GCRYCTL_ANY_INITIALIZATION_P)) {
		const char	*gcrypt_version;
		gcrypt_version = gcry_check_version(NULL);
		/* No other library has already initialized libgcrypt. */
		if (!gcrypt_version) {
			msg("encrypt: failed to initialize libgcrypt\n");
			return NULL;
		} else {
			msg("encrypt: using gcrypt %s\n", gcrypt_version);
		}
	}

	/* Disable the gcry secure memory, not dealing with this for now */
	gcry_error = gcry_control(GCRYCTL_DISABLE_SECMEM, 0);
	if (gcry_error) {
		msg("encrypt: unable to disable libgcrypt secmem - "
		    "%s : %s\n",
		    gcry_strsource(gcry_error),
		    gcry_strerror(gcry_error));
		return NULL;
	}

	/* Finalize gcry initialization. */
	gcry_error = gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
	if (gcry_error) {
		msg("encrypt: unable to finish libgcrypt initialization - "
		    "%s : %s\n",
		    gcry_strsource(gcry_error),
		    gcry_strerror(gcry_error));
		return NULL;
	}

	/* Determine the algorithm */
	encrypt_algo = encrypt_algos[xtrabackup_encrypt_algo];

	/* Set up the iv length */
	encrypt_iv_len = gcry_cipher_get_algo_blklen(encrypt_algo);
	xb_a(encrypt_iv_len > 0);

	/* Now set up the key: exactly one of the key option and the key
	file option must be given. NOTE(review): the final 'else' below is
	unreachable — the first branch already catches the both-NULL case. */
	if (xtrabackup_encrypt_key == NULL &&
	    xtrabackup_encrypt_key_file == NULL) {
		msg("encrypt: no encryption key or key file specified.\n");
		return NULL;
	} else if (xtrabackup_encrypt_key && xtrabackup_encrypt_key_file) {
		msg("encrypt: both encryption key and key file specified.\n");
		return NULL;
	} else if (xtrabackup_encrypt_key_file) {
		if (!xb_crypt_read_key_file(xtrabackup_encrypt_key_file,
					    (void**)&xtrabackup_encrypt_key,
					    &encrypt_key_len)) {
			msg("encrypt: unable to read encryption key file"
			    " \"%s\".\n", xtrabackup_encrypt_key_file);
			return NULL;
		}
	} else if (xtrabackup_encrypt_key) {
		encrypt_key_len = strlen(xtrabackup_encrypt_key);
	} else {
		msg("encrypt: no encryption key or key file specified.\n");
		return NULL;
	}

	/* Create and initialize the worker threads */
	threads = create_worker_threads(xtrabackup_encrypt_threads);
	if (threads == NULL) {
		msg("encrypt: failed to create worker threads.\n");
		return NULL;
	}

	/* Single allocation: generic context followed by encrypt state. */
	ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) +
				       sizeof(ds_encrypt_ctxt_t),
				       MYF(MY_FAE));

	encrypt_ctxt = (ds_encrypt_ctxt_t *) (ctxt + 1);
	encrypt_ctxt->threads = threads;
	encrypt_ctxt->nthreads = xtrabackup_encrypt_threads;

	ctxt->ptr = encrypt_ctxt;
	ctxt->root = my_strdup(root, MYF(MY_FAE));

	return ctxt;
}
/* Open an encrypted file: open "<path>.xbcrypt" on the piped datasink
and attach an xbcrypt write stream that funnels encrypted chunks to it
through my_xb_crypt_write_callback().
@return a new datasink file handle, or NULL on error. */
static
ds_file_t *
encrypt_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
{
	ds_ctxt_t		*dest_ctxt;

	ds_encrypt_ctxt_t	*crypt_ctxt;
	ds_encrypt_file_t	*crypt_file;

	char			new_name[FN_REFLEN];
	ds_file_t		*file;

	xb_ad(ctxt->pipe_ctxt != NULL);
	dest_ctxt = ctxt->pipe_ctxt;

	crypt_ctxt = (ds_encrypt_ctxt_t *) ctxt->ptr;

	/* MY_ZEROFILL keeps the err: cleanup below safe even before
	dest_file has been assigned. */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_encrypt_file_t),
				       MYF(MY_FAE|MY_ZEROFILL));
	crypt_file = (ds_encrypt_file_t *) (file + 1);

	/* Append the .xbcrypt extension to the filename */
	fn_format(new_name, path, "", ".xbcrypt", MYF(MY_APPEND_EXT));
	crypt_file->dest_file = ds_open(dest_ctxt, new_name, mystat);
	if (crypt_file->dest_file == NULL) {
		msg("encrypt: ds_open(\"%s\") failed.\n", new_name);
		goto err;
	}

	crypt_file->crypt_ctxt = crypt_ctxt;
	crypt_file->xbcrypt_file = xb_crypt_write_open(crypt_file,
					my_xb_crypt_write_callback);

	if (crypt_file->xbcrypt_file == NULL) {
		msg("encrypt: xb_crypt_write_open() failed.\n");
		goto err;
	}

	file->ptr = crypt_file;
	file->path = crypt_file->dest_file->path;

	return file;

err:
	if (crypt_file->dest_file) {
		ds_close(crypt_file->dest_file);
	}
	my_free(file);
	return NULL;
}
/* Encrypt and stream a buffer. The input is split into chunks of at
most XB_CRYPT_CHUNK_SIZE, handed to the worker threads in order, then
reaped in the same order so the output preserves input ordering. Each
result is emitted as an xbcrypt chunk with its IV.
NOTE(review): the 'return 1' error path leaves data_mutex/ctrl_mutex of
the pending threads locked; presumably tolerable because the caller
aborts the backup on error — confirm. Same structure as compress_write()
in ds_compress.c.
@return 0 on success, 1 on error. */
static
int
encrypt_write(ds_file_t *file, const void *buf, size_t len)
{
	ds_encrypt_file_t	*crypt_file;
	ds_encrypt_ctxt_t	*crypt_ctxt;
	crypt_thread_ctxt_t	*threads;
	crypt_thread_ctxt_t	*thd;
	uint			nthreads;
	uint			i;
	const char		*ptr;

	crypt_file = (ds_encrypt_file_t *) file->ptr;
	crypt_ctxt = crypt_file->crypt_ctxt;

	threads = crypt_ctxt->threads;
	nthreads = crypt_ctxt->nthreads;

	ptr = (const char *) buf;

	while (len > 0) {
		uint max_thread;

		/* Send data to worker threads for encryption */
		for (i = 0; i < nthreads; i++) {
			size_t chunk_len;

			thd = threads + i;

			/* ctrl_mutex is held until the result has been
			reaped below, keeping this producer exclusive. */
			pthread_mutex_lock(&thd->ctrl_mutex);

			chunk_len = (len > XB_CRYPT_CHUNK_SIZE) ?
				XB_CRYPT_CHUNK_SIZE : len;
			thd->from = ptr;
			thd->from_len = chunk_len;

			/* Hand the chunk over and wake the worker. */
			pthread_mutex_lock(&thd->data_mutex);
			thd->data_avail = TRUE;
			pthread_cond_signal(&thd->data_cond);
			pthread_mutex_unlock(&thd->data_mutex);

			len -= chunk_len;
			if (len == 0) {
				break;
			}
			ptr += chunk_len;
		}

		/* Index of the last thread that actually got a chunk. */
		max_thread = (i < nthreads) ? i : nthreads - 1;

		/* Reap and stream the encrypted data */
		for (i = 0; i <= max_thread; i++) {
			thd = threads + i;

			/* Wait until the worker clears data_avail, i.e.
			the result is in thd->to / thd->to_len / thd->iv. */
			pthread_mutex_lock(&thd->data_mutex);
			while (thd->data_avail == TRUE) {
				pthread_cond_wait(&thd->data_cond,
						  &thd->data_mutex);
			}

			xb_a(threads[i].to_len > 0);

			/* from_len + XB_CRYPT_HASH_LEN is the original
			(plaintext + hash) length recorded in the chunk
			header — presumably the worker appends a hash to
			the payload; confirm against xbcrypt.h. */
			if (xb_crypt_write_chunk(crypt_file->xbcrypt_file,
						 threads[i].to,
						 threads[i].from_len +
						 XB_CRYPT_HASH_LEN,
						 threads[i].to_len,
						 threads[i].iv,
						 encrypt_iv_len)) {
				msg("encrypt: write to the destination file "
				    "failed.\n");
				return 1;
			}

			crypt_file->bytes_processed += threads[i].from_len;

			pthread_mutex_unlock(&threads[i].data_mutex);
			pthread_mutex_unlock(&threads[i].ctrl_mutex);
		}
	}

	return 0;
}
/************************************************************************
Close an encryption datasink file: finish the xbcrypt stream, close
the underlying destination file and free the handle.
@return 0 on success, nonzero on error. */
static
int
encrypt_close(ds_file_t *file)
{
	ds_encrypt_file_t	*crypt_file;
	ds_file_t		*dest_file;
	int			rc;

	crypt_file = (ds_encrypt_file_t *) file->ptr;
	dest_file = crypt_file->dest_file;

	/* Flush and finish the xbcrypt stream first ... */
	rc = xb_crypt_write_close(crypt_file->xbcrypt_file);

	/* ... then close the destination; either failure fails the
	whole close. */
	if (ds_close(dest_file)) {
		rc = 1;
	}

	my_free(file);

	return rc;
}
/************************************************************************
Shut down the encryption datasink: stop and destroy the worker
threads, then release the context and the global key / key-file
strings. */
static
void
encrypt_deinit(ds_ctxt_t *ctxt)
{
	ds_encrypt_ctxt_t	*crypt_ctxt;

	xb_ad(ctxt->pipe_ctxt != NULL);

	crypt_ctxt = (ds_encrypt_ctxt_t *) ctxt->ptr;

	destroy_worker_threads(crypt_ctxt->threads, crypt_ctxt->nthreads);

	my_free(ctxt->root);
	my_free(ctxt);

	if (xtrabackup_encrypt_key) {
		my_free(xtrabackup_encrypt_key);
	}
	if (xtrabackup_encrypt_key_file) {
		my_free(xtrabackup_encrypt_key_file);
	}
}
/************************************************************************
Allocate and start 'n' encryption worker threads.

Each thread slot gets: an output buffer 'to' (chunk plus trailing
hash), an IV buffer, a control mutex/condition pair (used by
encrypt_write() to serialize access to the slot) and a data
mutex/condition pair (used to hand chunks to the thread and wait for
completion). When encryption is enabled, a libgcrypt cipher handle is
opened and keyed per thread.

@return array of n thread contexts, or NULL on error.

NOTE(review): on the error path nothing allocated so far (the
'threads' array, per-thread buffers, initialized mutexes, and a
ctrl_mutex locked just before a failing pthread_create()) is released.
Callers treat NULL as fatal and terminate, so this is benign in
practice — confirm before reusing this helper elsewhere. */
static
crypt_thread_ctxt_t *
create_worker_threads(uint n)
{
	crypt_thread_ctxt_t	*threads;
	uint			i;

	/* MY_FAE makes my_malloc() abort the process on allocation
	failure, so no NULL checks are needed on the allocations below. */
	threads = (crypt_thread_ctxt_t *)
		my_malloc(sizeof(crypt_thread_ctxt_t) * n, MYF(MY_FAE));

	for (i = 0; i < n; i++) {
		crypt_thread_ctxt_t *thd = threads + i;

		thd->num = i + 1;
		thd->started = FALSE;
		thd->cancelled = FALSE;
		thd->data_avail = FALSE;

		/* Output buffer: encrypted chunk plus trailing hash */
		thd->to = (char *) my_malloc(XB_CRYPT_CHUNK_SIZE +
					     XB_CRYPT_HASH_LEN, MYF(MY_FAE));

		thd->iv = (char *) my_malloc(encrypt_iv_len,
					     MYF(MY_FAE));

		/* Initialize the control mutex and condition var */
		if (pthread_mutex_init(&thd->ctrl_mutex, NULL) ||
		    pthread_cond_init(&thd->ctrl_cond, NULL)) {
			goto err;
		}

		/* Initialize and data mutex and condition var */
		if (pthread_mutex_init(&thd->data_mutex, NULL) ||
		    pthread_cond_init(&thd->data_cond, NULL)) {
			goto err;
		}

		if (encrypt_algo != GCRY_CIPHER_NONE) {
			gcry_error_t	gcry_error;

			/* Open the per-thread cipher handle ... */
			gcry_error = gcry_cipher_open(&thd->cipher_handle,
						      encrypt_algo,
						      encrypt_mode, 0);
			if (gcry_error) {
				msg("encrypt: unable to open libgcrypt"
				    " cipher - %s : %s\n",
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				gcry_cipher_close(thd->cipher_handle);
				goto err;
			}

			/* ... and set the shared encryption key on it */
			gcry_error = gcry_cipher_setkey(thd->cipher_handle,
							xtrabackup_encrypt_key,
							encrypt_key_len);
			if (gcry_error) {
				msg("encrypt: unable to set libgcrypt"
				    " cipher key - %s : %s\n",
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				gcry_cipher_close(thd->cipher_handle);
				goto err;
			}
		}

		/* Hold ctrl_mutex before starting the thread so the
		"started" handshake below cannot miss the thread's
		ctrl_cond signal. */
		pthread_mutex_lock(&thd->ctrl_mutex);
		if (pthread_create(&thd->id, NULL, encrypt_worker_thread_func,
				   thd)) {
			msg("encrypt: pthread_create() failed: "
			    "errno = %d\n", errno);
			goto err;
		}
	}

	/* Wait for the threads to start */
	for (i = 0; i < n; i++) {
		crypt_thread_ctxt_t *thd = threads + i;

		while (thd->started == FALSE)
			pthread_cond_wait(&thd->ctrl_cond, &thd->ctrl_mutex);
		pthread_mutex_unlock(&thd->ctrl_mutex);
	}

	return threads;

err:
	return NULL;
}
/************************************************************************
Stop and release 'n' worker threads previously created by
create_worker_threads(), including their buffers, synchronization
objects and cipher handles. */
static
void
destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n)
{
	uint i;

	for (i = 0; i < n; i++) {
		crypt_thread_ctxt_t *thd = &threads[i];

		/* Wake the thread with the cancel flag set, then wait
		for it to exit. */
		pthread_mutex_lock(&thd->data_mutex);
		thd->cancelled = TRUE;
		pthread_cond_signal(&thd->data_cond);
		pthread_mutex_unlock(&thd->data_mutex);

		pthread_join(thd->id, NULL);

		pthread_cond_destroy(&thd->data_cond);
		pthread_mutex_destroy(&thd->data_mutex);
		pthread_cond_destroy(&thd->ctrl_cond);
		pthread_mutex_destroy(&thd->ctrl_mutex);

		if (encrypt_algo != GCRY_CIPHER_NONE) {
			gcry_cipher_close(thd->cipher_handle);
		}

		my_free(thd->to);
		my_free(thd->iv);
	}

	my_free(threads);
}
/************************************************************************
Worker thread main loop for parallel encryption.

Handshake: sets 'started' and signals ctrl_cond once, then holds
data_mutex for its entire lifetime except while waiting on data_cond.
Each iteration clears 'data_avail' (signalling the writer that the
previous chunk is done), waits for encrypt_write() to publish a new
chunk, hashes and (optionally) encrypts it into thd->to, and sets
thd->to_len. Exits when 'cancelled' is set by
destroy_worker_threads(). */
static
void *
encrypt_worker_thread_func(void *arg)
{
	crypt_thread_ctxt_t *thd = (crypt_thread_ctxt_t *) arg;

	pthread_mutex_lock(&thd->ctrl_mutex);

	pthread_mutex_lock(&thd->data_mutex);

	thd->started = TRUE;
	pthread_cond_signal(&thd->ctrl_cond);

	pthread_mutex_unlock(&thd->ctrl_mutex);

	while (1) {
		/* Report the previous chunk (if any) as consumed and
		wait for the next one or for cancellation. */
		thd->data_avail = FALSE;
		pthread_cond_signal(&thd->data_cond);

		while (!thd->data_avail && !thd->cancelled) {
			pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
		}

		if (thd->cancelled)
			break;

		/* ensure that XB_CRYPT_HASH_LEN is the correct length
		of XB_CRYPT_HASH hashing algorithm output */
		assert(gcry_md_get_algo_dlen(XB_CRYPT_HASH) ==
		       XB_CRYPT_HASH_LEN);

		/* Lay out plaintext followed by its hash in thd->to;
		encrypting the hash together with the data lets
		decryption verify integrity. */
		memcpy(thd->to, thd->from, thd->from_len);
		gcry_md_hash_buffer(XB_CRYPT_HASH, thd->to + thd->from_len,
				    thd->from, thd->from_len);

		thd->to_len = thd->from_len;

		if (encrypt_algo != GCRY_CIPHER_NONE) {
			gcry_error_t	gcry_error;

			gcry_error = gcry_cipher_reset(thd->cipher_handle);

			if (gcry_error) {
				msg("encrypt: unable to reset cipher - "
				    "%s : %s\n",
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				/* NOTE(review): to_len = 0 + 'continue'
				reports completion with an empty result;
				the writer then fails its
				xb_a(to_len > 0) assertion and aborts —
				apparently the intended "crash on cipher
				error" behavior. Confirm before relying
				on graceful error handling here. */
				thd->to_len = 0;
				continue;
			}

			/* Fresh random IV per chunk: used as the CTR
			start value and stored alongside the chunk. */
			xb_crypt_create_iv(thd->iv, encrypt_iv_len);
			gcry_error = gcry_cipher_setctr(thd->cipher_handle,
							thd->iv,
							encrypt_iv_len);
			if (gcry_error) {
				msg("encrypt: unable to set cipher ctr - "
				    "%s : %s\n",
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				thd->to_len = 0;
				continue;
			}

			/* In-place encryption of data + hash */
			gcry_error = gcry_cipher_encrypt(thd->cipher_handle,
							 thd->to,
							 thd->to_len +
							 XB_CRYPT_HASH_LEN,
							 thd->to,
							 thd->from_len +
							 XB_CRYPT_HASH_LEN);
			if (gcry_error) {
				msg("encrypt: unable to encrypt buffer - "
				    "%s : %s\n", gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				thd->to_len = 0;
			}
		} else {
			/* NOTE(review): this copies from_len +
			XB_CRYPT_HASH_LEN bytes from thd->from, which
			the writer only filled with from_len bytes —
			looks like an over-read that also clobbers the
			hash written above. Confirm against the xbcrypt
			format before changing. */
			memcpy(thd->to, thd->from,
			       thd->from_len + XB_CRYPT_HASH_LEN);
		}

		thd->to_len += XB_CRYPT_HASH_LEN;
	}

	pthread_mutex_unlock(&thd->data_mutex);

	return NULL;
}
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Encryption interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_ENCRYPT_H
#define DS_ENCRYPT_H
#include "datasink.h"
extern datasink_t datasink_encrypt;
#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Local datasink implementation for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include <mysys_err.h>
#include "common.h"
#include "datasink.h"
/* Per-file state for the local datasink: the open file descriptor.
The file path is stored in the enclosing ds_file_t allocation. */
typedef struct {
	File fd;
} ds_local_file_t;

/* Datasink interface implementation, see datasink_t below */
static ds_ctxt_t *local_init(const char *root);
static ds_file_t *local_open(ds_ctxt_t *ctxt, const char *path,
			     MY_STAT *mystat);
static int local_write(ds_file_t *file, const void *buf, size_t len);
static int local_close(ds_file_t *file);
static void local_deinit(ds_ctxt_t *ctxt);

/* "local" datasink: writes each file into a local directory tree
rooted at the datasink root directory. */
datasink_t datasink_local = {
	&local_init,
	&local_open,
	&local_write,
	&local_close,
	&local_deinit
};
/************************************************************************
Initialize the local datasink: create the root directory if needed and
allocate the context.
@return datasink context, or NULL if the root cannot be created. */
static
ds_ctxt_t *
local_init(const char *root)
{
	ds_ctxt_t *ctxt;

	/* An already-existing root directory is not an error */
	if (my_mkdir(root, 0777, MYF(0)) < 0
	    && my_errno != EEXIST && my_errno != EISDIR) {
		char errbuf[MYSYS_STRERROR_SIZE];

		my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG),
			 root, my_errno,
			 my_strerror(errbuf, sizeof(errbuf), my_errno));
		return NULL;
	}

	ctxt = my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE));
	ctxt->root = my_strdup(root, MYF(MY_FAE));

	return ctxt;
}
/************************************************************************
Create a file under the datasink root directory, creating its parent
directory as needed. The file and its per-file state share one
allocation with the enclosing ds_file_t.
@return datasink file handle, or NULL on error. */
static
ds_file_t *
local_open(ds_ctxt_t *ctxt, const char *path,
	   MY_STAT *mystat __attribute__((unused)))
{
	char		fullpath[FN_REFLEN];
	char		dirpath[FN_REFLEN];
	size_t		dirpath_len;
	size_t		path_len;
	ds_local_file_t	*local_file;
	ds_file_t	*file;
	File		fd;

	/* Resolve 'path' relative to the datasink root */
	fn_format(fullpath, path, ctxt->root, "", MYF(MY_RELATIVE_PATH));

	/* Make sure the parent directory exists */
	dirname_part(dirpath, fullpath, &dirpath_len);
	if (my_mkdir(dirpath, 0777, MYF(0)) < 0 && my_errno != EEXIST) {
		char errbuf[MYSYS_STRERROR_SIZE];

		my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG),
			 dirpath, my_errno,
			 my_strerror(errbuf, sizeof(errbuf), my_errno));
		return NULL;
	}

	/* O_EXCL | O_NOFOLLOW: never overwrite an existing file and
	never follow a symlink (my_create reports errors via MY_WME). */
	fd = my_create(fullpath, 0, O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
		       MYF(MY_WME));
	if (fd < 0) {
		return NULL;
	}

	/* Allocate the handle, per-file state and path copy in one chunk */
	path_len = strlen(fullpath) + 1; /* terminating '\0' */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_local_file_t) +
				       path_len,
				       MYF(MY_FAE));
	local_file = (ds_local_file_t *) (file + 1);
	local_file->fd = fd;

	file->path = (char *) local_file + sizeof(ds_local_file_t);
	memcpy(file->path, fullpath, path_len);
	file->ptr = local_file;

	return file;
}
/************************************************************************
Write a buffer to a local datasink file.
@return 0 on success, 1 on error. */
static
int
local_write(ds_file_t *file, const void *buf, size_t len)
{
	File fd = ((ds_local_file_t *) file->ptr)->fd;

	/* MY_NABP: my_write() returns 0 only for a complete write */
	if (my_write(fd, buf, len, MYF(MY_WME | MY_NABP)) == 0) {
		/* The backup never re-reads this data; let the kernel
		drop it from the page cache. */
		posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
		return 0;
	}

	return 1;
}
/************************************************************************
Close a local datasink file: flush it to disk and close the
descriptor.
@return 0 on success, nonzero on error. */
static
int
local_close(ds_file_t *file)
{
	File	fd = ((ds_local_file_t *) file->ptr)->fd;
	int	rc = 0;

	my_free(file);

	/* Bug fix: the my_sync() result used to be discarded. A failed
	fsync means the backup file may not be durable on disk, which
	must be reported as an error. */
	if (my_sync(fd, MYF(MY_WME))) {
		rc = 1;
	}

	if (my_close(fd, MYF(MY_WME))) {
		rc = 1;
	}

	return rc;
}
/************************************************************************
Release the local datasink context. */
static
void
local_deinit(ds_ctxt_t *ctxt)
{
	my_free(ctxt->root);
	my_free(ctxt);
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Local datasink interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_LOCAL_H
#define DS_LOCAL_H
#include "datasink.h"
extern datasink_t datasink_local;
#endif
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Stdout datasink implementation for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_base.h>
#include <mysys_err.h>
#include "common.h"
#include "datasink.h"
/* Per-file state for the stdout datasink: the stdout descriptor */
typedef struct {
	File fd;
} ds_stdout_file_t;

/* Datasink interface implementation, see datasink_t below */
static ds_ctxt_t *stdout_init(const char *root);
static ds_file_t *stdout_open(ds_ctxt_t *ctxt, const char *path,
			      MY_STAT *mystat);
static int stdout_write(ds_file_t *file, const void *buf, size_t len);
static int stdout_close(ds_file_t *file);
static void stdout_deinit(ds_ctxt_t *ctxt);

/* "stdout" datasink: streams everything to the standard output */
datasink_t datasink_stdout = {
	&stdout_init,
	&stdout_open,
	&stdout_write,
	&stdout_close,
	&stdout_deinit
};
/************************************************************************
Initialize the stdout datasink context. 'root' is stored only for
interface symmetry; no directory is created or used.
@return datasink context (never NULL: allocations use MY_FAE). */
static
ds_ctxt_t *
stdout_init(const char *root)
{
	ds_ctxt_t *ctxt = my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE));

	ctxt->root = my_strdup(root, MYF(MY_FAE));

	return ctxt;
}
/************************************************************************
"Open" the standard output as a datasink file. All arguments are
ignored; the handle always refers to stdout and reports its path as
"<STDOUT>".
@return datasink file handle (never NULL: allocation uses MY_FAE). */
static
ds_file_t *
stdout_open(ds_ctxt_t *ctxt __attribute__((unused)),
	    const char *path __attribute__((unused)),
	    MY_STAT *mystat __attribute__((unused)))
{
	ds_stdout_file_t	*stdout_file;
	ds_file_t		*file;
	const char		name[] = "<STDOUT>";
	size_t			name_len = sizeof(name); /* incl. '\0' */

	/* Handle, per-file state and pseudo-path share one allocation */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_stdout_file_t) +
				       name_len,
				       MYF(MY_FAE));
	stdout_file = (ds_stdout_file_t *) (file + 1);

#ifdef __WIN__
	/* Prevent CRLF translation from mangling the binary stream */
	setmode(fileno(stdout), _O_BINARY);
#endif

	stdout_file->fd = fileno(stdout);

	file->path = (char *) stdout_file + sizeof(ds_stdout_file_t);
	memcpy(file->path, name, name_len);
	file->ptr = stdout_file;

	return file;
}
/************************************************************************
Write a buffer to standard output.
@return 0 on success, 1 on error. */
static
int
stdout_write(ds_file_t *file, const void *buf, size_t len)
{
	File fd = ((ds_stdout_file_t *) file->ptr)->fd;

	/* MY_NABP: my_write() returns 0 only for a complete write */
	if (my_write(fd, buf, len, MYF(MY_WME | MY_NABP)) == 0) {
		posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
		return 0;
	}

	return 1;
}
/************************************************************************
"Close" a stdout datasink file. Only the handle is freed; the stdout
descriptor itself stays open. */
static
int
stdout_close(ds_file_t *file)
{
	my_free(file);

	/* NOTE(review): returning 1 signals failure in every other
	datasink's close, yet this close always succeeds — it looks like
	this should be 0. It matches the upstream implementation, so
	confirm no caller depends on it before changing. */
	return 1;
}
/************************************************************************
Release the stdout datasink context. stdout itself is left open. */
static
void
stdout_deinit(ds_ctxt_t *ctxt)
{
	my_free(ctxt->root);
	my_free(ctxt);
}
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Stdout datasink interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_STDOUT_H
#define DS_STDOUT_H
#include "datasink.h"
extern datasink_t datasink_stdout;
#endif
/******************************************************
Copyright (c) 2012 Percona LLC and/or its affiliates.
tmpfile datasink for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Do all writes to temporary files first, then pipe them to the specified
datasink in a serialized way in deinit(). */
#include <my_base.h>
#include "common.h"
#include "datasink.h"
/* Datasink context state: a mutex-protected list of every temporary
file created so far; tmpfile_deinit() pipes them to the destination
datasink in creation order. */
typedef struct {
	pthread_mutex_t	mutex;
	LIST		*file_list;
} ds_tmpfile_ctxt_t;

/* Per-file state: the temp file descriptor, the originally requested
path and stats (replayed when piping to the destination), the list
node, and a back pointer to the owning ds_file_t. */
typedef struct {
	LIST		list;
	File		fd;
	char		*orig_path;
	MY_STAT		mystat;
	ds_file_t	*file;
} ds_tmp_file_t;

/* Datasink interface implementation, see datasink_t below */
static ds_ctxt_t *tmpfile_init(const char *root);
static ds_file_t *tmpfile_open(ds_ctxt_t *ctxt, const char *path,
			       MY_STAT *mystat);
static int tmpfile_write(ds_file_t *file, const void *buf, size_t len);
static int tmpfile_close(ds_file_t *file);
static void tmpfile_deinit(ds_ctxt_t *ctxt);

/* "tmpfile" datasink: buffers all writes in temporary files and pipes
them to the destination datasink serially in deinit(). */
datasink_t datasink_tmpfile = {
	&tmpfile_init,
	&tmpfile_open,
	&tmpfile_write,
	&tmpfile_close,
	&tmpfile_deinit
};

/* Temporary directory list used by create_temp_file(); initialized
elsewhere before the datasink is used. */
MY_TMPDIR mysql_tmpdir_list;
/************************************************************************
Initialize the tmpfile datasink: allocate the context together with
its embedded state and set up the file-list mutex.
@return datasink context, or NULL if the mutex cannot be created. */
static ds_ctxt_t *
tmpfile_init(const char *root)
{
	ds_ctxt_t		*ctxt;
	ds_tmpfile_ctxt_t	*tmpfile_ctxt;

	/* Context and its state live in a single allocation */
	ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_tmpfile_ctxt_t),
			 MYF(MY_FAE));
	tmpfile_ctxt = (ds_tmpfile_ctxt_t *) (ctxt + 1);
	tmpfile_ctxt->file_list = NULL;

	if (pthread_mutex_init(&tmpfile_ctxt->mutex, NULL) != 0) {
		my_free(ctxt);
		return NULL;
	}

	ctxt->ptr = tmpfile_ctxt;
	ctxt->root = my_strdup(root, MYF(MY_FAE));

	return ctxt;
}
/************************************************************************
Create a new temporary file in tmpdir and register it in the context's
file list together with the originally requested 'path' and 'mystat',
so tmpfile_deinit() can later replay it into the destination datasink.
@return datasink file handle, or NULL on error. */
static ds_file_t *
tmpfile_open(ds_ctxt_t *ctxt, const char *path,
	     MY_STAT *mystat)
{
	ds_tmpfile_ctxt_t	*tmpfile_ctxt;
	char			tmp_path[FN_REFLEN];
	ds_tmp_file_t		*tmp_file;
	ds_file_t		*file;
	size_t			path_len;
	File			fd;

	/* Create a temporary file in tmpdir. The file will be automatically
	removed on close. Code copied from mysql_tmpfile(). */
	fd = create_temp_file(tmp_path, my_tmpdir(&mysql_tmpdir_list),
			      "xbtemp",
#ifdef __WIN__
			      O_BINARY | O_TRUNC | O_SEQUENTIAL |
			      O_TEMPORARY | O_SHORT_LIVED |
#endif /* __WIN__ */
			      O_CREAT | O_EXCL | O_RDWR,
			      MYF(MY_WME));
#ifndef __WIN__
	if (fd >= 0) {
		/* On Windows, open files cannot be removed, but files can be
		created with the O_TEMPORARY flag to the same effect
		("delete on close"). */
		unlink(tmp_path);
	}
#endif /* !__WIN__ */

	if (fd < 0) {
		return NULL;
	}

	/* One allocation holds the handle, the per-file state and the
	copy of the original path. */
	path_len = strlen(path) + 1; /* terminating '\0' */
	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_tmp_file_t) + path_len,
				       MYF(MY_FAE));

	tmp_file = (ds_tmp_file_t *) (file + 1);
	tmp_file->file = file;
	memcpy(&tmp_file->mystat, mystat, sizeof(MY_STAT));
	/* Save a copy of 'path', since it may not be accessible later */
	tmp_file->orig_path = (char *) tmp_file + sizeof(ds_tmp_file_t);

	tmp_file->fd = fd;
	memcpy(tmp_file->orig_path, path, path_len);

	/* Store the real temporary file name in file->path */
	file->path = my_strdup(tmp_path, MYF(MY_FAE));
	file->ptr = tmp_file;

	/* Store the file object in the list to be piped later */
	tmp_file->list.data = tmp_file;
	tmpfile_ctxt = (ds_tmpfile_ctxt_t *) ctxt->ptr;

	pthread_mutex_lock(&tmpfile_ctxt->mutex);
	tmpfile_ctxt->file_list = list_add(tmpfile_ctxt->file_list,
					   &tmp_file->list);
	pthread_mutex_unlock(&tmpfile_ctxt->mutex);

	return file;
}
/************************************************************************
Append a buffer to the temporary file.
@return 0 on success, 1 on error. */
static int
tmpfile_write(ds_file_t *file, const void *buf, size_t len)
{
	File fd = ((ds_tmp_file_t *) file->ptr)->fd;

	/* MY_NABP: anything short of a complete write is a failure */
	if (my_write(fd, buf, len, MYF(MY_WME | MY_NABP)) == 0) {
		/* Hint the kernel that the data need not stay cached */
		posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
		return 0;
	}

	return 1;
}
/************************************************************************
Close a tmpfile datasink file.
@return always 0. */
static int
tmpfile_close(ds_file_t *file)
{
	/* Do nothing -- we will close (and thus remove) the file after piping
	it to the destination datasink in tmpfile_deinit(). Only the
	strdup'ed temporary path is released here; the ds_file_t itself
	is freed in tmpfile_deinit() after piping. */
	my_free(file->path);

	return 0;
}
/************************************************************************
Pipe all temporary files, in creation order, to the destination
datasink, then release the context. Any failure while piping is fatal
for the backup and terminates the process. */
static void
tmpfile_deinit(ds_ctxt_t *ctxt)
{
	LIST			*list;
	ds_tmpfile_ctxt_t	*tmpfile_ctxt;
	MY_STAT			mystat;
	ds_tmp_file_t		*tmp_file;
	ds_file_t		*dst_file;
	ds_ctxt_t		*pipe_ctxt;
	void			*buf;
	const size_t		buf_size = 10 * 1024 * 1024;
	size_t			bytes;
	size_t			offset;

	pipe_ctxt = ctxt->pipe_ctxt;
	xb_a(pipe_ctxt != NULL);

	buf = my_malloc(buf_size, MYF(MY_FAE));

	tmpfile_ctxt = (ds_tmpfile_ctxt_t *) ctxt->ptr;
	list = tmpfile_ctxt->file_list;

	/* Walk the files in the order they have been added */
	list = list_reverse(list);
	while (list != NULL) {
		tmp_file = list->data;
		/* Refresh size and mtime in the stats passed to the
		destination: the file has been written to since open. */
		if (my_fstat(tmp_file->fd, &mystat, MYF(0))) {
			msg("error: my_fstat() failed.\n");
			exit(EXIT_FAILURE);
		}
		tmp_file->mystat.st_size = mystat.st_size;
		tmp_file->mystat.st_mtime = mystat.st_mtime;

		dst_file = ds_open(pipe_ctxt, tmp_file->orig_path,
				   &tmp_file->mystat);
		if (dst_file == NULL) {
			msg("error: could not stream a temporary file to "
			    "'%s'\n", tmp_file->orig_path);
			exit(EXIT_FAILURE);
		}

		/* copy to the destination datasink */
		posix_fadvise(tmp_file->fd, 0, 0, POSIX_FADV_SEQUENTIAL);
		if (my_seek(tmp_file->fd, 0, SEEK_SET, MYF(0)) ==
		    MY_FILEPOS_ERROR) {
			msg("error: my_seek() failed for '%s', errno = %d.\n",
			    tmp_file->file->path, my_errno);
			exit(EXIT_FAILURE);
		}

		offset = 0;
		for (;;) {
			bytes = my_read(tmp_file->fd, buf, buf_size,
					MYF(MY_WME));
			/* Bug fix: my_read() reports errors as
			(size_t) -1, which the old 'bytes > 0' loop
			condition treated as a valid (huge) length and
			passed straight to ds_write(); the error check
			after the loop was unreachable. Check the error
			value before using 'bytes'. */
			if (bytes == (size_t) -1) {
				exit(EXIT_FAILURE);
			}
			if (bytes == 0) {
				break;
			}
			posix_fadvise(tmp_file->fd, offset, buf_size,
				      POSIX_FADV_DONTNEED);
			offset += buf_size;
			if (ds_write(dst_file, buf, bytes)) {
				msg("error: cannot write to stream for "
				    "'%s'.\n", tmp_file->orig_path);
				exit(EXIT_FAILURE);
			}
		}

		my_close(tmp_file->fd, MYF(MY_WME));
		ds_close(dst_file);

		list = list_rest(list);
		my_free(tmp_file->file);
	}

	pthread_mutex_destroy(&tmpfile_ctxt->mutex);

	my_free(buf);
	my_free(ctxt->root);
	my_free(ctxt);
}
/******************************************************
Copyright (c) 2012 Percona LLC and/or its affiliates.
tmpfile datasink for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_TMPFILE_H
#define DS_TMPFILE_H
#include "datasink.h"
extern datasink_t datasink_tmpfile;
extern MY_TMPDIR mysql_tmpdir_list;
#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Streaming implementation for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include "common.h"
#include "datasink.h"
#include "xbstream.h"
/* Shared per-context state: one xbstream writer and one destination
file multiplex every file opened in this context; the mutex guards the
lazy creation of dest_file in xbstream_open(). */
typedef struct {
	xb_wstream_t	*xbstream;
	ds_file_t	*dest_file;
	pthread_mutex_t	mutex;
} ds_stream_ctxt_t;

/* Per-file state: the xbstream sub-stream and its owning context */
typedef struct {
	xb_wstream_file_t	*xbstream_file;
	ds_stream_ctxt_t	*stream_ctxt;
} ds_stream_file_t;

/***********************************************************************
General streaming interface */

static ds_ctxt_t *xbstream_init(const char *root);
static ds_file_t *xbstream_open(ds_ctxt_t *ctxt, const char *path,
				MY_STAT *mystat);
static int xbstream_write(ds_file_t *file, const void *buf, size_t len);
static int xbstream_close(ds_file_t *file);
static void xbstream_deinit(ds_ctxt_t *ctxt);

/* "xbstream" datasink: serializes all files into one xbstream archive */
datasink_t datasink_xbstream = {
	&xbstream_init,
	&xbstream_open,
	&xbstream_write,
	&xbstream_close,
	&xbstream_deinit
};
/************************************************************************
Write callback handed to the xbstream writer: forwards a serialized
stream chunk to the destination datasink file.
@return number of bytes written, or -1 on error. */
static
ssize_t
my_xbstream_write_callback(xb_wstream_file_t *f __attribute__((unused)),
			   void *userdata, const void *buf, size_t len)
{
	ds_stream_ctxt_t *stream_ctxt = (ds_stream_ctxt_t *) userdata;

	xb_ad(stream_ctxt != NULL);
	xb_ad(stream_ctxt->dest_file != NULL);

	if (ds_write(stream_ctxt->dest_file, buf, len) == 0) {
		return len;
	}

	return -1;
}
/************************************************************************
Initialize the xbstream datasink: create the xbstream writer shared by
all files opened in this context.
@return datasink context, or NULL on error. */
static
ds_ctxt_t *
xbstream_init(const char *root __attribute__((unused)))
{
	ds_ctxt_t		*ctxt;
	ds_stream_ctxt_t	*stream_ctxt;
	xb_wstream_t		*xbstream;

	/* Context and its state live in a single allocation */
	ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_stream_ctxt_t),
			 MYF(MY_FAE));
	stream_ctxt = (ds_stream_ctxt_t *)(ctxt + 1);

	if (pthread_mutex_init(&stream_ctxt->mutex, NULL)) {
		msg("xbstream_init: pthread_mutex_init() failed.\n");
		goto err;
	}

	xbstream = xb_stream_write_new();
	if (xbstream == NULL) {
		msg("xb_stream_write_new() failed.\n");
		/* Bug fix: the mutex initialized above used to be
		leaked on this path. */
		pthread_mutex_destroy(&stream_ctxt->mutex);
		goto err;
	}
	stream_ctxt->xbstream = xbstream;
	stream_ctxt->dest_file = NULL;

	ctxt->ptr = stream_ctxt;

	return ctxt;

err:
	my_free(ctxt);
	return NULL;
}
/************************************************************************
Open a file in the xbstream datasink. All files of the context are
multiplexed into a single destination file, which is lazily created on
the first open.
@return datasink file handle, or NULL on error. */
static
ds_file_t *
xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
{
	ds_file_t		*file;
	ds_stream_file_t	*stream_file;
	ds_stream_ctxt_t	*stream_ctxt;
	ds_ctxt_t		*dest_ctxt;
	xb_wstream_t		*xbstream;
	xb_wstream_file_t	*xbstream_file;

	xb_ad(ctxt->pipe_ctxt != NULL);
	dest_ctxt = ctxt->pipe_ctxt;

	stream_ctxt = (ds_stream_ctxt_t *) ctxt->ptr;

	/* Create the shared destination file on first use */
	pthread_mutex_lock(&stream_ctxt->mutex);
	if (stream_ctxt->dest_file == NULL) {
		stream_ctxt->dest_file = ds_open(dest_ctxt, path, mystat);
		if (stream_ctxt->dest_file == NULL) {
			/* Bug fix: this path used to return with the
			mutex still locked, deadlocking every later
			xbstream_open() call. */
			pthread_mutex_unlock(&stream_ctxt->mutex);
			return NULL;
		}
	}
	pthread_mutex_unlock(&stream_ctxt->mutex);

	file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
				       sizeof(ds_stream_file_t),
				       MYF(MY_FAE));
	stream_file = (ds_stream_file_t *) (file + 1);

	xbstream = stream_ctxt->xbstream;

	xbstream_file = xb_stream_write_open(xbstream, path, mystat,
					     stream_ctxt,
					     my_xbstream_write_callback);

	if (xbstream_file == NULL) {
		msg("xb_stream_write_open() failed.\n");
		goto err;
	}

	stream_file->xbstream_file = xbstream_file;
	stream_file->stream_ctxt = stream_ctxt;
	file->ptr = stream_file;
	file->path = stream_ctxt->dest_file->path;

	return file;

err:
	if (stream_ctxt->dest_file) {
		ds_close(stream_ctxt->dest_file);
		stream_ctxt->dest_file = NULL;
	}
	my_free(file);

	return NULL;
}
/************************************************************************
Write a buffer into the file's xbstream sub-stream.
@return 0 on success, 1 on error. */
static
int
xbstream_write(ds_file_t *file, const void *buf, size_t len)
{
	ds_stream_file_t *stream_file = (ds_stream_file_t *) file->ptr;

	if (xb_stream_write_data(stream_file->xbstream_file, buf, len)) {
		msg("xb_stream_write_data() failed.\n");
		return 1;
	}

	return 0;
}
/************************************************************************
Close an xbstream datasink file: ends its sub-stream and frees the
handle. The shared destination file stays open until deinit.
@return result of xb_stream_write_close(). */
static
int
xbstream_close(ds_file_t *file)
{
	ds_stream_file_t	*stream_file;
	int			rc;

	stream_file = (ds_stream_file_t *) file->ptr;

	rc = xb_stream_write_close(stream_file->xbstream_file);
	my_free(file);

	return rc;
}
/************************************************************************
Shut down the xbstream datasink: finalize the stream, close the shared
destination file and release the context. */
static
void
xbstream_deinit(ds_ctxt_t *ctxt)
{
	ds_stream_ctxt_t	*stream_ctxt;

	stream_ctxt = (ds_stream_ctxt_t *) ctxt->ptr;

	if (xb_stream_write_done(stream_ctxt->xbstream)) {
		/* Bug fix: the message used to name a nonexistent
		"xb_stream_done()" function. */
		msg("xb_stream_write_done() failed.\n");
	}

	if (stream_ctxt->dest_file) {
		ds_close(stream_ctxt->dest_file);
		stream_ctxt->dest_file = NULL;
	}

	pthread_mutex_destroy(&stream_ctxt->mutex);

	my_free(ctxt);
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
Streaming interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef DS_XBSTREAM_H
#define DS_XBSTREAM_H
#include "datasink.h"
extern datasink_t datasink_xbstream;
#endif
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2013 Percona LLC and/or its affiliates.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Source file cursor implementation */
#include <my_base.h>
#include <univ.i>
#include <fil0fil.h>
#include <srv0start.h>
#include <trx0sys.h>
#include "fil_cur.h"
#include "common.h"
#include "read_filt.h"
#include "xtrabackup.h"
/* Size of read buffer in pages (640 pages = 10M for 16K sized pages) */
#define XB_FIL_CUR_PAGES 640
/***********************************************************************
Extracts the relative path ("database/table.ibd") of a tablespace from a
specified possibly absolute path.
For user tablespaces both "./database/table.ibd" and
"/remote/dir/database/table.ibd" result in "database/table.ibd".
For system tablepsaces (i.e. When is_system is TRUE) both "/remote/dir/ibdata1"
and "./ibdata1" yield "ibdata1" in the output. */
const char *
xb_get_relative_path(
/*=================*/
	const char*	path,		/*!< in: tablespace path (either
					relative or absolute) */
	ibool		is_system)	/*!< in: TRUE for system tablespaces,
					i.e. when only the filename must be
					returned. */
{
	const char	*last_comp;	/* start of the last path component */
	const char	*prev_comp;	/* start of the one before it */
	const char	*sep;

	prev_comp = NULL;
	last_comp = path;

	/* Walk over each separator, remembering where the last two
	path components begin. */
	for (sep = strchr(last_comp, SRV_PATH_SEPARATOR);
	     sep != NULL;
	     sep = strchr(last_comp, SRV_PATH_SEPARATOR)) {
		prev_comp = last_comp;
		last_comp = sep + 1;
	}

	if (is_system) {
		/* System tablespaces: the filename only */
		return(last_comp);
	}

	/* User tablespaces: "database/table.ibd" when a directory part
	exists, otherwise the path as-is. */
	return((prev_comp == NULL) ? last_comp : prev_comp);
}
/**********************************************************************//**
Closes a file node of a tablespace. The node must have no pending I/O,
no pending flushes and must not be being extended. Updates the
fil_system open-file accounting and removes user-tablespace nodes from
the LRU list. A node that is not open is left untouched. */
static
void
xb_fil_node_close_file(
/*===================*/
	fil_node_t*	node)	/*!< in: file node */
{
	ibool	ret;

	mutex_enter(&fil_system->mutex);

	ut_ad(node);
	ut_a(node->n_pending == 0);
	ut_a(node->n_pending_flushes == 0);
	ut_a(!node->being_extended);

	if (!node->open) {
		/* Nothing to do for a node that is not open */
		mutex_exit(&fil_system->mutex);
		return;
	}

	ret = os_file_close(node->handle);
	ut_a(ret);

	node->open = FALSE;

	/* Keep the global open-file counters consistent */
	ut_a(fil_system->n_open > 0);
	fil_system->n_open--;
	fil_n_file_opened--;

	if (node->space->purpose == FIL_TABLESPACE &&
	    fil_is_user_tablespace_id(node->space->id)) {
		ut_a(UT_LIST_GET_LEN(fil_system->LRU) > 0);

		/* The node is in the LRU list, remove it */
		UT_LIST_REMOVE(LRU, fil_system->LRU, node);
	}

	mutex_exit(&fil_system->mutex);
}
/************************************************************************
Open a source file cursor and initialize the associated read filter.

Fills in the cursor paths, opens the file handle if it is not already
open (registering it with fil_system), determines the page size and
allocates an aligned read buffer of XB_FIL_CUR_PAGES pages.

@return XB_FIL_CUR_SUCCESS on success, XB_FIL_CUR_SKIP if the source file must
be skipped and XB_FIL_CUR_ERROR on error. */
xb_fil_cur_result_t
xb_fil_cur_open(
/*============*/
	xb_fil_cur_t*	cursor,		/*!< out: source file cursor */
	xb_read_filt_t*	read_filter,	/*!< in/out: the read filter */
	fil_node_t*	node,		/*!< in: source tablespace node */
	uint		thread_n)	/*!< thread number for diagnostics */
{
	ulint	page_size;
	ulint	page_size_shift;
	ulint	zip_size;
	ibool	success;

	/* Initialize these first so xb_fil_cur_close() handles them correctly
	in case of error */
	cursor->orig_buf = NULL;
	cursor->node = NULL;

	cursor->space_id = node->space->id;
	cursor->is_system = !fil_is_user_tablespace_id(node->space->id);

	/* strncpy() does not NUL-terminate the destination when the source
	is at least as long as the buffer, so terminate explicitly to keep
	the later "%s" uses of abs_path within bounds. */
	strncpy(cursor->abs_path, node->name, sizeof(cursor->abs_path) - 1);
	cursor->abs_path[sizeof(cursor->abs_path) - 1] = '\0';

	/* Get the relative path for the destination tablespace name, i.e. the
	one that can be appended to the backup root directory. Non-system
	tablespaces may have absolute paths for remote tablespaces in MySQL
	5.6+. We want to make "local" copies for the backup. */
	strncpy(cursor->rel_path,
		xb_get_relative_path(cursor->abs_path, cursor->is_system),
		sizeof(cursor->rel_path) - 1);
	cursor->rel_path[sizeof(cursor->rel_path) - 1] = '\0';

	/* In the backup mode we should already have a tablespace handle created
	by fil_load_single_table_tablespace() unless it is a system
	tablespace. Otherwise we open the file here. */
	if (cursor->is_system || !srv_backup_mode || srv_close_files) {
		node->handle =
			os_file_create_simple_no_error_handling(0, node->name,
								OS_FILE_OPEN,
								OS_FILE_READ_ONLY,
								&success);
		if (!success) {
			/* The following call prints an error message */
			os_file_get_last_error(TRUE);

			msg("[%02u] xtrabackup: error: cannot open "
			    "tablespace %s\n",
			    thread_n, cursor->abs_path);

			return(XB_FIL_CUR_ERROR);
		}

		/* Register the newly-opened handle with fil_system; all
		such state is modified under the fil_system mutex. */
		mutex_enter(&fil_system->mutex);

		node->open = TRUE;

		fil_system->n_open++;
		fil_n_file_opened++;

		if (node->space->purpose == FIL_TABLESPACE &&
		    fil_is_user_tablespace_id(node->space->id)) {
			/* Put the node to the LRU list */
			UT_LIST_ADD_FIRST(LRU, fil_system->LRU, node);
		}

		mutex_exit(&fil_system->mutex);
	}

	ut_ad(node->open);

	cursor->node = node;
	cursor->file = node->handle;

	if (my_fstat(cursor->file, &cursor->statinfo, MYF(MY_WME))) {
		msg("[%02u] xtrabackup: error: cannot stat %s\n",
		    thread_n, cursor->abs_path);

		xb_fil_cur_close(cursor);

		return(XB_FIL_CUR_ERROR);
	}

	if (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
	    || srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC) {
		os_file_set_nocache(cursor->file, node->name, "OPEN");
	}

	/* Hint the kernel that the file will be read sequentially. */
	posix_fadvise(cursor->file, 0, 0, POSIX_FADV_SEQUENTIAL);

	/* Determine the page size */
	zip_size = xb_get_zip_size(cursor->file);
	if (zip_size == ULINT_UNDEFINED) {
		xb_fil_cur_close(cursor);
		return(XB_FIL_CUR_SKIP);
	} else if (zip_size) {
		page_size = zip_size;
		page_size_shift = get_bit_shift(page_size);
		msg("[%02u] %s is compressed with page size = "
		    "%lu bytes\n", thread_n, node->name, page_size);
		/* Valid compressed page sizes are 1K..16K (powers of two). */
		if (page_size_shift < 10 || page_size_shift > 14) {
			msg("[%02u] xtrabackup: Error: Invalid "
			    "page size: %lu.\n", thread_n, page_size);
			ut_error;
		}
	} else {
		page_size = UNIV_PAGE_SIZE;
		page_size_shift = UNIV_PAGE_SIZE_SHIFT;
	}
	cursor->page_size = page_size;
	cursor->page_size_shift = page_size_shift;
	cursor->zip_size = zip_size;

	/* Allocate read buffer; over-allocate by one page so the working
	pointer can be aligned to UNIV_PAGE_SIZE (needed for O_DIRECT). */
	cursor->buf_size = XB_FIL_CUR_PAGES * page_size;
	cursor->orig_buf = static_cast<byte *>
		(ut_malloc(cursor->buf_size + UNIV_PAGE_SIZE));
	cursor->buf = static_cast<byte *>
		(ut_align(cursor->orig_buf, UNIV_PAGE_SIZE));

	cursor->buf_read = 0;
	cursor->buf_npages = 0;
	cursor->buf_offset = 0;
	cursor->buf_page_no = 0;
	cursor->thread_n = thread_n;

	cursor->space_size = cursor->statinfo.st_size / page_size;

	cursor->read_filter = read_filter;
	cursor->read_filter->init(&cursor->read_filter_ctxt, cursor,
				  node->space->id);

	return(XB_FIL_CUR_SUCCESS);
}
/************************************************************************
Reads and verifies the next block of pages from the source
file. Positions the cursor after the last read non-corrupted page.
@return XB_FIL_CUR_SUCCESS if some have been read successfully, XB_FIL_CUR_EOF
if there are no more pages to read and XB_FIL_CUR_ERROR on error. */
xb_fil_cur_result_t
xb_fil_cur_read(
/*============*/
	xb_fil_cur_t*	cursor)	/*!< in/out: source file cursor */
{
	ibool			success;
	byte*			page;
	ulint			i;
	ulint			npages;
	ulint			retry_count;
	xb_fil_cur_result_t	ret;
	ib_int64_t		offset;
	ib_int64_t		to_read;

	/* Ask the read filter for the next region to copy. */
	cursor->read_filter->get_next_batch(&cursor->read_filter_ctxt,
					    &offset, &to_read);

	if (to_read == 0LL) {
		return(XB_FIL_CUR_EOF);
	}

	/* Never read more than one buffer's worth at a time. */
	if (to_read > (ib_int64_t) cursor->buf_size) {
		to_read = (ib_int64_t) cursor->buf_size;
	}

	xb_a(to_read > 0 && to_read <= 0xFFFFFFFFLL);

	/* Handle a partial page at the very end of the file: if it is
	shorter than a single page, report it as junk and stop; otherwise
	round the read size down to a whole number of pages. */
	if (to_read % cursor->page_size != 0 &&
	    offset + to_read == cursor->statinfo.st_size) {
		if (to_read < (ib_int64_t) cursor->page_size) {
			msg("[%02u] xtrabackup: Warning: junk at the end of "
			    "%s:\n", cursor->thread_n, cursor->abs_path);
			msg("[%02u] xtrabackup: Warning: offset = %llu, "
			    "to_read = %llu\n",
			    cursor->thread_n,
			    (unsigned long long) offset,
			    (unsigned long long) to_read);

			return(XB_FIL_CUR_EOF);
		}

		to_read = (ib_int64_t) (((ulint) to_read) &
					~(cursor->page_size - 1));
	}

	xb_a(to_read % cursor->page_size == 0);

	npages = (ulint) (to_read >> cursor->page_size_shift);

	/* Up to 10 attempts to get a batch with no corrupted pages. */
	retry_count = 10;
	ret = XB_FIL_CUR_SUCCESS;

read_retry:
	xtrabackup_io_throttling();

	cursor->buf_read = 0;
	cursor->buf_npages = 0;
	cursor->buf_offset = offset;
	cursor->buf_page_no = (ulint) (offset >> cursor->page_size_shift);

	success = os_file_read(cursor->file, cursor->buf, offset,
			       to_read);
	if (!success) {
		return(XB_FIL_CUR_ERROR);
	}

	/* check pages for corruption and re-read if necessary. i.e. in case of
	partially written pages */
	for (page = cursor->buf, i = 0; i < npages;
	     page += cursor->page_size, i++) {
		if (buf_page_is_corrupted(TRUE, page, cursor->zip_size)) {

			ulint page_no = cursor->buf_page_no + i;

			/* Doublewrite buffer pages in the system tablespace
			(extents 1 and 2) are expected to fail the check and
			are copied as-is. */
			if (cursor->is_system &&
			    page_no >= FSP_EXTENT_SIZE &&
			    page_no < FSP_EXTENT_SIZE * 3) {
				/* skip doublewrite buffer pages */
				xb_a(cursor->page_size == UNIV_PAGE_SIZE);
				msg("[%02u] xtrabackup: "
				    "Page %lu is a doublewrite buffer page, "
				    "skipping.\n", cursor->thread_n, page_no);
			} else {
				retry_count--;
				if (retry_count == 0) {
					msg("[%02u] xtrabackup: "
					    "Error: failed to read page after "
					    "10 retries. File %s seems to be "
					    "corrupted.\n", cursor->thread_n,
					    cursor->abs_path);
					ret = XB_FIL_CUR_ERROR;
					break;
				}
				msg("[%02u] xtrabackup: "
				    "Database page corruption detected at page "
				    "%lu, retrying...\n", cursor->thread_n,
				    page_no);

				/* Sleep 100 ms and re-read the whole batch;
				the server may have been writing the page
				concurrently. */
				os_thread_sleep(100000);

				goto read_retry;
			}
		}
		cursor->buf_read += cursor->page_size;
		cursor->buf_npages++;
	}

	/* The copied region is no longer needed in the page cache. */
	posix_fadvise(cursor->file, offset, to_read, POSIX_FADV_DONTNEED);

	return(ret);
}
/************************************************************************
Close the source file cursor opened with xb_fil_cur_open() and its
associated read filter. */
void
xb_fil_cur_close(
/*=============*/
	xb_fil_cur_t *cursor)	/*!< in/out: source file cursor */
{
	/* Tear down the read filter state first. */
	cursor->read_filter->deinit(&cursor->read_filter_ctxt);

	/* Detach from the tablespace node and close its file handle, if
	the cursor got that far during opening. */
	if (cursor->node != NULL) {
		xb_fil_node_close_file(cursor->node);
		cursor->file = XB_FILE_UNDEFINED;
	}

	/* Release the read buffer, if it was allocated. */
	if (cursor->orig_buf != NULL) {
		ut_free(cursor->orig_buf);
	}
}
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2013 Percona LLC and/or its affiliates.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Source file cursor interface */
#ifndef FIL_CUR_H
#define FIL_CUR_H
#include <my_dir.h>
#include "read_filt.h"
/** Source file cursor: the state needed to copy one tablespace file —
file handle and paths, page geometry, the aligned read buffer and the
position/result of the last read. Opened with xb_fil_cur_open(), advanced
with xb_fil_cur_read() and released with xb_fil_cur_close(). */
struct xb_fil_cur_t {
	os_file_t	file;		/*!< source file handle */
	fil_node_t*	node;		/*!< source tablespace node */
	char		rel_path[FN_REFLEN];
					/*!< normalized file path */
	char		abs_path[FN_REFLEN];
					/*!< absolute file path */
	MY_STAT		statinfo;	/*!< information about the file */
	ulint		zip_size;	/*!< compressed page size in bytes or 0
					for uncompressed pages */
	ulint		page_size;	/*!< = zip_size for compressed pages or
					UNIV_PAGE_SIZE for uncompressed ones */
	ulint		page_size_shift;/*!< bit shift corresponding to
					page_size */
	my_bool		is_system;	/*!< TRUE for system tablespace, FALSE
					otherwise */
	xb_read_filt_t*	read_filter;	/*!< read filter */
	xb_read_filt_ctxt_t	read_filter_ctxt;
					/*!< read filter context */
	byte*		orig_buf;	/*!< read buffer */
	byte*		buf;		/*!< aligned pointer for orig_buf */
	ulint		buf_size;	/*!< buffer size in bytes */
	ulint		buf_read;	/*!< number of read bytes in buffer
					after the last cursor read */
	ulint		buf_npages;	/*!< number of pages in buffer after the
					last cursor read */
	ib_int64_t	buf_offset;	/*!< file offset of the first page in
					buffer */
	ulint		buf_page_no;	/*!< number of the first page in
					buffer */
	uint		thread_n;	/*!< thread number for diagnostics */
	ulint		space_id;	/*!< ID of tablespace */
	ulint		space_size;	/*!< space size in pages */
};
/** Result codes returned by the cursor operations. */
typedef enum {
	XB_FIL_CUR_SUCCESS,	/*!< operation completed successfully */
	XB_FIL_CUR_SKIP,	/*!< the source file must be skipped */
	XB_FIL_CUR_ERROR,	/*!< an error occurred */
	XB_FIL_CUR_EOF		/*!< no more pages to read */
} xb_fil_cur_result_t;
/************************************************************************
Open a source file cursor and initialize the associated read filter.
@return XB_FIL_CUR_SUCCESS on success, XB_FIL_CUR_SKIP if the source file must
be skipped and XB_FIL_CUR_ERROR on error. */
xb_fil_cur_result_t
xb_fil_cur_open(
/*============*/
	xb_fil_cur_t*	cursor,		/*!< out: source file cursor */
	xb_read_filt_t*	read_filter,	/*!< in/out: the read filter */
	fil_node_t*	node,		/*!< in: source tablespace node */
	uint		thread_n);	/*!< thread number for diagnostics */

/************************************************************************
Reads and verifies the next block of pages from the source
file. Positions the cursor after the last read non-corrupted page.
@return XB_FIL_CUR_SUCCESS if some have been read successfully, XB_FIL_CUR_EOF
if there are no more pages to read and XB_FIL_CUR_ERROR on error. */
xb_fil_cur_result_t
xb_fil_cur_read(
/*============*/
	xb_fil_cur_t*	cursor);	/*!< in/out: source file cursor */

/************************************************************************
Close the source file cursor opened with xb_fil_cur_open() and its
associated read filter. */
void
xb_fil_cur_close(
/*=============*/
	xb_fil_cur_t *cursor);	/*!< in/out: source file cursor */

/***********************************************************************
Extracts the relative path ("database/table.ibd") of a tablespace from a
specified possibly absolute path.

For user tablespaces both "./database/table.ibd" and
"/remote/dir/database/table.ibd" result in "database/table.ibd".

For system tablespaces (i.e. when is_system is TRUE) both "/remote/dir/ibdata1"
and "./ibdata1" yield "ibdata1" in the output. */
const char *
xb_get_relative_path(
/*=================*/
	const char*	path,		/*!< in: tablespace path (either
					relative or absolute) */
	ibool		is_system);	/*!< in: TRUE for system tablespaces,
					i.e. when only the filename must be
					returned. */
#endif
/******************************************************
hot backup tool for InnoDB
(c) 2009-2015 Percona LLC and/or its affiliates
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************
This file incorporates work covered by the following copyright and
permission notice:
Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
*******************************************************/
#include <my_global.h>
#include <stdio.h>
#include <string.h>
#include <mysql.h>
#include <my_dir.h>
#include <ut0mem.h>
#include <os0sync.h>
#include <os0file.h>
#include <srv0start.h>
#include <algorithm>
#include <mysqld.h>
#include <my_default.h>
#include <my_getopt.h>
#include <strings.h>
#include <string>
#include <sstream>
#include <set>
#include "common.h"
#include "innobackupex.h"
#include "xtrabackup.h"
#include "xtrabackup_version.h"
#include "xbstream.h"
#include "fil_cur.h"
#include "write_filt.h"
#include "backup_copy.h"
using std::min;
using std::max;
/* options */

/* Boolean mode/behavior flags parsed from the innobackupex command line;
see ibx_long_options[] for the user-visible descriptions. */
my_bool opt_ibx_version = FALSE;
my_bool opt_ibx_help = FALSE;
my_bool opt_ibx_apply_log = FALSE;
my_bool opt_ibx_redo_only = FALSE;
my_bool opt_ibx_incremental = FALSE;
my_bool opt_ibx_notimestamp = FALSE;

my_bool opt_ibx_copy_back = FALSE;
my_bool opt_ibx_move_back = FALSE;
my_bool opt_ibx_galera_info = FALSE;
my_bool opt_ibx_slave_info = FALSE;
my_bool opt_ibx_no_lock = FALSE;
my_bool opt_ibx_safe_slave_backup = FALSE;
my_bool opt_ibx_rsync = FALSE;
my_bool opt_ibx_force_non_empty_dirs = FALSE;
my_bool opt_ibx_noversioncheck = FALSE;
my_bool opt_ibx_no_backup_locks = FALSE;
my_bool opt_ibx_decompress = FALSE;

/* Incremental-backup history selection. */
char *opt_ibx_incremental_history_name = NULL;
char *opt_ibx_incremental_history_uuid = NULL;

/* MySQL connection parameters. */
char *opt_ibx_user = NULL;
char *opt_ibx_password = NULL;
char *opt_ibx_host = NULL;
char *opt_ibx_defaults_group = NULL;
char *opt_ibx_socket = NULL;
uint opt_ibx_port = 0;
char *opt_ibx_login_path = NULL;

/* FTWRL / long-query handling and other tunables. */
ulong opt_ibx_lock_wait_query_type;
ulong opt_ibx_kill_long_query_type;

ulong opt_ibx_decrypt_algo = 0;

uint opt_ibx_kill_long_queries_timeout = 0;
uint opt_ibx_lock_wait_timeout = 0;
uint opt_ibx_lock_wait_threshold = 0;
uint opt_ibx_debug_sleep_before_unlock = 0;
uint opt_ibx_safe_slave_backup_timeout = 0;

const char *opt_ibx_history = NULL;
bool opt_ibx_decrypt = false;

/* Table/database filtering. */
char *opt_ibx_include = NULL;
char *opt_ibx_databases = NULL;
bool ibx_partial_backup = false;

char *ibx_position_arg = NULL;
char *ibx_backup_directory = NULL;

/* copy of proxied xtrabackup options */
my_bool ibx_xb_close_files;
my_bool ibx_xtrabackup_compact;
const char *ibx_xtrabackup_compress_alg;
uint ibx_xtrabackup_compress_threads;
ulonglong ibx_xtrabackup_compress_chunk_size;
ulong ibx_xtrabackup_encrypt_algo;
char *ibx_xtrabackup_encrypt_key;
char *ibx_xtrabackup_encrypt_key_file;
uint ibx_xtrabackup_encrypt_threads;
ulonglong ibx_xtrabackup_encrypt_chunk_size;
my_bool ibx_xtrabackup_export;
char *ibx_xtrabackup_extra_lsndir;
char *ibx_xtrabackup_incremental_basedir;
char *ibx_xtrabackup_incremental_dir;
my_bool	ibx_xtrabackup_incremental_force_scan;
ulint ibx_xtrabackup_log_copy_interval;
char *ibx_xtrabackup_incremental;
int ibx_xtrabackup_parallel;
my_bool ibx_xtrabackup_rebuild_indexes;
ulint ibx_xtrabackup_rebuild_threads;
char *ibx_xtrabackup_stream_str;
char *ibx_xtrabackup_tables_file;
long ibx_xtrabackup_throttle;
char *ibx_opt_mysql_tmpdir;
longlong ibx_xtrabackup_use_memory;
/* Print a printf-style diagnostic message to stderr, prefixed with a
"yymmdd hh:mm:ss" timestamp and the innobackupex binary name.
Returns the fprintf() result, or -1 if formatting the message failed. */
static inline int ibx_msg(const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
static inline int ibx_msg(const char *fmt, ...)
{
	char		timestamp[100];
	char*		formatted;
	va_list		ap;
	int		rc;
	time_t		now = time(NULL);

	strftime(timestamp, sizeof(timestamp), "%y%m%d %H:%M:%S",
		 localtime(&now));

	va_start(ap, fmt);
	rc = vasprintf(&formatted, fmt, ap);
	va_end(ap);

	if (rc == -1) {
		/* Formatting failed; "formatted" was not allocated. */
		return rc;
	}

	rc = fprintf(stderr, "%s %s: %s",
		     timestamp, INNOBACKUPEX_BIN_NAME, formatted);
	free(formatted);

	return rc;
}
/** Identifiers for innobackupex long options that have no short form.
Values start at 256 to stay clear of the single-character option range. */
enum innobackupex_options
{
	OPT_APPLY_LOG = 256,
	OPT_COPY_BACK,
	OPT_MOVE_BACK,
	OPT_REDO_ONLY,
	OPT_GALERA_INFO,
	OPT_SLAVE_INFO,
	OPT_INCREMENTAL,
	OPT_INCREMENTAL_HISTORY_NAME,
	OPT_INCREMENTAL_HISTORY_UUID,
	OPT_LOCK_WAIT_QUERY_TYPE,
	OPT_KILL_LONG_QUERY_TYPE,
	OPT_KILL_LONG_QUERIES_TIMEOUT,
	OPT_LOCK_WAIT_TIMEOUT,
	OPT_LOCK_WAIT_THRESHOLD,
	OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
	OPT_NO_LOCK,
	OPT_SAFE_SLAVE_BACKUP,
	OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
	OPT_RSYNC,
	OPT_HISTORY,
	OPT_INCLUDE,
	OPT_FORCE_NON_EMPTY_DIRS,
	OPT_NO_TIMESTAMP,
	OPT_NO_VERSION_CHECK,
	OPT_NO_BACKUP_LOCKS,
	OPT_DATABASES,
	OPT_DECRYPT,
	OPT_DECOMPRESS,

	/* options which are passed directly to xtrabackup */
	OPT_CLOSE_FILES,
	OPT_COMPACT,
	OPT_COMPRESS,
	OPT_COMPRESS_THREADS,
	OPT_COMPRESS_CHUNK_SIZE,
	OPT_ENCRYPT,
	OPT_ENCRYPT_KEY,
	OPT_ENCRYPT_KEY_FILE,
	OPT_ENCRYPT_THREADS,
	OPT_ENCRYPT_CHUNK_SIZE,
	OPT_EXPORT,
	OPT_EXTRA_LSNDIR,
	OPT_INCREMENTAL_BASEDIR,
	OPT_INCREMENTAL_DIR,
	OPT_INCREMENTAL_FORCE_SCAN,
	OPT_LOG_COPY_INTERVAL,
	OPT_PARALLEL,
	OPT_REBUILD_INDEXES,
	OPT_REBUILD_THREADS,
	OPT_STREAM,
	OPT_TABLES_FILE,
	OPT_THROTTLE,
	OPT_USE_MEMORY
};
/* Current innobackupex mode of operation; initialized to backup mode. */
ibx_mode_t ibx_mode = IBX_MODE_BACKUP;
static struct my_option ibx_long_options[] =
{
{"version", 'v', "print xtrabackup version information",
(uchar *) &opt_ibx_version, (uchar *) &opt_ibx_version, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"help", '?', "This option displays a help screen and exits.",
(uchar *) &opt_ibx_help, (uchar *) &opt_ibx_help, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"apply-log", OPT_APPLY_LOG, "Prepare a backup in BACKUP-DIR by "
"applying the transaction log file named \"xtrabackup_logfile\" "
"located in the same directory. Also, create new transaction logs. "
"The InnoDB configuration is read from the file \"backup-my.cnf\".",
(uchar*) &opt_ibx_apply_log, (uchar*) &opt_ibx_apply_log,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"redo-only", OPT_REDO_ONLY, "This option should be used when "
"preparing the base full backup and when merging all incrementals "
"except the last one. This forces xtrabackup to skip the \"rollback\" "
"phase and do a \"redo\" only. This is necessary if the backup will "
"have incremental changes applied to it later. See the xtrabackup "
"documentation for details.",
(uchar *) &opt_ibx_redo_only, (uchar *) &opt_ibx_redo_only, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"copy-back", OPT_COPY_BACK, "Copy all the files in a previously made "
"backup from the backup directory to their original locations.",
(uchar *) &opt_ibx_copy_back, (uchar *) &opt_ibx_copy_back, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"move-back", OPT_MOVE_BACK, "Move all the files in a previously made "
"backup from the backup directory to the actual datadir location. "
"Use with caution, as it removes backup files.",
(uchar *) &opt_ibx_move_back, (uchar *) &opt_ibx_move_back, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"galera-info", OPT_GALERA_INFO, "This options creates the "
"xtrabackup_galera_info file which contains the local node state at "
"the time of the backup. Option should be used when performing the "
"backup of Percona-XtraDB-Cluster. Has no effect when backup locks "
"are used to create the backup.",
(uchar *) &opt_ibx_galera_info, (uchar *) &opt_ibx_galera_info, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"slave-info", OPT_SLAVE_INFO, "This option is useful when backing "
"up a replication slave server. It prints the binary log position "
"and name of the master server. It also writes this information to "
"the \"xtrabackup_slave_info\" file as a \"CHANGE MASTER\" command. "
"A new slave for this master can be set up by starting a slave server "
"on this backup and issuing a \"CHANGE MASTER\" command with the "
"binary log position saved in the \"xtrabackup_slave_info\" file.",
(uchar *) &opt_ibx_slave_info, (uchar *) &opt_ibx_slave_info, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"incremental", OPT_INCREMENTAL, "This option tells xtrabackup to "
"create an incremental backup, rather than a full one. It is passed "
"to the xtrabackup child process. When this option is specified, "
"either --incremental-lsn or --incremental-basedir can also be given. "
"If neither option is given, option --incremental-basedir is passed "
"to xtrabackup by default, set to the first timestamped backup "
"directory in the backup base directory.",
(uchar *) &opt_ibx_incremental, (uchar *) &opt_ibx_incremental, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"no-lock", OPT_NO_LOCK, "Use this option to disable table lock "
"with \"FLUSH TABLES WITH READ LOCK\". Use it only if ALL your "
"tables are InnoDB and you DO NOT CARE about the binary log "
"position of the backup. This option shouldn't be used if there "
"are any DDL statements being executed or if any updates are "
"happening on non-InnoDB tables (this includes the system MyISAM "
"tables in the mysql database), otherwise it could lead to an "
"inconsistent backup. If you are considering to use --no-lock "
"because your backups are failing to acquire the lock, this could "
"be because of incoming replication events preventing the lock "
"from succeeding. Please try using --safe-slave-backup to "
"momentarily stop the replication slave thread, this may help "
"the backup to succeed and you then don't need to resort to "
"using this option.",
(uchar *) &opt_ibx_no_lock, (uchar *) &opt_ibx_no_lock, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"safe-slave-backup", OPT_SAFE_SLAVE_BACKUP, "Stop slave SQL thread "
"and wait to start backup until Slave_open_temp_tables in "
"\"SHOW STATUS\" is zero. If there are no open temporary tables, "
"the backup will take place, otherwise the SQL thread will be "
"started and stopped until there are no open temporary tables. "
"The backup will fail if Slave_open_temp_tables does not become "
"zero after --safe-slave-backup-timeout seconds. The slave SQL "
"thread will be restarted when the backup finishes.",
(uchar *) &opt_ibx_safe_slave_backup,
(uchar *) &opt_ibx_safe_slave_backup,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"rsync", OPT_RSYNC, "Uses the rsync utility to optimize local file "
"transfers. When this option is specified, innobackupex uses rsync "
"to copy all non-InnoDB files instead of spawning a separate cp for "
"each file, which can be much faster for servers with a large number "
"of databases or tables. This option cannot be used together with "
"--stream.",
(uchar *) &opt_ibx_rsync, (uchar *) &opt_ibx_rsync,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"force-non-empty-directories", OPT_FORCE_NON_EMPTY_DIRS, "This "
"option, when specified, makes --copy-back or --move-back transfer "
"files to non-empty directories. Note that no existing files will be "
"overwritten. If --copy-back or --nove-back has to copy a file from "
"the backup directory which already exists in the destination "
"directory, it will still fail with an error.",
(uchar *) &opt_ibx_force_non_empty_dirs,
(uchar *) &opt_ibx_force_non_empty_dirs,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"no-timestamp", OPT_NO_TIMESTAMP, "This option prevents creation of a "
"time-stamped subdirectory of the BACKUP-ROOT-DIR given on the "
"command line. When it is specified, the backup is done in "
"BACKUP-ROOT-DIR instead.",
(uchar *) &opt_ibx_notimestamp,
(uchar *) &opt_ibx_notimestamp,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"no-version-check", OPT_NO_VERSION_CHECK, "This option disables the "
"version check which is enabled by the --version-check option.",
(uchar *) &opt_ibx_noversioncheck,
(uchar *) &opt_ibx_noversioncheck,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"no-backup-locks", OPT_NO_BACKUP_LOCKS, "This option controls if "
"backup locks should be used instead of FLUSH TABLES WITH READ LOCK "
"on the backup stage. The option has no effect when backup locks are "
"not supported by the server. This option is enabled by default, "
"disable with --no-backup-locks.",
(uchar *) &opt_ibx_no_backup_locks,
(uchar *) &opt_ibx_no_backup_locks,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"decompress", OPT_DECOMPRESS, "Decompresses all files with the .qp "
"extension in a backup previously made with the --compress option.",
(uchar *) &opt_ibx_decompress,
(uchar *) &opt_ibx_decompress,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"user", 'u', "This option specifies the MySQL username used "
"when connecting to the server, if that's not the current user. "
"The option accepts a string argument. See mysql --help for details.",
(uchar*) &opt_ibx_user, (uchar*) &opt_ibx_user, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"host", 'H', "This option specifies the host to use when "
"connecting to the database server with TCP/IP. The option accepts "
"a string argument. See mysql --help for details.",
(uchar*) &opt_ibx_host, (uchar*) &opt_ibx_host, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"port", 'P', "This option specifies the port to use when "
"connecting to the database server with TCP/IP. The option accepts "
"a string argument. See mysql --help for details.",
&opt_ibx_port, &opt_ibx_port, 0, GET_UINT, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"password", 'p', "This option specifies the password to use "
"when connecting to the database. It accepts a string argument. "
"See mysql --help for details.",
0, 0, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"socket", 'S', "This option specifies the socket to use when "
"connecting to the local database server with a UNIX domain socket. "
"The option accepts a string argument. See mysql --help for details.",
(uchar*) &opt_ibx_socket, (uchar*) &opt_ibx_socket, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"incremental-history-name", OPT_INCREMENTAL_HISTORY_NAME,
"This option specifies the name of the backup series stored in the "
"PERCONA_SCHEMA.xtrabackup_history history record to base an "
"incremental backup on. Xtrabackup will search the history table "
"looking for the most recent (highest innodb_to_lsn), successful "
"backup in the series and take the to_lsn value to use as the "
"starting lsn for the incremental backup. This will be mutually "
"exclusive with --incremental-history-uuid, --incremental-basedir "
"and --incremental-lsn. If no valid lsn can be found (no series by "
"that name, no successful backups by that name) xtrabackup will "
"return with an error. It is used with the --incremental option.",
(uchar*) &opt_ibx_incremental_history_name,
(uchar*) &opt_ibx_incremental_history_name, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"incremental-history-uuid", OPT_INCREMENTAL_HISTORY_UUID,
"This option specifies the UUID of the specific history record "
"stored in the PERCONA_SCHEMA.xtrabackup_history to base an "
"incremental backup on. --incremental-history-name, "
"--incremental-basedir and --incremental-lsn. If no valid lsn can be "
"found (no success record with that uuid) xtrabackup will return "
"with an error. It is used with the --incremental option.",
(uchar*) &opt_ibx_incremental_history_uuid,
(uchar*) &opt_ibx_incremental_history_uuid, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"decrypt", OPT_DECRYPT, "Decrypts all files with the .xbcrypt "
"extension in a backup previously made with --encrypt option.",
&opt_ibx_decrypt_algo, &opt_ibx_decrypt_algo,
&xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE,
"This option specifies which types of queries are allowed to complete "
"before innobackupex will issue the global lock. Default is all.",
(uchar*) &opt_ibx_lock_wait_query_type,
(uchar*) &opt_ibx_lock_wait_query_type, &query_type_typelib,
GET_ENUM, REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0},
{"kill-long-query-type", OPT_KILL_LONG_QUERY_TYPE,
"This option specifies which types of queries should be killed to "
"unblock the global lock. Default is \"all\".",
(uchar*) &opt_ibx_kill_long_query_type,
(uchar*) &opt_ibx_kill_long_query_type, &query_type_typelib,
GET_ENUM, REQUIRED_ARG, QUERY_TYPE_SELECT, 0, 0, 0, 0, 0},
{"history", OPT_HISTORY,
"This option enables the tracking of backup history in the "
"PERCONA_SCHEMA.xtrabackup_history table. An optional history "
"series name may be specified that will be placed with the history "
"record for the current backup being taken.",
NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"include", OPT_INCLUDE,
"This option is a regular expression to be matched against table "
"names in databasename.tablename format. It is passed directly to "
"xtrabackup's --tables option. See the xtrabackup documentation for "
"details.",
(uchar*) &opt_ibx_include,
(uchar*) &opt_ibx_include, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"databases", OPT_DATABASES,
"This option specifies the list of databases that innobackupex should "
"back up. The option accepts a string argument or path to file that "
"contains the list of databases to back up. The list is of the form "
"\"databasename1[.table_name1] databasename2[.table_name2] . . .\". "
"If this option is not specified, all databases containing MyISAM and "
"InnoDB tables will be backed up. Please make sure that --databases "
"contains all of the InnoDB databases and tables, so that all of the "
"innodb.frm files are also backed up. In case the list is very long, "
"this can be specified in a file, and the full path of the file can "
"be specified instead of the list. (See option --tables-file.)",
(uchar*) &opt_ibx_databases,
(uchar*) &opt_ibx_databases, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"kill-long-queries-timeout", OPT_KILL_LONG_QUERIES_TIMEOUT,
"This option specifies the number of seconds innobackupex waits "
"between starting FLUSH TABLES WITH READ LOCK and killing those "
"queries that block it. Default is 0 seconds, which means "
"innobackupex will not attempt to kill any queries.",
(uchar*) &opt_ibx_kill_long_queries_timeout,
(uchar*) &opt_ibx_kill_long_queries_timeout, 0, GET_UINT,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"ftwrl-wait-timeout", OPT_LOCK_WAIT_TIMEOUT,
"This option specifies time in seconds that innobackupex should wait "
"for queries that would block FTWRL before running it. If there are "
"still such queries when the timeout expires, innobackupex terminates "
"with an error. Default is 0, in which case innobackupex does not "
"wait for queries to complete and starts FTWRL immediately.",
(uchar*) &opt_ibx_lock_wait_timeout,
(uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"ftwrl-wait-threshold", OPT_LOCK_WAIT_THRESHOLD,
"This option specifies the query run time threshold which is used by "
"innobackupex to detect long-running queries with a non-zero value "
"of --ftwrl-wait-timeout. FTWRL is not started until such "
"long-running queries exist. This option has no effect if "
"--ftwrl-wait-timeout is 0. Default value is 60 seconds.",
(uchar*) &opt_ibx_lock_wait_threshold,
(uchar*) &opt_ibx_lock_wait_threshold, 0, GET_UINT,
REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
{"debug-sleep-before-unlock", OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
"This is a debug-only option used by the XtraBackup test suite.",
(uchar*) &opt_ibx_debug_sleep_before_unlock,
(uchar*) &opt_ibx_debug_sleep_before_unlock, 0, GET_UINT,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
"How many seconds --safe-slave-backup should wait for "
"Slave_open_temp_tables to become zero. (default 300)",
(uchar*) &opt_ibx_safe_slave_backup_timeout,
(uchar*) &opt_ibx_safe_slave_backup_timeout, 0, GET_UINT,
REQUIRED_ARG, 300, 0, 0, 0, 0, 0},
/* Following command-line options are actually handled by xtrabackup.
We put them here with only purpose for them to showup in
innobackupex --help output */
{"close_files", OPT_CLOSE_FILES, "Do not keep files opened. This "
"option is passed directly to xtrabackup. Use at your own risk.",
(uchar*) &ibx_xb_close_files, (uchar*) &ibx_xb_close_files, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"compact", OPT_COMPACT, "Create a compact backup with all secondary "
"index pages omitted. This option is passed directly to xtrabackup. "
"See xtrabackup documentation for details.",
(uchar*) &ibx_xtrabackup_compact, (uchar*) &ibx_xtrabackup_compact,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"compress", OPT_COMPRESS, "This option instructs xtrabackup to "
"compress backup copies of InnoDB data files. It is passed directly "
"to the xtrabackup child process. Try 'xtrabackup --help' for more "
"details.", (uchar*) &ibx_xtrabackup_compress_alg,
(uchar*) &ibx_xtrabackup_compress_alg, 0,
GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"compress-threads", OPT_COMPRESS_THREADS,
"This option specifies the number of worker threads that will be used "
"for parallel compression. It is passed directly to the xtrabackup "
"child process. Try 'xtrabackup --help' for more details.",
(uchar*) &ibx_xtrabackup_compress_threads,
(uchar*) &ibx_xtrabackup_compress_threads,
0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
{"compress-chunk-size", OPT_COMPRESS_CHUNK_SIZE, "Size of working "
"buffer(s) for compression threads in bytes. The default value "
"is 64K.", (uchar*) &ibx_xtrabackup_compress_chunk_size,
(uchar*) &ibx_xtrabackup_compress_chunk_size,
0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
{"encrypt", OPT_ENCRYPT, "This option instructs xtrabackup to encrypt "
"backup copies of InnoDB data files using the algorithm specified in "
"the ENCRYPTION-ALGORITHM. It is passed directly to the xtrabackup "
"child process. Try 'xtrabackup --help' for more details.",
&ibx_xtrabackup_encrypt_algo, &ibx_xtrabackup_encrypt_algo,
&xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"encrypt-key", OPT_ENCRYPT_KEY, "This option instructs xtrabackup to "
"use the given ENCRYPTION-KEY when using the --encrypt or --decrypt "
"options. During backup it is passed directly to the xtrabackup child "
"process. Try 'xtrabackup --help' for more details.",
(uchar*) &ibx_xtrabackup_encrypt_key,
(uchar*) &ibx_xtrabackup_encrypt_key, 0,
GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"encrypt-key-file", OPT_ENCRYPT_KEY_FILE, "This option instructs "
"xtrabackup to use the encryption key stored in the given "
"ENCRYPTION-KEY-FILE when using the --encrypt or --decrypt options.",
(uchar*) &ibx_xtrabackup_encrypt_key_file,
(uchar*) &ibx_xtrabackup_encrypt_key_file, 0,
GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"encrypt-threads", OPT_ENCRYPT_THREADS,
"This option specifies the number of worker threads that will be used "
"for parallel encryption. It is passed directly to the xtrabackup "
"child process. Try 'xtrabackup --help' for more details.",
(uchar*) &ibx_xtrabackup_encrypt_threads,
(uchar*) &ibx_xtrabackup_encrypt_threads,
0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
{"encrypt-chunk-size", OPT_ENCRYPT_CHUNK_SIZE,
"This option specifies the size of the internal working buffer for "
"each encryption thread, measured in bytes. It is passed directly to "
"the xtrabackup child process. Try 'xtrabackup --help' for more "
"details.",
(uchar*) &ibx_xtrabackup_encrypt_chunk_size,
(uchar*) &ibx_xtrabackup_encrypt_chunk_size,
0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
{"export", OPT_EXPORT, "This option is passed directly to xtrabackup's "
"--export option. It enables exporting individual tables for import "
"into another server. See the xtrabackup documentation for details.",
(uchar*) &ibx_xtrabackup_export, (uchar*) &ibx_xtrabackup_export,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"extra-lsndir", OPT_EXTRA_LSNDIR, "This option specifies the "
"directory in which to save an extra copy of the "
"\"xtrabackup_checkpoints\" file. The option accepts a string "
"argument. It is passed directly to xtrabackup's --extra-lsndir "
"option. See the xtrabackup documentation for details.",
(uchar*) &ibx_xtrabackup_extra_lsndir,
(uchar*) &ibx_xtrabackup_extra_lsndir,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"incremental-basedir", OPT_INCREMENTAL_BASEDIR, "This option "
"specifies the directory containing the full backup that is the base "
"dataset for the incremental backup. The option accepts a string "
"argument. It is used with the --incremental option.",
(uchar*) &ibx_xtrabackup_incremental_basedir,
(uchar*) &ibx_xtrabackup_incremental_basedir,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"incremental-dir", OPT_INCREMENTAL_DIR, "This option specifies the "
"directory where the incremental backup will be combined with the "
"full backup to make a new full backup. The option accepts a string "
"argument. It is used with the --incremental option.",
(uchar*) &ibx_xtrabackup_incremental_dir,
(uchar*) &ibx_xtrabackup_incremental_dir,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"incremental-force-scan", OPT_INCREMENTAL_FORCE_SCAN,
"This options tells xtrabackup to perform full scan of data files "
"for taking an incremental backup even if full changed page bitmap "
"data is available to enable the backup without the full scan.",
(uchar*)&ibx_xtrabackup_incremental_force_scan,
(uchar*)&ibx_xtrabackup_incremental_force_scan, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"log-copy-interval", OPT_LOG_COPY_INTERVAL, "This option specifies "
"time interval between checks done by log copying thread in "
"milliseconds.", (uchar*) &ibx_xtrabackup_log_copy_interval,
(uchar*) &ibx_xtrabackup_log_copy_interval,
0, GET_LONG, REQUIRED_ARG, 1000, 0, LONG_MAX, 0, 1, 0},
{"incremental-lsn", OPT_INCREMENTAL, "This option specifies the log "
"sequence number (LSN) to use for the incremental backup. The option "
"accepts a string argument. It is used with the --incremental option. "
"It is used instead of specifying --incremental-basedir. For "
"databases created by MySQL and Percona Server 5.0-series versions, "
"specify the LSN as two 32-bit integers in high:low format. For "
"databases created in 5.1 and later, specify the LSN as a single "
"64-bit integer.",
(uchar*) &ibx_xtrabackup_incremental,
(uchar*) &ibx_xtrabackup_incremental,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"parallel", OPT_PARALLEL, "On backup, this option specifies the "
"number of threads the xtrabackup child process should use to back "
"up files concurrently. The option accepts an integer argument. It "
"is passed directly to xtrabackup's --parallel option. See the "
"xtrabackup documentation for details.",
(uchar*) &ibx_xtrabackup_parallel, (uchar*) &ibx_xtrabackup_parallel,
0, GET_INT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0},
{"rebuild-indexes", OPT_REBUILD_INDEXES,
"This option only has effect when used together with the --apply-log "
"option and is passed directly to xtrabackup. When used, makes "
"xtrabackup rebuild all secondary indexes after applying the log. "
"This option is normally used to prepare compact backups. See the "
"XtraBackup manual for more information.",
(uchar*) &ibx_xtrabackup_rebuild_indexes,
(uchar*) &ibx_xtrabackup_rebuild_indexes,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"rebuild-threads", OPT_REBUILD_THREADS,
"Use this number of threads to rebuild indexes in a compact backup. "
"Only has effect with --prepare and --rebuild-indexes.",
(uchar*) &ibx_xtrabackup_rebuild_threads,
(uchar*) &ibx_xtrabackup_rebuild_threads,
0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
{"stream", OPT_STREAM, "This option specifies the format in which to "
"do the streamed backup. The option accepts a string argument. The "
"backup will be done to STDOUT in the specified format. Currently, "
"the only supported formats are tar and xbstream. This option is "
"passed directly to xtrabackup's --stream option.",
(uchar*) &ibx_xtrabackup_stream_str,
(uchar*) &ibx_xtrabackup_stream_str, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"tables-file", OPT_TABLES_FILE, "This option specifies the file in "
"which there are a list of names of the form database. The option "
"accepts a string argument.table, one per line. The option is passed "
"directly to xtrabackup's --tables-file option.",
(uchar*) &ibx_xtrabackup_tables_file,
(uchar*) &ibx_xtrabackup_tables_file,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"throttle", OPT_THROTTLE, "This option specifies a number of I/O "
"operations (pairs of read+write) per second. It accepts an integer "
"argument. It is passed directly to xtrabackup's --throttle option.",
(uchar*) &ibx_xtrabackup_throttle, (uchar*) &ibx_xtrabackup_throttle,
0, GET_LONG, REQUIRED_ARG, 0, 0, LONG_MAX, 0, 1, 0},
{"tmpdir", 't', "This option specifies the location where a temporary "
"files will be stored. If the option is not specified, the default is "
"to use the value of tmpdir read from the server configuration.",
(uchar*) &ibx_opt_mysql_tmpdir,
(uchar*) &ibx_opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"use-memory", OPT_USE_MEMORY, "This option accepts a string argument "
"that specifies the amount of memory in bytes for xtrabackup to use "
"for crash recovery while preparing a backup. Multiples are supported "
"providing the unit (e.g. 1MB, 1GB). It is used only with the option "
"--apply-log. It is passed directly to xtrabackup's --use-memory "
"option. See the xtrabackup documentation for details.",
(uchar*) &ibx_xtrabackup_use_memory,
(uchar*) &ibx_xtrabackup_use_memory,
0, GET_LL, REQUIRED_ARG, 100*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
1024*1024L, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
/* Print the license notice, command synopsis and full option list
(via my_print_help()) for innobackupex to stdout.
Fixes in help text: "SYNOPOSIS" -> "SYNOPSIS", "accessable" ->
"accessible", "exit code innobackupex" -> "exit code of innobackupex". */
static void usage(void)
{
	puts("Open source backup tool for InnoDB and XtraDB\n\
\n\
Copyright (C) 2009-2015 Percona LLC and/or its affiliates.\n\
Portions Copyright (C) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.\n\
\n\
This program is free software; you can redistribute it and/or\n\
modify it under the terms of the GNU General Public License\n\
as published by the Free Software Foundation version 2\n\
of the License.\n\
\n\
This program is distributed in the hope that it will be useful,\n\
but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\
GNU General Public License for more details.\n\
\n\
You can download full text of the license on http://www.gnu.org/licenses/gpl-2.0.txt\n\n");

	puts("innobackupex - Non-blocking backup tool for InnoDB, XtraDB and HailDB databases\n\
\n\
SYNOPSIS\n\
\n\
innobackupex [--compress] [--compress-threads=NUMBER-OF-THREADS] [--compress-chunk-size=CHUNK-SIZE]\n\
             [--encrypt=ENCRYPTION-ALGORITHM] [--encrypt-threads=NUMBER-OF-THREADS] [--encrypt-chunk-size=CHUNK-SIZE]\n\
             [--encrypt-key=LITERAL-ENCRYPTION-KEY] | [--encryption-key-file=MY.KEY]\n\
             [--include=REGEXP] [--user=NAME]\n\
             [--password=WORD] [--port=PORT] [--socket=SOCKET]\n\
             [--no-timestamp] [--ibbackup=IBBACKUP-BINARY]\n\
             [--slave-info] [--galera-info] [--stream=tar|xbstream]\n\
             [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME]\n\
             [--databases=LIST] [--no-lock] \n\
             [--tmpdir=DIRECTORY] [--tables-file=FILE]\n\
             [--history=NAME]\n\
             [--incremental] [--incremental-basedir]\n\
             [--incremental-dir] [--incremental-force-scan] [--incremental-lsn]\n\
             [--incremental-history-name=NAME] [--incremental-history-uuid=UUID]\n\
             [--close-files] [--compact]     \n\
             BACKUP-ROOT-DIR\n\
\n\
innobackupex --apply-log [--use-memory=B]\n\
             [--defaults-file=MY.CNF]\n\
             [--export] [--redo-only] [--ibbackup=IBBACKUP-BINARY]\n\
             BACKUP-DIR\n\
\n\
innobackupex --copy-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\
\n\
innobackupex --move-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\
\n\
innobackupex [--decompress] [--decrypt=ENCRYPTION-ALGORITHM]\n\
             [--encrypt-key=LITERAL-ENCRYPTION-KEY] | [--encryption-key-file=MY.KEY]\n\
             [--parallel=NUMBER-OF-FORKS] BACKUP-DIR\n\
\n\
DESCRIPTION\n\
\n\
The first command line above makes a hot backup of a MySQL database.\n\
By default it creates a backup directory (named by the current date\n\
and time) in the given backup root directory.  With the --no-timestamp\n\
option it does not create a time-stamped backup directory, but it puts\n\
the backup in the given directory (which must not exist).  This\n\
command makes a complete backup of all MyISAM and InnoDB tables and\n\
indexes in all databases or in all of the databases specified with the\n\
--databases option.  The created backup contains .frm, .MRG, .MYD,\n\
.MYI, .MAD, .MAI, .TRG, .TRN, .ARM, .ARZ, .CSM, CSV, .opt, .par, and\n\
InnoDB data and log files.  The MY.CNF options file defines the\n\
location of the database.  This command connects to the MySQL server\n\
using the mysql client program, and runs xtrabackup as a child\n\
process.\n\
\n\
The --apply-log command prepares a backup for starting a MySQL\n\
server on the backup. This command recovers InnoDB data files as specified\n\
in BACKUP-DIR/backup-my.cnf using BACKUP-DIR/xtrabackup_logfile,\n\
and creates new InnoDB log files as specified in BACKUP-DIR/backup-my.cnf.\n\
The BACKUP-DIR should be the path to a backup directory created by\n\
xtrabackup. This command runs xtrabackup as a child process, but it does not \n\
connect to the database server.\n\
\n\
The --copy-back command copies data, index, and log files\n\
from the backup directory back to their original locations.\n\
The MY.CNF options file defines the original location of the database.\n\
The BACKUP-DIR is the path to a backup directory created by xtrabackup.\n\
\n\
The --move-back command is similar to --copy-back with the only difference that\n\
it moves files to their original locations rather than copies them. As this\n\
option removes backup files, it must be used with caution. It may be useful in\n\
cases when there is not enough free disk space to copy files.\n\
\n\
The --decompress --decrypt command will decrypt and/or decompress a backup made\n\
with the --compress and/or --encrypt options. When decrypting, the encryption\n\
algorithm and key used when the backup was taken MUST be provided via the\n\
specified options. --decrypt and --decompress may be used together at the same\n\
time to completely normalize a previously compressed and encrypted backup. The\n\
--parallel option will allow multiple files to be decrypted and/or decompressed\n\
simultaneously. In order to decompress, the qpress utility MUST be installed\n\
and accessible within the path. This process will remove the original\n\
compressed/encrypted files and leave the results in the same location.\n\
\n\
On success the exit code of innobackupex is 0. A non-zero exit code \n\
indicates an error.\n");
	printf("Usage: [%s [--defaults-file=#] --backup | %s [--defaults-file=#] --prepare] [OPTIONS]\n", my_progname, my_progname);
	my_print_help(ibx_long_options);
}
/* Callback invoked by handle_options() for every parsed innobackupex
option.
@param optid	short option character or OPT_* id from ibx_long_options
@param opt	option descriptor (unused)
@param argument	the option's argument, or NULL when none was given
@return 0 on success; non-zero to make option parsing fail */
static
my_bool
ibx_get_one_option(int optid,
		   const struct my_option *opt __attribute__((unused)),
		   char *argument)
{
	switch(optid) {
	case '?':
		usage();
		exit(0);
		break;
	case 'v':
		msg("innobackupex version %s %s (%s) (revision id: %s)\n",
			XTRABACKUP_VERSION,
			SYSTEM_TYPE, MACHINE_TYPE, XTRABACKUP_REVISION);
		exit(0);
		break;
	case OPT_HISTORY:
		/* --history is valid with or without a history name. */
		if (argument) {
			opt_ibx_history = argument;
		} else {
			opt_ibx_history = "";
		}
		break;
	case OPT_DECRYPT:
		if (argument == NULL) {
			ibx_msg("Missing --decrypt argument, must specify a "
				"valid encryption algorithm.\n");
			return(1);
		}
		opt_ibx_decrypt = true;
		break;
	case OPT_STREAM:
		/* Only the tar and xbstream formats are supported. */
		if (!strcasecmp(argument, "tar"))
			xtrabackup_stream_fmt = XB_STREAM_FMT_TAR;
		else if (!strcasecmp(argument, "xbstream"))
			xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM;
		else {
			ibx_msg("Invalid --stream argument: %s\n", argument);
			return 1;
		}
		xtrabackup_stream = TRUE;
		break;
	case OPT_COMPRESS:
		/* quicklz is both the default and the only accepted
		compression algorithm. */
		if (argument == NULL)
			xtrabackup_compress_alg = "quicklz";
		else if (strcasecmp(argument, "quicklz"))
		{
			ibx_msg("Invalid --compress argument: %s\n", argument);
			return 1;
		}
		xtrabackup_compress = TRUE;
		break;
	case OPT_ENCRYPT:
		if (argument == NULL)
		{
			/* Report through ibx_msg() like the parallel
			--decrypt error above (was plain msg()). */
			ibx_msg("Missing --encrypt argument, must specify a "
				"valid encryption algorithm.\n");
			return 1;
		}
		xtrabackup_encrypt = TRUE;
		break;
	case 'p':
		if (argument)
		{
			char *start = argument;
			my_free(opt_ibx_password);
			opt_ibx_password= my_strdup(argument, MYF(MY_FAE));
			/* Destroy argument: overwrite the password in argv
			so it cannot be read from 'ps' output; a single 'x'
			is kept so the user can see it was supplied. */
			while (*argument)
				*argument++= 'x';
			if (*start)
				start[1]=0 ;
		}
		break;
	}

	return(0);
}
/* Determine the backup output directory and store it in
ibx_backup_directory. Unless --no-timestamp was given or a streaming
backup was requested, a "YYYY-MM-DD_HH-MM-SS" sub-directory of the
positional argument is used; otherwise the positional argument itself.
@return true if the resulting directory exists (it is created when
missing), false otherwise */
bool
make_backup_dir()
{
	char stamp[100];
	time_t now = time(NULL);

	if (opt_ibx_notimestamp || ibx_xtrabackup_stream_str) {
		ibx_backup_directory = strdup(ibx_position_arg);
	} else {
		strftime(stamp, sizeof(stamp), "%Y-%m-%d_%H-%M-%S",
			 localtime(&now));
		ut_a(asprintf(&ibx_backup_directory, "%s/%s",
			      ibx_position_arg, stamp) != -1);
	}

	return(directory_exists(ibx_backup_directory, true));
}
bool
ibx_handle_options(int *argc, char ***argv)
{
int i, n_arguments;
if (handle_options(argc, argv, ibx_long_options, ibx_get_one_option)) {
return(false);
}
if (opt_ibx_apply_log) {
ibx_mode = IBX_MODE_APPLY_LOG;
} else if (opt_ibx_copy_back) {
ibx_mode = IBX_MODE_COPY_BACK;
} else if (opt_ibx_move_back) {
ibx_mode = IBX_MODE_MOVE_BACK;
} else if (opt_ibx_decrypt || opt_ibx_decompress) {
ibx_mode = IBX_MODE_DECRYPT_DECOMPRESS;
} else {
ibx_mode = IBX_MODE_BACKUP;
}
/* find and save position argument */
i = 0;
n_arguments = 0;
while (i < *argc) {
char *opt = (*argv)[i];
if (strncmp(opt, "--", 2) != 0
&& !(strlen(opt) == 2 && opt[0] == '-')) {
if (ibx_position_arg != NULL
&& ibx_position_arg != opt) {
ibx_msg("Error: extra argument found %s\n",
opt);
}
ibx_position_arg = opt;
++n_arguments;
}
++i;
}
*argc -= n_arguments;
if (n_arguments > 1) {
return(false);
}
if (ibx_position_arg == NULL) {
ibx_msg("Missing argument\n");
return(false);
}
/* set argv[0] to be the program name */
--(*argv);
++(*argc);
return(true);
}
/*********************************************************************//**
Parse command-line options, connect to MySQL server,
detect server capabilities, etc. Copies every opt_ibx_* /
ibx_xtrabackup_* value parsed by ibx_handle_options() into the
corresponding xtrabackup global, validates option combinations, and
selects the xtrabackup operation matching ibx_mode.
@return true on success. */
bool
ibx_init()
{
	const char *run;	/* operation name used in the banner below */
	/*=====================*/
	/* Forward innobackupex option values to the shared opt_* /
	xtrabackup_* globals consumed by the xtrabackup core. */
	xtrabackup_copy_back = opt_ibx_copy_back;
	xtrabackup_move_back = opt_ibx_move_back;
	opt_galera_info = opt_ibx_galera_info;
	opt_slave_info = opt_ibx_slave_info;
	opt_no_lock = opt_ibx_no_lock;
	opt_safe_slave_backup = opt_ibx_safe_slave_backup;
	opt_rsync = opt_ibx_rsync;
	opt_force_non_empty_dirs = opt_ibx_force_non_empty_dirs;
	opt_noversioncheck = opt_ibx_noversioncheck;
	opt_no_backup_locks = opt_ibx_no_backup_locks;
	opt_decompress = opt_ibx_decompress;
	opt_incremental_history_name = opt_ibx_incremental_history_name;
	opt_incremental_history_uuid = opt_ibx_incremental_history_uuid;
	/* Server connection parameters. */
	opt_user = opt_ibx_user;
	opt_password = opt_ibx_password;
	opt_host = opt_ibx_host;
	opt_defaults_group = opt_ibx_defaults_group;
	opt_socket = opt_ibx_socket;
	opt_port = opt_ibx_port;
	opt_login_path = opt_ibx_login_path;
	/* Lock-wait / long-query handling parameters. */
	opt_lock_wait_query_type = opt_ibx_lock_wait_query_type;
	opt_kill_long_query_type = opt_ibx_kill_long_query_type;
	opt_decrypt_algo = opt_ibx_decrypt_algo;
	opt_kill_long_queries_timeout = opt_ibx_kill_long_queries_timeout;
	opt_lock_wait_timeout = opt_ibx_lock_wait_timeout;
	opt_lock_wait_threshold = opt_ibx_lock_wait_threshold;
	opt_debug_sleep_before_unlock = opt_ibx_debug_sleep_before_unlock;
	opt_safe_slave_backup_timeout = opt_ibx_safe_slave_backup_timeout;
	opt_history = opt_ibx_history;
	opt_decrypt = opt_ibx_decrypt;
	/* setup xtrabackup options */
	xb_close_files = ibx_xb_close_files;
	xtrabackup_compact = ibx_xtrabackup_compact;
	xtrabackup_compress_alg = ibx_xtrabackup_compress_alg;
	xtrabackup_compress_threads = ibx_xtrabackup_compress_threads;
	xtrabackup_compress_chunk_size = ibx_xtrabackup_compress_chunk_size;
	xtrabackup_encrypt_algo = ibx_xtrabackup_encrypt_algo;
	xtrabackup_encrypt_key = ibx_xtrabackup_encrypt_key;
	xtrabackup_encrypt_key_file = ibx_xtrabackup_encrypt_key_file;
	xtrabackup_encrypt_threads = ibx_xtrabackup_encrypt_threads;
	xtrabackup_encrypt_chunk_size = ibx_xtrabackup_encrypt_chunk_size;
	xtrabackup_export = ibx_xtrabackup_export;
	xtrabackup_extra_lsndir = ibx_xtrabackup_extra_lsndir;
	xtrabackup_incremental_basedir = ibx_xtrabackup_incremental_basedir;
	xtrabackup_incremental_dir = ibx_xtrabackup_incremental_dir;
	xtrabackup_incremental_force_scan =
					ibx_xtrabackup_incremental_force_scan;
	xtrabackup_log_copy_interval = ibx_xtrabackup_log_copy_interval;
	xtrabackup_incremental = ibx_xtrabackup_incremental;
	xtrabackup_parallel = ibx_xtrabackup_parallel;
	xtrabackup_rebuild_indexes = ibx_xtrabackup_rebuild_indexes;
	xtrabackup_rebuild_threads = ibx_xtrabackup_rebuild_threads;
	xtrabackup_stream_str = ibx_xtrabackup_stream_str;
	xtrabackup_tables_file = ibx_xtrabackup_tables_file;
	xtrabackup_throttle = ibx_xtrabackup_throttle;
	opt_mysql_tmpdir = ibx_opt_mysql_tmpdir;
	xtrabackup_use_memory = ibx_xtrabackup_use_memory;
	/* All incremental-related suboptions require --incremental. */
	if (!opt_ibx_incremental
	    && (xtrabackup_incremental
		|| xtrabackup_incremental_basedir
		|| opt_ibx_incremental_history_name
		|| opt_ibx_incremental_history_uuid)) {
		ibx_msg("Error: --incremental-lsn, --incremental-basedir, "
			"--incremental-history-name and "
			"--incremental-history-uuid require the "
			"--incremental option.\n");
		return(false);
	}
	if (opt_ibx_databases != NULL) {
		/* A leading path separator means --databases names a file
		holding the list, not the list itself. */
		if (is_path_separator(*opt_ibx_databases)) {
			xtrabackup_databases_file = opt_ibx_databases;
		} else {
			xtrabackup_databases = opt_ibx_databases;
		}
	}
	/* --tables and --tables-file options are xtrabackup only */
	ibx_partial_backup = (opt_ibx_include || opt_ibx_databases);
	if (ibx_mode == IBX_MODE_BACKUP) {
		if (!make_backup_dir()) {
			return(false);
		}
	}
	/* --binlog-info is xtrabackup only, so force
	--binlog-info=ON. i.e. behavior before the feature had been
	implemented */
	opt_binlog_info = BINLOG_INFO_ON;
	/* Translate ibx_mode into the xtrabackup operation flags and the
	target directory. */
	switch (ibx_mode) {
	case IBX_MODE_APPLY_LOG:
		xtrabackup_prepare = TRUE;
		if (opt_ibx_redo_only) {
			xtrabackup_apply_log_only = TRUE;
		}
		xtrabackup_target_dir = ibx_position_arg;
		run = "apply-log";
		break;
	case IBX_MODE_BACKUP:
		xtrabackup_backup = TRUE;
		xtrabackup_target_dir = ibx_backup_directory;
		if (opt_ibx_include != NULL) {
			xtrabackup_tables = opt_ibx_include;
		}
		run = "backup";
		break;
	case IBX_MODE_COPY_BACK:
		xtrabackup_copy_back = TRUE;
		xtrabackup_target_dir = ibx_position_arg;
		run = "copy-back";
		break;
	case IBX_MODE_MOVE_BACK:
		xtrabackup_move_back = TRUE;
		xtrabackup_target_dir = ibx_position_arg;
		run = "move-back";
		break;
	case IBX_MODE_DECRYPT_DECOMPRESS:
		xtrabackup_decrypt_decompress = TRUE;
		xtrabackup_target_dir = ibx_position_arg;
		run = "decrypt and decompress";
		break;
	default:
		ut_error;
	}
	ibx_msg("Starting the %s operation\n\n"
		"IMPORTANT: Please check that the %s run completes "
		"successfully.\n"
		"           At the end of a successful %s run innobackupex\n"
		"           prints \"completed OK!\".\n\n", run, run, run);
	return(true);
}
/* Release resources allocated during innobackupex option processing.
Frees the backup directory path allocated by make_backup_dir() and
resets the pointer so a repeated call is a harmless no-op instead of a
double free. */
void
ibx_cleanup()
{
	free(ibx_backup_directory);
	ibx_backup_directory = NULL;
}
/******************************************************
Copyright (c) 2011-2014 Percona LLC and/or its affiliates.
Declarations for innobackupex.cc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef INNOBACKUPEX_H
#define INNOBACKUPEX_H

/* Program name under which the innobackupex compatibility mode runs. */
#define INNOBACKUPEX_BIN_NAME "innobackupex"

/* Operation selected on the innobackupex command line; set by
ibx_handle_options(). */
enum ibx_mode_t {
	IBX_MODE_BACKUP,		/* take a backup (the default) */
	IBX_MODE_APPLY_LOG,		/* --apply-log: prepare a backup */
	IBX_MODE_COPY_BACK,		/* --copy-back: restore by copying */
	IBX_MODE_MOVE_BACK,		/* --move-back: restore by moving */
	IBX_MODE_DECRYPT_DECOMPRESS	/* --decrypt and/or --decompress */
};

extern ibx_mode_t ibx_mode;

/* Parse the innobackupex command line; sets ibx_mode and the
positional (backup directory) argument. Returns false on usage error. */
bool
ibx_handle_options(int *argc, char ***argv);

/* Map innobackupex options onto xtrabackup settings and select the
operation to run. Returns true on success. */
bool
ibx_init();

/* Free resources allocated during option processing. */
void
ibx_cleanup();

#endif
// Fast data compression library
// Copyright (C) 2006-2011 Lasse Mikkel Reinhold
// lar@quicklz.com
//
// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything
// released into public must be open source) or under a commercial license if such
// has been acquired (see http://www.quicklz.com/order.html). The commercial license
// does not cover derived or ported versions created by third parties under GPL.
// 1.5.0 final
#include "quicklz.h"
/* quicklz.c and quicklz.h must come from the same release. */
#if QLZ_VERSION_MAJOR != 1 || QLZ_VERSION_MINOR != 5 || QLZ_VERSION_REVISION != 0
	#error quicklz.c and quicklz.h have different versions
#endif

/* Detect 32/64-bit x86, where this file uses unaligned word loads and
stores as a fast path. */
#if (defined(__X86__) || defined(__i386__) || defined(i386) || defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || defined(_M_X64))
	#define X86X64
#endif

/* Minimum back-reference distance: matches at offset <= MINOFFSET are
not emitted (see the "src - o > MINOFFSET" checks in the compressor). */
#define MINOFFSET 2
/* NOTE(review): length threshold used when sizing last_matchstart --
presumably the shortest match encoded without further checks; confirm
against the QuickLZ format description. */
#define UNCONDITIONAL_MATCHLEN 6
/* Trailing bytes of the input that are always stored uncompressed. */
#define UNCOMPRESSED_END 4
/* Size in bytes of a control word (bitmap of literal/match flags). */
#define CWORD_LEN 4

/* At level 1 on 64-bit builds without a streaming buffer, hash entries
store 32-bit offsets relative to the source pointer instead of full
pointers; OFFSET_BASE/CAST make both representations share one code
path. */
#if QLZ_COMPRESSION_LEVEL == 1 && defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0
	#define OFFSET_BASE source
	#define CAST (ui32)(size_t)
#else
	#define OFFSET_BASE 0
	#define CAST
#endif
// Expose the compile-time QuickLZ configuration to callers at runtime.
// Returns the value for the requested setting id, or -1 for an unknown id.
int qlz_get_setting(int setting)
{
	if (setting == 0)
		return QLZ_COMPRESSION_LEVEL;
	if (setting == 1)
		return sizeof(qlz_state_compress);
	if (setting == 2)
		return sizeof(qlz_state_decompress);
	if (setting == 3)
		return QLZ_STREAMING_BUFFER;
	if (setting == 6)
	{
		// Whether bounds checking was compiled in.
#ifdef QLZ_MEMORY_SAFE
		return 1;
#else
		return 0;
#endif
	}
	if (setting == 7)
		return QLZ_VERSION_MAJOR;
	if (setting == 8)
		return QLZ_VERSION_MINOR;
	if (setting == 9)
		return QLZ_VERSION_REVISION;
	return -1;
}
#if QLZ_COMPRESSION_LEVEL == 1
// Return 1 when the n bytes src[1..n] are all equal to src[0]
// (i.e. n + 1 identical bytes in a row), 0 otherwise.
static int same(const unsigned char *src, size_t n)
{
    size_t i;
    for (i = n; i > 0; i--)
    {
        if (src[i] != src[0])
            return 0;
    }
    return 1;
}
#endif
// Clear the compressor's hash table so stale entries from a previous
// buffer cannot yield false matches.
static void reset_table_compress(qlz_state_compress *state)
{
    int i;
    for(i = 0; i < QLZ_HASH_VALUES; i++)
    {
#if QLZ_COMPRESSION_LEVEL == 1
        // Level 1 keeps one offset per bucket; 0 marks it empty.
        state->hash[i].offset = 0;
#else
        // Levels 2/3 track per-bucket fill counters instead.
        state->hash_counter[i] = 0;
#endif
    }
}
// Reset the decompression-side hash state. Only level 2 keeps
// per-bucket counters to clear; at other levels this is a no-op.
static void reset_table_decompress(qlz_state_decompress *state)
{
    int i;
    // Suppress unused-variable warnings when the loop compiles out.
    (void)state;
    (void)i;
#if QLZ_COMPRESSION_LEVEL == 2
    for(i = 0; i < QLZ_HASH_VALUES; i++)
    {
        state->hash_counter[i] = 0;
    }
#endif
}
// Map a fetched value (24 bits used by callers) to a hash bucket index
// in [0, QLZ_HASH_VALUES). QLZ_HASH_VALUES must be a power of two for
// the mask to work.
static __inline ui32 hash_func(ui32 i)
{
#if QLZ_COMPRESSION_LEVEL == 2
    return ((i >> 9) ^ (i >> 13) ^ i) & (QLZ_HASH_VALUES - 1);
#else
    return ((i >> 12) ^ i) & (QLZ_HASH_VALUES - 1);
#endif
}
// Read 1..4 bytes from src and return them as a little-endian ui32.
// Any other value of bytes yields 0.
static __inline ui32 fast_read(void const *src, ui32 bytes)
{
#ifndef X86X64
    // Portable path: assemble the value byte by byte -- no alignment
    // requirement, explicit little-endian order.
    unsigned char *p = (unsigned char*)src;
    switch (bytes)
    {
    case 4:
        return(*p | *(p + 1) << 8 | *(p + 2) << 16 | *(p + 3) << 24);
    case 3:
        return(*p | *(p + 1) << 8 | *(p + 2) << 16);
    case 2:
        return(*p | *(p + 1) << 8);
    case 1:
        return(*p);
    }
    return 0;
#else
    // x86/x64 path: a single (possibly unaligned) 32-bit load.
    // NOTE(review): for bytes < 4 this reads past the requested length
    // and returns extra high bytes; callers appear to mask the result
    // or tolerate the slack -- confirm buffer padding before changing.
    if (bytes >= 1 && bytes <= 4)
        return *((ui32*)src);
    else
        return 0;
#endif
}
// Hash the 3 bytes at src into a bucket index.
static __inline ui32 hashat(const unsigned char *src)
{
    return hash_func(fast_read(src, 3));
}
// Write the low 1..4 bytes of f to dst in little-endian order.
static __inline void fast_write(ui32 f, void *dst, size_t bytes)
{
#ifndef X86X64
    // Portable path: emit bytes one at a time, low byte first.
    unsigned char *p = (unsigned char*)dst;
    switch (bytes)
    {
    case 4:
        *p = (unsigned char)f;
        *(p + 1) = (unsigned char)(f >> 8);
        *(p + 2) = (unsigned char)(f >> 16);
        *(p + 3) = (unsigned char)(f >> 24);
        return;
    case 3:
        *p = (unsigned char)f;
        *(p + 1) = (unsigned char)(f >> 8);
        *(p + 2) = (unsigned char)(f >> 16);
        return;
    case 2:
        *p = (unsigned char)f;
        *(p + 1) = (unsigned char)(f >> 8);
        return;
    case 1:
        *p = (unsigned char)f;
        return;
    }
#else
    // x86/x64 path: unaligned stores are permitted.
    // NOTE(review): the 3-byte case stores a full 32-bit word, i.e. one
    // byte past the requested length; callers presumably overwrite that
    // byte afterwards -- confirm output buffers always have slack.
    switch (bytes)
    {
    case 4:
        *((ui32*)dst) = f;
        return;
    case 3:
        *((ui32*)dst) = f;
        return;
    case 2:
        *((ui16 *)dst) = (ui16)f;
        return;
    case 1:
        *((unsigned char*)dst) = (unsigned char)f;
        return;
    }
#endif
}
// Return the decompressed payload size recorded in a QuickLZ block
// header. Bit 1 of the flag byte selects 4-byte (long) vs 1-byte
// (short) size fields; the decompressed size is the second field.
size_t qlz_size_decompressed(const char *source)
{
    ui32 field_len = (((*source) & 2) == 2) ? 4 : 1;
    ui32 value = fast_read(source + 1 + field_len, field_len);
    return value & (0xffffffff >> ((4 - field_len) * 8));
}
// Return the compressed block size recorded in a QuickLZ block header.
// Bit 1 of the flag byte selects 4-byte (long) vs 1-byte (short) size
// fields; the compressed size is the first field, right after the flag.
size_t qlz_size_compressed(const char *source)
{
    ui32 field_len = (((*source) & 2) == 2) ? 4 : 1;
    ui32 value = fast_read(source + 1, field_len);
    return value & (0xffffffff >> ((4 - field_len) * 8));
}
// Return the total QuickLZ block header size: one flag byte plus two
// size fields, each 4 bytes (long headers, flag bit 1 set) or 1 byte
// (short headers).
size_t qlz_size_header(const char *source)
{
    size_t field_len = (((*source) & 2) == 2) ? 4 : 1;
    return 2 * field_len + 1;
}
// Forward copy of n bytes where dst and src may overlap with dst > src
// (LZ back-reference expansion: bytes written earlier are re-read).
static __inline void memcpy_up(unsigned char *dst, const unsigned char *src, ui32 n)
{
    // Caution if modifying memcpy_up! Overlap of dst and src must be special handled.
#ifndef X86X64
    // Portable path: strict byte-by-byte forward copy, correct for any
    // overlap distance >= 1.
    unsigned char *end = dst + n;
    while(dst < end)
    {
        *dst = *src;
        dst++;
        src++;
    }
#else
    // x86/x64 path: 32-bit word copies stepping MINOFFSET + 1 = 3
    // bytes; callers only encode matches at distances > MINOFFSET, so
    // each word load never reads bytes this loop has not yet written.
    // NOTE(review): may store up to 3 bytes past dst + n -- the output
    // buffer must have slack; confirm before reusing elsewhere.
    ui32 f = 0;
    do
    {
        *(ui32 *)(dst + f) = *(ui32 *)(src + f);
        f += MINOFFSET + 1;
    }
    while (f < n);
#endif
}
// Record position s in the decompression-side match hash table so it
// mirrors the table the compressor built (levels 1 and 2 only).
static __inline void update_hash(qlz_state_decompress *state, const unsigned char *s)
{
#if QLZ_COMPRESSION_LEVEL == 1
    // Level 1: a single offset per bucket; counter 1 marks it valid.
    ui32 hash;
    hash = hashat(s);
    state->hash[hash].offset = s;
    state->hash_counter[hash] = 1;
#elif QLZ_COMPRESSION_LEVEL == 2
    // Level 2: round-robin over QLZ_POINTERS slots per bucket; the
    // counter doubles as the next-slot index and fill count.
    ui32 hash;
    unsigned char c;
    hash = hashat(s);
    c = state->hash_counter[hash];
    state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = s;
    c++;
    state->hash_counter[hash] = c;
#endif
    // At level 3 the body compiles away; silence unused warnings.
    (void)state;
    (void)s;
}
#if QLZ_COMPRESSION_LEVEL <= 2
// Insert every position in (*lh, max] into the hash table, advancing
// *lh to max. Note the pre-increment: *lh itself is assumed to have
// been hashed already.
static void update_hash_upto(qlz_state_decompress *state, unsigned char **lh, const unsigned char *max)
{
    while(*lh < max)
    {
        (*lh)++;
        update_hash(state, *lh);
    }
}
#endif
static size_t qlz_compress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_compress *state)
{
const unsigned char *last_byte = source + size - 1;
const unsigned char *src = source;
unsigned char *cword_ptr = destination;
unsigned char *dst = destination + CWORD_LEN;
ui32 cword_val = 1U << 31;
const unsigned char *last_matchstart = last_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END;
ui32 fetch = 0;
unsigned int lits = 0;
(void) lits;
if(src <= last_matchstart)
fetch = fast_read(src, 3);
while(src <= last_matchstart)
{
if ((cword_val & 1) == 1)
{
// store uncompressed if compression ratio is too low
if (src > source + (size >> 1) && dst - destination > src - source - ((src - source) >> 5))
return 0;
fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
cword_ptr = dst;
dst += CWORD_LEN;
cword_val = 1U << 31;
fetch = fast_read(src, 3);
}
#if QLZ_COMPRESSION_LEVEL == 1
{
const unsigned char *o;
ui32 hash, cached;
hash = hash_func(fetch);
cached = fetch ^ state->hash[hash].cache;
state->hash[hash].cache = fetch;
o = state->hash[hash].offset + OFFSET_BASE;
state->hash[hash].offset = CAST(src - OFFSET_BASE);
#ifdef X86X64
if ((cached & 0xffffff) == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6))))
{
if(cached != 0)
{
#else
if (cached == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6))))
{
if (*(o + 3) != *(src + 3))
{
#endif
hash <<= 4;
cword_val = (cword_val >> 1) | (1U << 31);
fast_write((3 - 2) | hash, dst, 2);
src += 3;
dst += 2;
}
else
{
const unsigned char *old_src = src;
size_t matchlen;
hash <<= 4;
cword_val = (cword_val >> 1) | (1U << 31);
src += 4;
if(*(o + (src - old_src)) == *src)
{
src++;
if(*(o + (src - old_src)) == *src)
{
size_t q = last_byte - UNCOMPRESSED_END - (src - 5) + 1;
size_t remaining = q > 255 ? 255 : q;
src++;
while(*(o + (src - old_src)) == *src && (size_t)(src - old_src) < remaining)
src++;
}
}
matchlen = src - old_src;
if (matchlen < 18)
{
fast_write((ui32)(matchlen - 2) | hash, dst, 2);
dst += 2;
}
else
{
fast_write((ui32)(matchlen << 16) | hash, dst, 3);
dst += 3;
}
}
fetch = fast_read(src, 3);
lits = 0;
}
else
{
lits++;
*dst = *src;
src++;
dst++;
cword_val = (cword_val >> 1);
#ifdef X86X64
fetch = fast_read(src, 3);
#else
fetch = (fetch >> 8 & 0xffff) | (*(src + 2) << 16);
#endif
}
}
#elif QLZ_COMPRESSION_LEVEL >= 2
{
const unsigned char *o, *offset2;
ui32 hash, matchlen, k, m, best_k = 0;
unsigned char c;
size_t remaining = (last_byte - UNCOMPRESSED_END - src + 1) > 255 ? 255 : (last_byte - UNCOMPRESSED_END - src + 1);
(void)best_k;
//hash = hashat(src);
fetch = fast_read(src, 3);
hash = hash_func(fetch);
c = state->hash_counter[hash];
offset2 = state->hash[hash].offset[0];
if(offset2 < src - MINOFFSET && c > 0 && ((fast_read(offset2, 3) ^ fetch) & 0xffffff) == 0)
{
matchlen = 3;
if(*(offset2 + matchlen) == *(src + matchlen))
{
matchlen = 4;
while(*(offset2 + matchlen) == *(src + matchlen) && matchlen < remaining)
matchlen++;
}
}
else
matchlen = 0;
for(k = 1; k < QLZ_POINTERS && c > k; k++)
{
o = state->hash[hash].offset[k];
#if QLZ_COMPRESSION_LEVEL == 3
if(((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET)
#elif QLZ_COMPRESSION_LEVEL == 2
if(*(src + matchlen) == *(o + matchlen) && ((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET)
#endif
{
m = 3;
while(*(o + m) == *(src + m) && m < remaining)
m++;
#if QLZ_COMPRESSION_LEVEL == 3
if ((m > matchlen) || (m == matchlen && o > offset2))
#elif QLZ_COMPRESSION_LEVEL == 2
if (m > matchlen)
#endif
{
offset2 = o;
matchlen = m;
best_k = k;
}
}
}
o = offset2;
state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src;
c++;
state->hash_counter[hash] = c;
#if QLZ_COMPRESSION_LEVEL == 3
if(matchlen > 2 && src - o < 131071)
{
ui32 u;
size_t offset = src - o;
for(u = 1; u < matchlen; u++)
{
hash = hashat(src + u);
c = state->hash_counter[hash]++;
state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src + u;
}
cword_val = (cword_val >> 1) | (1U << 31);
src += matchlen;
if(matchlen == 3 && offset <= 63)
{
*dst = (unsigned char)(offset << 2);
dst++;
}
else if (matchlen == 3 && offset <= 16383)
{
ui32 f = (ui32)((offset << 2) | 1);
fast_write(f, dst, 2);
dst += 2;
}
else if (matchlen <= 18 && offset <= 1023)
{
ui32 f = ((matchlen - 3) << 2) | ((ui32)offset << 6) | 2;
fast_write(f, dst, 2);
dst += 2;
}
else if(matchlen <= 33)
{
ui32 f = ((matchlen - 2) << 2) | ((ui32)offset << 7) | 3;
fast_write(f, dst, 3);
dst += 3;
}
else
{
ui32 f = ((matchlen - 3) << 7) | ((ui32)offset << 15) | 3;
fast_write(f, dst, 4);
dst += 4;
}
}
else
{
*dst = *src;
src++;
dst++;
cword_val = (cword_val >> 1);
}
#elif QLZ_COMPRESSION_LEVEL == 2
if(matchlen > 2)
{
cword_val = (cword_val >> 1) | (1U << 31);
src += matchlen;
if (matchlen < 10)
{
ui32 f = best_k | ((matchlen - 2) << 2) | (hash << 5);
fast_write(f, dst, 2);
dst += 2;
}
else
{
ui32 f = best_k | (matchlen << 16) | (hash << 5);
fast_write(f, dst, 3);
dst += 3;
}
}
else
{
*dst = *src;
src++;
dst++;
cword_val = (cword_val >> 1);
}
#endif
}
#endif
}
while (src <= last_byte)
{
if ((cword_val & 1) == 1)
{
fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
cword_ptr = dst;
dst += CWORD_LEN;
cword_val = 1U << 31;
}
#if QLZ_COMPRESSION_LEVEL < 3
if (src <= last_byte - 3)
{
#if QLZ_COMPRESSION_LEVEL == 1
ui32 hash, fetch;
fetch = fast_read(src, 3);
hash = hash_func(fetch);
state->hash[hash].offset = CAST(src - OFFSET_BASE);
state->hash[hash].cache = fetch;
#elif QLZ_COMPRESSION_LEVEL == 2
ui32 hash;
unsigned char c;
hash = hashat(src);
c = state->hash_counter[hash];
state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src;
c++;
state->hash_counter[hash] = c;
#endif
}
#endif
*dst = *src;
src++;
dst++;
cword_val = (cword_val >> 1);
}
while((cword_val & 1) != 1)
cword_val = (cword_val >> 1);
fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
// min. size must be 9 bytes so that the qlz_size functions can take 9 bytes as argument
return dst - destination < 9 ? 9 : dst - destination;
}
/*
  QuickLZ 1.5.0 core decompression loop.

  source      - compressed buffer, beginning with the qlz size header
  destination - output buffer of `size` bytes
  size        - decompressed size (taken from the header by the caller)
  state       - decompression state; its hash tables are used only for
                compression levels 1 and 2
  history     - lowest address a back-reference may legally point into
                (== destination for the non-streaming path; the streaming
                path passes the stream buffer)

  Returns `size` on success.  When QLZ_MEMORY_SAFE is defined, returns 0
  as soon as a corrupt or truncated stream is detected.
*/
static size_t qlz_decompress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_decompress *state, const unsigned char *history)
{
const unsigned char *src = source + qlz_size_header((const char *)source);
unsigned char *dst = destination;
const unsigned char *last_destination_byte = destination + size - 1;
ui32 cword_val = 1;
const unsigned char *last_matchstart = last_destination_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END;
unsigned char *last_hashed = destination - 1;
const unsigned char *last_source_byte = source + qlz_size_compressed((const char *)source) - 1;
/* bitlut[x]: number of literal bytes that may be bulk-copied for the low
   four control-word bits x (position of the lowest set bit; 4 when x == 0) */
static const ui32 bitlut[16] = {4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
(void) last_source_byte;
(void) last_hashed;
(void) state;
(void) history;
for(;;)
{
ui32 fetch;
/* cword_val == 1 means only the sentinel bit is left:
   load the next 32-bit control word from the stream */
if (cword_val == 1)
{
#ifdef QLZ_MEMORY_SAFE
if(src + CWORD_LEN - 1 > last_source_byte)
return 0;
#endif
cword_val = fast_read(src, CWORD_LEN);
src += CWORD_LEN;
}
#ifdef QLZ_MEMORY_SAFE
if(src + 4 - 1 > last_source_byte)
return 0;
#endif
fetch = fast_read(src, 4);
/* control bit set: decode a match (back-reference) */
if ((cword_val & 1) == 1)
{
ui32 matchlen;
const unsigned char *offset2;
#if QLZ_COMPRESSION_LEVEL == 1
/* level 1: a 12-bit hash selects the match source from the hash table;
   the 4-bit field holds matchlen - 2, or 0 when the length is stored in
   an extra third byte */
ui32 hash;
cword_val = cword_val >> 1;
hash = (fetch >> 4) & 0xfff;
offset2 = (const unsigned char *)(size_t)state->hash[hash].offset;
if((fetch & 0xf) != 0)
{
matchlen = (fetch & 0xf) + 2;
src += 2;
}
else
{
matchlen = *(src + 2);
src += 3;
}
#elif QLZ_COMPRESSION_LEVEL == 2
/* level 2: an 11-bit hash plus a 2-bit pointer index select the source */
ui32 hash;
unsigned char c;
cword_val = cword_val >> 1;
hash = (fetch >> 5) & 0x7ff;
c = (unsigned char)(fetch & 0x3);
offset2 = state->hash[hash].offset[c];
if((fetch & (28)) != 0)
{
matchlen = ((fetch >> 2) & 0x7) + 2;
src += 2;
}
else
{
matchlen = *(src + 2);
src += 3;
}
#elif QLZ_COMPRESSION_LEVEL == 3
/* level 3: explicit offsets; five variable-length encodings selected by
   the low bits of `fetch` (must mirror the writer in the compressor) */
ui32 offset;
cword_val = cword_val >> 1;
if ((fetch & 3) == 0)
{
offset = (fetch & 0xff) >> 2;
matchlen = 3;
src++;
}
else if ((fetch & 2) == 0)
{
offset = (fetch & 0xffff) >> 2;
matchlen = 3;
src += 2;
}
else if ((fetch & 1) == 0)
{
offset = (fetch & 0xffff) >> 6;
matchlen = ((fetch >> 2) & 15) + 3;
src += 2;
}
else if ((fetch & 127) != 3)
{
offset = (fetch >> 7) & 0x1ffff;
matchlen = ((fetch >> 2) & 0x1f) + 2;
src += 3;
}
else
{
offset = (fetch >> 15);
matchlen = ((fetch >> 7) & 255) + 3;
src += 4;
}
offset2 = dst - offset;
#endif
#ifdef QLZ_MEMORY_SAFE
/* reject matches that point before the history window, too close to
   dst, or that would overrun the destination buffer */
if(offset2 < history || offset2 > dst - MINOFFSET - 1)
return 0;
if(matchlen > (ui32)(last_destination_byte - dst - UNCOMPRESSED_END + 1))
return 0;
#endif
memcpy_up(dst, offset2, matchlen);
dst += matchlen;
#if QLZ_COMPRESSION_LEVEL <= 2
/* keep the decompressor's hash table in sync with the compressor's */
update_hash_upto(state, &last_hashed, dst - matchlen);
last_hashed = dst - 1;
#endif
}
else
{
/* control bit clear: copy literal bytes */
if (dst < last_matchstart)
{
/* bulk-copy up to 4 literals; n = number of clear control bits
   before the next set bit (capped at 4) */
unsigned int n = bitlut[cword_val & 0xf];
#ifdef X86X64
*(ui32 *)dst = *(ui32 *)src;
#else
memcpy_up(dst, src, 4);
#endif
cword_val = cword_val >> n;
dst += n;
src += n;
#if QLZ_COMPRESSION_LEVEL <= 2
update_hash_upto(state, &last_hashed, dst - 3);
#endif
}
else
{
/* stream tail: the final UNCONDITIONAL_MATCHLEN + UNCOMPRESSED_END
   bytes are always encoded as plain literals */
while(dst <= last_destination_byte)
{
if (cword_val == 1)
{
src += CWORD_LEN;
cword_val = 1U << 31;
}
#ifdef QLZ_MEMORY_SAFE
if(src >= last_source_byte + 1)
return 0;
#endif
*dst = *src;
dst++;
src++;
cword_val = cword_val >> 1;
}
#if QLZ_COMPRESSION_LEVEL <= 2
update_hash_upto(state, &last_hashed, last_destination_byte - 3); // todo, use constant
#endif
return size;
}
}
}
}
/*
  Public QuickLZ compression entry point.

  Compresses `size` bytes from source into destination and returns the
  total number of bytes written, including the 3-byte (size < 216) or
  9-byte header.  Returns 0 when size is 0 or cannot be encoded in
  32 bits.

  Header flag byte layout (bit 0 is the LSB):
      76543210
      01SSLLHC
  C  = payload actually compressed (0 = stored raw)
  H  = long (9-byte) header
  LL = QLZ_COMPRESSION_LEVEL
  SS = streaming-buffer size code (0/100000/1000000/other)
*/
size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state)
{
size_t r;
ui32 compressed;
size_t base;
if(size == 0 || size > 0xffffffff - 400)
return 0;
/* small inputs use the compact 3-byte header */
if(size < 216)
base = 3;
else
base = 9;
#if QLZ_STREAMING_BUFFER > 0
/* input does not fit in the remaining streaming buffer: restart stream */
if (state->stream_counter + size - 1 >= QLZ_STREAMING_BUFFER)
#endif
{
reset_table_compress(state);
r = base + qlz_compress_core((const unsigned char *)source, (unsigned char*)destination + base, size, state);
#if QLZ_STREAMING_BUFFER > 0
reset_table_compress(state);
#endif
/* r == base means the core gave up (incompressible data): store raw */
if(r == base)
{
memcpy(destination + base, source, size);
r = size + base;
compressed = 0;
}
else
{
compressed = 1;
}
state->stream_counter = 0;
}
#if QLZ_STREAMING_BUFFER > 0
else
{
/* append to the streaming buffer so matches can reference history
   from earlier qlz_compress calls */
unsigned char *src = state->stream_buffer + state->stream_counter;
memcpy(src, source, size);
r = base + qlz_compress_core(src, (unsigned char*)destination + base, size, state);
if(r == base)
{
memcpy(destination + base, src, size);
r = size + base;
compressed = 0;
reset_table_compress(state);
}
else
{
compressed = 1;
}
state->stream_counter += size;
}
#endif
/* write the header: compressed size then decompressed size */
if(base == 3)
{
*destination = (unsigned char)(0 | compressed);
*(destination + 1) = (unsigned char)r;
*(destination + 2) = (unsigned char)size;
}
else
{
*destination = (unsigned char)(2 | compressed);
fast_write((ui32)r, destination + 1, 4);
fast_write((ui32)size, destination + 5, 4);
}
*destination |= (QLZ_COMPRESSION_LEVEL << 2);
*destination |= (1 << 6);
*destination |= ((QLZ_STREAMING_BUFFER == 0 ? 0 : (QLZ_STREAMING_BUFFER == 100000 ? 1 : (QLZ_STREAMING_BUFFER == 1000000 ? 2 : 3))) << 4);
// 76543210
// 01SSLLHC
return r;
}
/*
  Public QuickLZ decompression entry point.

  destination must have room for qlz_size_decompressed(source) bytes.
  Returns the decompressed size; with QLZ_MEMORY_SAFE defined,
  qlz_decompress_core may return 0 for corrupt input instead.
*/
size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state)
{
size_t dsiz = qlz_size_decompressed(source);
#if QLZ_STREAMING_BUFFER > 0
/* block too large for the remaining streaming buffer: restart stream */
if (state->stream_counter + qlz_size_decompressed(source) - 1 >= QLZ_STREAMING_BUFFER)
#endif
{
/* header bit 0 says whether the payload was actually compressed */
if((*source & 1) == 1)
{
reset_table_decompress(state);
dsiz = qlz_decompress_core((const unsigned char *)source, (unsigned char *)destination, dsiz, state, (const unsigned char *)destination);
}
else
{
/* stored raw: plain copy after the header */
memcpy(destination, source + qlz_size_header(source), dsiz);
}
state->stream_counter = 0;
reset_table_decompress(state);
}
#if QLZ_STREAMING_BUFFER > 0
else
{
/* decompress into the streaming buffer so later blocks can use this
   block's output as match history */
unsigned char *dst = state->stream_buffer + state->stream_counter;
if((*source & 1) == 1)
{
dsiz = qlz_decompress_core((const unsigned char *)source, dst, dsiz, state, (const unsigned char *)state->stream_buffer);
}
else
{
memcpy(dst, source + qlz_size_header(source), dsiz);
reset_table_decompress(state);
}
memcpy(destination, dst, dsiz);
state->stream_counter += dsiz;
}
#endif
return dsiz;
}
#ifndef QLZ_HEADER
#define QLZ_HEADER
// Fast data compression library
// Copyright (C) 2006-2011 Lasse Mikkel Reinhold
// lar@quicklz.com
//
// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything
// released into public must be open source) or under a commercial license if such
// has been acquired (see http://www.quicklz.com/order.html). The commercial license
// does not cover derived or ported versions created by third parties under GPL.
// You can edit following user settings. Data must be decompressed with the same
// setting of QLZ_COMPRESSION_LEVEL and QLZ_STREAMING_BUFFER as it was compressed
// (see manual). If QLZ_STREAMING_BUFFER > 0, scratch buffers must be initially
// zeroed out (see manual). First #ifndef makes it possible to define settings from
// the outside like the compiler command line.
// 1.5.0 final
#ifndef QLZ_COMPRESSION_LEVEL
// Level 1 = fastest, level 3 = best ratio.  QLZ_STREAMING_BUFFER > 0
// enables streaming mode with that much history.  QLZ_MEMORY_SAFE adds
// bounds checking against corrupt/truncated compressed input.
#define QLZ_COMPRESSION_LEVEL 1
//#define QLZ_COMPRESSION_LEVEL 2
//#define QLZ_COMPRESSION_LEVEL 3
#define QLZ_STREAMING_BUFFER 0
//#define QLZ_STREAMING_BUFFER 100000
//#define QLZ_STREAMING_BUFFER 1000000
//#define QLZ_MEMORY_SAFE
#endif
#define QLZ_VERSION_MAJOR 1
#define QLZ_VERSION_MINOR 5
#define QLZ_VERSION_REVISION 0
// Using size_t, memset() and memcpy()
#include <string.h>
// Verify compression level
#if QLZ_COMPRESSION_LEVEL != 1 && QLZ_COMPRESSION_LEVEL != 2 && QLZ_COMPRESSION_LEVEL != 3
#error QLZ_COMPRESSION_LEVEL must be 1, 2 or 3
#endif
typedef unsigned int ui32;
typedef unsigned short int ui16;
// Decrease QLZ_POINTERS for level 3 to increase compression speed. Do not touch any other values!
// QLZ_POINTERS = match candidates kept per hash bucket;
// QLZ_HASH_VALUES = number of hash buckets.
#if QLZ_COMPRESSION_LEVEL == 1
#define QLZ_POINTERS 1
#define QLZ_HASH_VALUES 4096
#elif QLZ_COMPRESSION_LEVEL == 2
#define QLZ_POINTERS 4
#define QLZ_HASH_VALUES 2048
#elif QLZ_COMPRESSION_LEVEL == 3
#define QLZ_POINTERS 16
#define QLZ_HASH_VALUES 4096
#endif
// Detect if pointer size is 64-bit. It's not fatal if some 64-bit target is not detected because this is only for adding an optional 64-bit optimization.
#if defined _LP64 || defined __LP64__ || defined __64BIT__ || _ADDR64 || defined _WIN64 || defined __arch64__ || __WORDSIZE == 64 || (defined __sparc && defined __sparcv9) || defined __x86_64 || defined __amd64 || defined __x86_64__ || defined _M_X64 || defined _M_IA64 || defined __ia64 || defined __IA64__
#define QLZ_PTR_64
#endif
// hash entry
typedef struct
{
#if QLZ_COMPRESSION_LEVEL == 1
// level 1 caches 3 bytes of data seen at the stored offset so an
// unlikely match can be rejected without dereferencing the offset
ui32 cache;
#if defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0
// 32-bit offset (relative to the source base) halves the table size
// on 64-bit targets
unsigned int offset;
#else
const unsigned char *offset;
#endif
#else
// levels 2/3 keep several candidate offsets per bucket
const unsigned char *offset[QLZ_POINTERS];
#endif
} qlz_hash_compress;
typedef struct
{
#if QLZ_COMPRESSION_LEVEL == 1
const unsigned char *offset;
#else
const unsigned char *offset[QLZ_POINTERS];
#endif
} qlz_hash_decompress;
// states
// Compression state; must be zero-initialized when streaming is enabled.
typedef struct
{
#if QLZ_STREAMING_BUFFER > 0
unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
#endif
size_t stream_counter;
qlz_hash_compress hash[QLZ_HASH_VALUES];
unsigned char hash_counter[QLZ_HASH_VALUES];
} qlz_state_compress;
#if QLZ_COMPRESSION_LEVEL == 1 || QLZ_COMPRESSION_LEVEL == 2
// Decompression state; levels 1/2 mirror the compressor's hash table.
typedef struct
{
#if QLZ_STREAMING_BUFFER > 0
unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
#endif
qlz_hash_decompress hash[QLZ_HASH_VALUES];
unsigned char hash_counter[QLZ_HASH_VALUES];
size_t stream_counter;
} qlz_state_decompress;
#elif QLZ_COMPRESSION_LEVEL == 3
// Level 3 encodes explicit offsets, so no hash table is needed here.
typedef struct
{
#if QLZ_STREAMING_BUFFER > 0
unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
#endif
#if QLZ_COMPRESSION_LEVEL <= 2
qlz_hash_decompress hash[QLZ_HASH_VALUES];
#endif
size_t stream_counter;
} qlz_state_decompress;
#endif
#if defined (__cplusplus)
extern "C" {
#endif
// Public functions of QuickLZ
size_t qlz_size_decompressed(const char *source);
size_t qlz_size_compressed(const char *source);
size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state);
size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state);
int qlz_get_setting(int setting);
size_t qlz_size_header(const char *source);
#if defined (__cplusplus)
}
#endif
#endif
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2012 Percona Inc.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Data file read filter implementation */
#include "read_filt.h"
#include "common.h"
#include "fil_cur.h"
#include "xtrabackup.h"
/****************************************************************//**
Shared initialization for all read filter contexts: reset the read
position and capture sizing information from the file cursor. */
static
void
common_init(
/*========*/
	xb_read_filt_ctxt_t*	ctxt,	/*!<in/out: read filter context */
	const xb_fil_cur_t*	cursor)	/*!<in: file cursor */
{
	/* Everything we need to know about the file comes from cursor. */
	ctxt->data_file_size = cursor->statinfo.st_size;
	ctxt->buffer_capacity = cursor->buf_size;
	ctxt->page_size = cursor->page_size;

	/* Reading always starts from the beginning of the data file. */
	ctxt->offset = 0;
}
/****************************************************************//**
Initialize the pass-through read filter.  The space id is unused:
every page is read regardless of any changed-page information. */
static
void
rf_pass_through_init(
/*=================*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
const xb_fil_cur_t* cursor, /*!<in: file cursor */
ulint space_id __attribute__((unused)))
/*!<in: space id we are reading */
{
common_init(ctxt, cursor);
}
/****************************************************************//**
Compute the next batch for the pass-through filter: everything that
remains in the data file, clamped to the read buffer capacity. */
static
void
rf_pass_through_get_next_batch(
/*===========================*/
	xb_read_filt_ctxt_t*	ctxt,			/*!<in/out: read filter
							context */
	ib_int64_t*		read_batch_start,	/*!<out: starting read
							offset in bytes for the
							next batch of pages */
	ib_int64_t*		read_batch_len)		/*!<out: length in
							bytes of the next batch
							of pages */
{
	ib_int64_t	remaining = ctxt->data_file_size - ctxt->offset;
	ib_int64_t	len = (remaining > ctxt->buffer_capacity)
		? ctxt->buffer_capacity : remaining;

	*read_batch_start = ctxt->offset;
	*read_batch_len = len;

	/* Advance the cursor past the batch we just handed out. */
	ctxt->offset += len;
}
/****************************************************************//**
Deinitialize the pass-through read filter.  Nothing to release: this
filter allocates no resources in init. */
static
void
rf_pass_through_deinit(
/*===================*/
xb_read_filt_ctxt_t* ctxt __attribute__((unused)))
/*!<in: read filter context */
{
}
/****************************************************************//**
Initialize the changed page bitmap-based read filter. Assumes that
the bitmap is already set up in changed_page_bitmap. */
static
void
rf_bitmap_init(
/*===========*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
context */
const xb_fil_cur_t* cursor, /*!<in: read cursor */
ulint space_id) /*!<in: space id */
{
common_init(ctxt, cursor);
/* Create a bitmap range iterator for this tablespace; released in
rf_bitmap_deinit() */
ctxt->bitmap_range = xb_page_bitmap_range_init(changed_page_bitmap,
space_id);
/* Force the first get_next_batch call to query the bitmap */
ctxt->filter_batch_end = 0;
}
/****************************************************************//**
Get the next batch of pages for the bitmap read filter.  Sets
*read_batch_len to 0 when no changed pages remain. */
static
void
rf_bitmap_get_next_batch(
/*=====================*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
context */
ib_int64_t* read_batch_start, /*!<out: starting read
offset in bytes for the
next batch of pages */
ib_int64_t* read_batch_len) /*!<out: length in
bytes of the next batch
of pages */
{
ulint start_page_id;
start_page_id = ctxt->offset / ctxt->page_size;
xb_a (ctxt->offset % ctxt->page_size == 0);
if (start_page_id == ctxt->filter_batch_end) {
/* Used up all the previous bitmap range, get some more */
ulint next_page_id;
/* Find the next changed page using the bitmap */
next_page_id = xb_page_bitmap_range_get_next_bit
(ctxt->bitmap_range, TRUE);
if (next_page_id == ULINT_UNDEFINED) {
/* No changed pages left in this tablespace */
*read_batch_len = 0;
return;
}
ctxt->offset = next_page_id * ctxt->page_size;
/* Find the end of the current changed page block by searching
for the next cleared bitmap bit */
ctxt->filter_batch_end
= xb_page_bitmap_range_get_next_bit(ctxt->bitmap_range,
FALSE);
xb_a(next_page_id < ctxt->filter_batch_end);
}
*read_batch_start = ctxt->offset;
if (ctxt->filter_batch_end == ULINT_UNDEFINED) {
/* No more cleared bits in the bitmap, need to copy all the
remaining pages. */
*read_batch_len = ctxt->data_file_size - ctxt->offset;
} else {
*read_batch_len = ctxt->filter_batch_end * ctxt->page_size
- ctxt->offset;
}
/* If the page block is larger than the buffer capacity, limit it to
buffer capacity. The subsequent invocations will continue returning
the current block in buffer-sized pieces until ctxt->filter_batch_end
is reached, triggering the next bitmap query. */
if (*read_batch_len > ctxt->buffer_capacity) {
*read_batch_len = ctxt->buffer_capacity;
}
ctxt->offset += *read_batch_len;
xb_a (ctxt->offset % ctxt->page_size == 0);
xb_a (*read_batch_start % ctxt->page_size == 0);
xb_a (*read_batch_len % ctxt->page_size == 0);
}
/****************************************************************//**
Deinitialize the changed page bitmap-based read filter: release the
bitmap range iterator created in rf_bitmap_init(). */
static
void
rf_bitmap_deinit(
/*=============*/
xb_read_filt_ctxt_t* ctxt) /*!<in/out: read filter context */
{
xb_page_bitmap_range_deinit(ctxt->bitmap_range);
}
/* The pass-through read filter: reads every page of the data file
(full backups) */
xb_read_filt_t rf_pass_through = {
&rf_pass_through_init,
&rf_pass_through_get_next_batch,
&rf_pass_through_deinit
};
/* The changed page bitmap-based read filter: reads only page blocks
marked changed in the bitmap (incremental backups) */
xb_read_filt_t rf_bitmap = {
&rf_bitmap_init,
&rf_bitmap_get_next_batch,
&rf_bitmap_deinit
};
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2012 Percona Inc.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Data file read filter interface */
#ifndef XB_READ_FILT_H
#define XB_READ_FILT_H
#include "changed_page_bitmap.h"
struct xb_fil_cur_t;
/* The read filter context: per-file state shared by all filters */
struct xb_read_filt_ctxt_t {
ib_int64_t offset; /*!< current file offset */
ib_int64_t data_file_size; /*!< data file size */
ib_int64_t buffer_capacity;/*!< read buffer capacity */
ulint space_id; /*!< space id */
/* The following fields used only in bitmap filter */
/* Move these to union if any other filters are added in future */
xb_page_bitmap_range *bitmap_range; /*!< changed page bitmap range
iterator for space_id */
ulint page_size; /*!< page size */
ulint filter_batch_end;/*!< the ending page id of the
current changed page block in
the bitmap */
};
/* The read filter: a virtual table of operations; implemented by
rf_pass_through and rf_bitmap below */
struct xb_read_filt_t {
void (*init)(xb_read_filt_ctxt_t* ctxt,
const xb_fil_cur_t* cursor,
ulint space_id);
void (*get_next_batch)(xb_read_filt_ctxt_t* ctxt,
ib_int64_t* read_batch_start,
ib_int64_t* read_batch_len);
void (*deinit)(xb_read_filt_ctxt_t* ctxt);
};
extern xb_read_filt_t rf_pass_through;
extern xb_read_filt_t rf_bitmap;
#endif
# version_check script prologue: pragmas, minimum Perl version, and
# detection of the optional DBD::mysql driver.
use warnings FATAL => 'all';
use strict;
use English qw(-no_match_vars);
use POSIX "strftime";
my @required_perl_version = (5, 0, 5);
my $required_perl_version_old_style = 5.005;
# check existence of DBD::mysql module
eval {
require DBD::mysql;
};
# 1 when DBD::mysql loaded cleanly, 0 otherwise
my $dbd_mysql_installed = $EVAL_ERROR ? 0 : 1;
my $now;
my %mysql;
my $prefix = "version_check";
# ###########################################################################
# HTTPMicro package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the Bazaar repository at,
# lib/HTTPMicro.pm
# t/lib/HTTPMicro.t
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
# HTTPMicro: minimal HTTP/1.1 client, vendored from Percona Toolkit.
{
package HTTPMicro;
BEGIN {
$HTTPMicro::VERSION = '0.001';
}
use strict;
use warnings;
use Carp ();
my @attributes;
BEGIN {
@attributes = qw(agent timeout);
no strict 'refs';
# generate a combined getter/setter accessor for each attribute
for my $accessor ( @attributes ) {
*{$accessor} = sub {
@_ > 1 ? $_[0]->{$accessor} = $_[1] : $_[0]->{$accessor};
};
}
}
# Construct a client.  Recognized options: agent, timeout (seconds).
# The default User-Agent string is derived from the class name and
# version, e.g. "HTTPMicro/0.001".
sub new {
    my ($class, %opts) = @_;
    my $ua_name = $class;
    $ua_name =~ s{::}{-}g;
    my $self = {
        agent   => $ua_name . "/" . ($class->VERSION || 0),
        timeout => 60,
    };
    for my $attr ( @attributes ) {
        $self->{$attr} = $opts{$attr} if exists $opts{$attr}
    }
    return bless $self, $class;
}
# Default TCP ports used when the URL does not specify one explicitly.
my %DefaultPort = (
http => 80,
https => 443,
);
# Issue an HTTP request.  GET requests are retried once when the
# connection dies mid-flight ("Socket closed" / "Unexpected end").
# Any exception from _request is converted into a synthetic 599
# response, so callers always get back a response hashref.
sub request {
my ($self, $method, $url, $args) = @_;
@_ == 3 || (@_ == 4 && ref $args eq 'HASH')
or Carp::croak(q/Usage: $http->request(METHOD, URL, [HASHREF])/);
$args ||= {}; # we keep some state in this during _request
my $response;
for ( 0 .. 1 ) {
$response = eval { $self->_request($method, $url, $args) };
last unless $@ && $method eq 'GET'
&& $@ =~ m{^(?:Socket closed|Unexpected end)};
}
if (my $e = "$@") {
# _request died: report an internal (client-side) failure
$response = {
success => q{},
status => 599,
reason => 'Internal Exception',
content => $e,
headers => {
'content-type' => 'text/plain',
'content-length' => length $e,
}
};
}
return $response;
}
# Perform a single request/response exchange over a fresh connection.
# Dies on any protocol or socket error; request() turns that into a
# 599 response.
sub _request {
my ($self, $method, $url, $args) = @_;
my ($scheme, $host, $port, $path_query) = $self->_split_url($url);
my $request = {
method => $method,
scheme => $scheme,
# omit the port from the Host header when it is the scheme default
host_port => ($port == $DefaultPort{$scheme} ? $host : "$host:$port"),
uri => $path_query,
headers => {},
};
my $handle = HTTPMicro::Handle->new(timeout => $self->{timeout});
$handle->connect($scheme, $host, $port);
$self->_prepare_headers_and_cb($request, $args);
$handle->write_request_header(@{$request}{qw/method uri headers/});
$handle->write_content_body($request) if $request->{content};
my $response;
# skip any 1xx informational responses
do { $response = $handle->read_response_header }
until (substr($response->{status},0,1) ne '1');
# no body for HEAD requests or 204/304 responses
if (!($method eq 'HEAD' || $response->{status} =~ /^[23]04/)) {
$response->{content} = '';
$handle->read_content_body(sub { $_[1]->{content} .= $_[0] }, $response);
}
$handle->close;
$response->{success} = substr($response->{status},0,1) eq '2';
return $response;
}
# Populate the request headers: caller-supplied headers (lower-cased),
# then Host, Connection: close, User-Agent, and the content headers
# when a request body is present.
sub _prepare_headers_and_cb {
my ($self, $request, $args) = @_;
for ($args->{headers}) {
next unless defined;
while (my ($k, $v) = each %$_) {
$request->{headers}{lc $k} = $v;
}
}
$request->{headers}{'host'} = $request->{host_port};
$request->{headers}{'connection'} = "close";
$request->{headers}{'user-agent'} ||= $self->{agent};
if (defined $args->{content}) {
$request->{headers}{'content-type'} ||= "application/octet-stream";
# the body must be bytes, not a wide-character string
utf8::downgrade($args->{content}, 1)
or Carp::croak(q/Wide character in request message body/);
$request->{headers}{'content-length'} = length $args->{content};
$request->{content} = $args->{content};
}
return;
}
# Split a URL into (scheme, host, port, path_query).  The host defaults
# to 'localhost' for an empty authority; the port falls back to the
# scheme's entry in %DefaultPort when not given in the URL.
sub _split_url {
    my $url = pop;
    my ($scheme, $authority, $path_query) = $url =~ m<\A([^:/?#]+)://([^/?#]*)([^#]*)>
      or Carp::croak(qq/Cannot parse URL: '$url'/);
    $scheme = lc $scheme;
    $path_query = "/$path_query" unless $path_query =~ m<\A/>;
    my $host = length($authority) ? lc $authority : 'localhost';
    # strip any userinfo ("user:pass@") prefix
    $host =~ s/\A[^@]*@//;
    my $port;
    if ($host =~ s/:([0-9]*)\z// && length $1) {
        $port = $1;
    }
    else {
        $port = $DefaultPort{$scheme};
    }
    return ($scheme, $host, $port, $path_query);
}
package
HTTPMicro::Handle; # hide from PAUSE/indexers
use strict;
use warnings;
use Carp qw[croak];
use Errno qw[EINTR EPIPE];
use IO::Socket qw[SOCK_STREAM];
# Chunk size for socket reads/writes.
sub BUFSIZE () { 32768 }
# Escape control and non-ASCII characters so raw protocol data can be
# embedded safely in error messages.
my $Printable = sub {
local $_ = shift;
s/\r/\\r/g;
s/\n/\\n/g;
s/\t/\\t/g;
s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge;
$_;
};
# Construct a connection handle.  Defaults: empty read-ahead buffer,
# 60 second timeout, 16 KiB maximum header line size.  Caller-supplied
# options override the defaults.
sub new {
    my ($class, %args) = @_;
    my %defaults = (
        rbuf          => '',
        timeout       => 60,
        max_line_size => 16384,
    );
    return bless { %defaults, %args }, $class;
}
# Options for hostname verification of the server certificate (passed
# to IO::Socket::SSL::verify_hostname or the local fallback helpers).
my $ssl_verify_args = {
check_cn => "when_only",
wildcards_in_alt => "anywhere",
wildcards_in_cn => "anywhere"
};
# Open a TCP connection (plus TLS for https), verifying the server
# certificate hostname when the scheme is https.
sub connect {
@_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/);
my ($self, $scheme, $host, $port) = @_;
if ( $scheme eq 'https' ) {
# load IO::Socket::SSL lazily; only https needs it
eval "require IO::Socket::SSL"
unless exists $INC{'IO/Socket/SSL.pm'};
croak(qq/IO::Socket::SSL must be installed for https support\n/)
unless $INC{'IO/Socket/SSL.pm'};
}
elsif ( $scheme ne 'http' ) {
croak(qq/Unsupported URL scheme '$scheme'\n/);
}
$self->{fh} = 'IO::Socket::INET'->new(
PeerHost => $host,
PeerPort => $port,
Proto => 'tcp',
Type => SOCK_STREAM,
Timeout => $self->{timeout}
) or croak(qq/Could not connect to '$host:$port': $@/);
binmode($self->{fh})
or croak(qq/Could not binmode() socket: '$!'/);
if ( $scheme eq 'https') {
# upgrade the plain socket to TLS in place
IO::Socket::SSL->start_SSL($self->{fh});
ref($self->{fh}) eq 'IO::Socket::SSL'
or die(qq/SSL connection failed for $host\n/);
if ( $self->{fh}->can("verify_hostname") ) {
$self->{fh}->verify_hostname( $host, $ssl_verify_args )
or die(qq/SSL certificate not valid for $host\n/);
}
else {
# older IO::Socket::SSL: use the local verification fallback
my $fh = $self->{fh};
_verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args)
or die(qq/SSL certificate not valid for $host\n/);
}
}
$self->{host} = $host;
$self->{port} = $port;
return $self;
}
# Close the underlying socket; croaks if the close fails.
sub close {
@_ == 1 || croak(q/Usage: $handle->close()/);
my ($self) = @_;
CORE::close($self->{fh})
or croak(qq/Could not close socket: '$!'/);
}
# Write the whole buffer to the socket, honoring the handle timeout and
# retrying on EINTR.  Returns the number of bytes written.
sub write {
@_ == 2 || croak(q/Usage: $handle->write(buf)/);
my ($self, $buf) = @_;
my $len = length $buf;
my $off = 0;
# make a dead peer raise EPIPE instead of killing the process
local $SIG{PIPE} = 'IGNORE';
while () {
$self->can_write
or croak(q/Timed out while waiting for socket to become ready for writing/);
my $r = syswrite($self->{fh}, $buf, $len, $off);
if (defined $r) {
$len -= $r;
$off += $r;
last unless $len > 0;
}
elsif ($! == EPIPE) {
croak(qq/Socket closed by remote server: $!/);
}
elsif ($! != EINTR) {
croak(qq/Could not write to socket: '$!'/);
}
}
return $off;
}
# Read exactly $len bytes, draining the internal read-ahead buffer
# first, then the socket.  Croaks on timeout or premature end of
# stream.
sub read {
@_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/);
my ($self, $len) = @_;
my $buf = '';
my $got = length $self->{rbuf};
if ($got) {
# consume buffered bytes before touching the socket
my $take = ($got < $len) ? $got : $len;
$buf = substr($self->{rbuf}, 0, $take, '');
$len -= $take;
}
while ($len > 0) {
$self->can_read
or croak(q/Timed out while waiting for socket to become ready for reading/);
my $r = sysread($self->{fh}, $buf, $len, length $buf);
if (defined $r) {
last unless $r;
$len -= $r;
}
elsif ($! != EINTR) {
croak(qq/Could not read from socket: '$!'/);
}
}
if ($len) {
croak(q/Unexpected end of stream/);
}
return $buf;
}
# Read one CRLF- (or bare LF-) terminated line from the socket,
# buffering any extra bytes in rbuf for later reads.
sub readline {
@_ == 1 || croak(q/Usage: $handle->readline()/);
my ($self) = @_;
while () {
if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) {
return $1;
}
$self->can_read
or croak(q/Timed out while waiting for socket to become ready for reading/);
my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf});
if (defined $r) {
last unless $r;
}
elsif ($! != EINTR) {
croak(qq/Could not read from socket: '$!'/);
}
}
croak(q/Unexpected end of stream while looking for line/);
}
# Parse header lines (including obsolete continuation lines) up to the
# terminating blank line; returns a hashref keyed by lower-cased field
# name.
sub read_header_lines {
@_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/);
my ($self, $headers) = @_;
$headers ||= {};
my $lines = 0;
my $val;
while () {
my $line = $self->readline;
if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) {
my ($field_name) = lc $1;
# keep a reference so continuation lines can append to this value
$val = \($headers->{$field_name} = $2);
}
elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) {
$val
or croak(q/Unexpected header continuation line/);
next unless length $1;
$$val .= ' ' if length $$val;
$$val .= $1;
}
elsif ($line =~ /\A \x0D?\x0A \z/x) {
# blank line terminates the header section
last;
}
else {
croak(q/Malformed header line: / . $Printable->($line));
}
}
return $headers;
}
sub write_header_lines {
(@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/);
my($self, $headers) = @_;
my $buf = '';
while (my ($k, $v) = each %$headers) {
my $field_name = lc $k;
$field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x
or croak(q/Invalid HTTP header field name: / . $Printable->($field_name));
$field_name =~ s/\b(\w)/\u$1/g;
$buf .= "$field_name: $v\x0D\x0A";
}
$buf .= "\x0D\x0A";
return $self->write($buf);
}
# Stream the response body to a callback in BUFSIZE chunks.  The length
# comes from the Content-Length header; chunked transfer encoding is
# not supported by this client.
sub read_content_body {
@_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/);
my ($self, $cb, $response, $len) = @_;
$len ||= $response->{headers}{'content-length'};
croak("No content-length in the returned response, and this "
. "UA doesn't implement chunking") unless defined $len;
while ($len > 0) {
my $read = ($len > BUFSIZE) ? BUFSIZE : $len;
$cb->($self->read($read), $response);
$len -= $read;
}
return;
}
# Send the request body and verify the byte count matches the
# Content-Length header that was announced.
sub write_content_body {
@_ == 2 || croak(q/Usage: $handle->write_content_body(request)/);
my ($self, $request) = @_;
my ($len, $content_length) = (0, $request->{headers}{'content-length'});
$len += $self->write($request->{content});
$len == $content_length
or croak(qq/Content-Length missmatch (got: $len expected: $content_length)/);
return $len;
}
# Read and parse the Status-Line and headers of one response; returns
# a hashref with status, reason, headers, protocol.
sub read_response_header {
@_ == 1 || croak(q/Usage: $handle->read_response_header()/);
my ($self) = @_;
my $line = $self->readline;
$line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x
or croak(q/Malformed Status-Line: / . $Printable->($line));
my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4);
return {
status => $status,
reason => $reason,
headers => $self->read_header_lines,
protocol => $protocol,
};
}
# Send the Request-Line followed by the header block; returns the
# total number of bytes written.
sub write_request_header {
@_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/);
my ($self, $method, $request_uri, $headers) = @_;
return $self->write("$method $request_uri HTTP/1.1\x0D\x0A")
+ $self->write_header_lines($headers);
}
# Wait (via select(2)) until the socket is ready for 'read' or 'write',
# up to $timeout seconds (defaults to $self->{timeout} when the argument
# is undefined or negative). Returns the select() ready count (0 on
# timeout). Retries transparently on EINTR, recomputing the remaining
# time; a timeout of 0/undef means retry indefinitely on EINTR.
sub _do_timeout {
    my ($self, $type, $timeout) = @_;
    $timeout = $self->{timeout}
        unless defined $timeout && $timeout >= 0;
    my $fd = fileno $self->{fh};
    defined $fd && $fd >= 0
      or croak(q/select(2): 'Bad file descriptor'/);
    my $initial = time;
    my $pending = $timeout;
    my $nfound;
    # Build the fd_set bitmask for select() with just our descriptor.
    vec(my $fdset = '', $fd, 1) = 1;
    while () {
        $nfound = ($type eq 'read')
            ? select($fdset, undef, undef, $pending)
            : select(undef, $fdset, undef, $pending) ;
        if ($nfound == -1) {
            # Only EINTR is retriable; anything else is fatal.
            $! == EINTR
              or croak(qq/select(2): '$!'/);
            # Restart the wait with whatever time is still left.
            redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0;
            $nfound = 0;    # out of time: report "not ready"
        }
        last;
    }
    $! = 0;
    return $nfound;
}
# True when the socket has data available to read within the (optional)
# timeout; thin wrapper over _do_timeout('read', ...).
sub can_read {
    @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/);
    my ($self, @timeout) = @_;
    return $self->_do_timeout('read', @timeout);
}
# True when the socket is writable within the (optional) timeout; thin
# wrapper over _do_timeout('write', ...).
sub can_write {
    @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/);
    my ($self, @timeout) = @_;
    return $self->_do_timeout('write', @timeout);
}
# The following heredoc holds Perl source that implements TLS peer
# certificate inspection and hostname verification (CN / subjectAltName
# matching, per-protocol wildcard policies). It is kept as a string and
# only compiled (eval'ed below) when IO::Socket::SSL is actually
# loadable, so this file does not hard-depend on Net::SSLeay.
# NOTE(review): the string body must not be edited cosmetically — it is
# runtime data, appears to be lifted from IO::Socket::SSL itself.
my $prog = <<'EOP';
BEGIN {
    if ( defined &IO::Socket::SSL::CAN_IPV6 ) {
        *CAN_IPV6 = \*IO::Socket::SSL::CAN_IPV6;
    }
    else {
        constant->import( CAN_IPV6 => '' );
    }
    my %const = (
        NID_CommonName => 13,
        GEN_DNS => 2,
        GEN_IPADD => 7,
    );
    while ( my ($name,$value) = each %const ) {
        no strict 'refs';
        *{$name} = UNIVERSAL::can( 'Net::SSLeay', $name ) || sub { $value };
    }
}
{
    my %dispatcher = (
        issuer => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_issuer_name( shift )) },
        subject => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_subject_name( shift )) },
    );
    if ( $Net::SSLeay::VERSION >= 1.30 ) {
        $dispatcher{commonName} = sub {
            my $cn = Net::SSLeay::X509_NAME_get_text_by_NID(
                Net::SSLeay::X509_get_subject_name( shift ), NID_CommonName);
            $cn =~s{\0$}{}; # work around Bug in Net::SSLeay <1.33
            $cn;
        }
    } else {
        $dispatcher{commonName} = sub {
            croak "you need at least Net::SSLeay version 1.30 for getting commonName"
        }
    }
    if ( $Net::SSLeay::VERSION >= 1.33 ) {
        $dispatcher{subjectAltNames} = sub { Net::SSLeay::X509_get_subjectAltNames( shift ) };
    } else {
        $dispatcher{subjectAltNames} = sub {
            return;
        };
    }
    $dispatcher{authority} = $dispatcher{issuer};
    $dispatcher{owner} = $dispatcher{subject};
    $dispatcher{cn} = $dispatcher{commonName};
    sub _peer_certificate {
        my ($self, $field) = @_;
        my $ssl = $self->_get_ssl_object or return;
        my $cert = ${*$self}{_SSL_certificate}
            ||= Net::SSLeay::get_peer_certificate($ssl)
            or return $self->error("Could not retrieve peer certificate");
        if ($field) {
            my $sub = $dispatcher{$field} or croak
                "invalid argument for peer_certificate, valid are: ".join( " ",keys %dispatcher ).
                "\nMaybe you need to upgrade your Net::SSLeay";
            return $sub->($cert);
        } else {
            return $cert
        }
    }
    my %scheme = (
        ldap => {
            wildcards_in_cn => 0,
            wildcards_in_alt => 'leftmost',
            check_cn => 'always',
        },
        http => {
            wildcards_in_cn => 'anywhere',
            wildcards_in_alt => 'anywhere',
            check_cn => 'when_only',
        },
        smtp => {
            wildcards_in_cn => 0,
            wildcards_in_alt => 0,
            check_cn => 'always'
        },
        none => {}, # do not check
    );
    $scheme{www} = $scheme{http}; # alias
    $scheme{xmpp} = $scheme{http}; # rfc 3920
    $scheme{pop3} = $scheme{ldap}; # rfc 2595
    $scheme{imap} = $scheme{ldap}; # rfc 2595
    $scheme{acap} = $scheme{ldap}; # rfc 2595
    $scheme{nntp} = $scheme{ldap}; # rfc 4642
    $scheme{ftp} = $scheme{http}; # rfc 4217
    sub _verify_hostname_of_cert {
        my $identity = shift;
        my $cert = shift;
        my $scheme = shift || 'none';
        if ( ! ref($scheme) ) {
            $scheme = $scheme{$scheme} or croak "scheme $scheme not defined";
        }
        return 1 if ! %$scheme; # 'none'
        my $commonName = $dispatcher{cn}->($cert);
        my @altNames = $dispatcher{subjectAltNames}->($cert);
        if ( my $sub = $scheme->{callback} ) {
            return $sub->($identity,$commonName,@altNames);
        }
        my $ipn;
        if ( CAN_IPV6 and $identity =~m{:} ) {
            $ipn = IO::Socket::SSL::inet_pton(IO::Socket::SSL::AF_INET6,$identity)
                or croak "'$identity' is not IPv6, but neither IPv4 nor hostname";
        } elsif ( $identity =~m{^\d+\.\d+\.\d+\.\d+$} ) {
            $ipn = IO::Socket::SSL::inet_aton( $identity ) or croak "'$identity' is not IPv4, but neither IPv6 nor hostname";
        } else {
            if ( $identity =~m{[^a-zA-Z0-9_.\-]} ) {
                $identity =~m{\0} and croak("name '$identity' has \\0 byte");
                $identity = IO::Socket::SSL::idn_to_ascii($identity) or
                    croak "Warning: Given name '$identity' could not be converted to IDNA!";
            }
        }
        my $check_name = sub {
            my ($name,$identity,$wtyp) = @_;
            $wtyp ||= '';
            my $pattern;
            if ( $wtyp eq 'anywhere' and $name =~m{^([a-zA-Z0-9_\-]*)\*(.+)} ) {
                $pattern = qr{^\Q$1\E[a-zA-Z0-9_\-]*\Q$2\E$}i;
            } elsif ( $wtyp eq 'leftmost' and $name =~m{^\*(\..+)$} ) {
                $pattern = qr{^[a-zA-Z0-9_\-]*\Q$1\E$}i;
            } else {
                $pattern = qr{^\Q$name\E$}i;
            }
            return $identity =~ $pattern;
        };
        my $alt_dnsNames = 0;
        while (@altNames) {
            my ($type, $name) = splice (@altNames, 0, 2);
            if ( $ipn and $type == GEN_IPADD ) {
                return 1 if $ipn eq $name;
            } elsif ( ! $ipn and $type == GEN_DNS ) {
                $name =~s/\s+$//; $name =~s/^\s+//;
                $alt_dnsNames++;
                $check_name->($name,$identity,$scheme->{wildcards_in_alt})
                    and return 1;
            }
        }
        if ( ! $ipn and (
            $scheme->{check_cn} eq 'always' or
            $scheme->{check_cn} eq 'when_only' and !$alt_dnsNames)) {
            $check_name->($commonName,$identity,$scheme->{wildcards_in_cn})
                and return 1;
        }
        return 0; # no match
    }
}
EOP
# Compile the verification helpers only if IO::Socket::SSL is available;
# otherwise HTTPS support is silently absent (callers probe for it).
eval { require IO::Socket::SSL };
if ( $INC{"IO/Socket/SSL.pm"} ) {
    eval $prog;
    die $@ if $@;
}
1;
}
# ###########################################################################
# End HTTPMicro package
# ###########################################################################
# ###########################################################################
# VersionCheck package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the Bazaar repository at,
# lib/VersionCheck.pm
# t/lib/VersionCheck.t
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package VersionCheck;
use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
use Data::Dumper;
local $Data::Dumper::Indent = 1;
local $Data::Dumper::Sortkeys = 1;
local $Data::Dumper::Quotekeys = 0;
use Digest::MD5 qw(md5_hex);
use Sys::Hostname qw(hostname);
use File::Basename qw();
use File::Spec;
use FindBin qw();
eval {
require Percona::Toolkit;
require HTTPMicro;
};
# This bare block gives version_check_file() private state: the file name
# and the candidate directories for the "last checked" timestamp file.
{
    my $file    = 'percona-version-check';
    # Fall back across the usual home-directory env vars (Windows uses
    # HOMEPATH/USERPROFILE), then the current directory.
    my $home    = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.';
    my @vc_dirs = (
        '/etc/percona',
        '/etc/percona-toolkit',
        '/tmp',
        "$home",
    );

    # Test hook: force a single directory.
    if ($ENV{PTDEBUG_VERSION_CHECK_HOME}) {
        @vc_dirs = ( $ENV{PTDEBUG_VERSION_CHECK_HOME} );
    }

    # Return the path of the version-check state file: the first existing,
    # writable candidate directory wins; otherwise the bare file name (CWD).
    sub version_check_file {
        foreach my $dir ( @vc_dirs ) {
            if ( -d $dir && -w $dir ) {
                PTDEBUG && _d('Version check file', $file, 'in', $dir);
                return $dir . '/' . $file;
            }
        }
        PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD});
        return $file;  # in the CWD
    }
}
# Minimum number of seconds between version checks for a given instance
# (24 hours).
sub version_check_time_limit {
    return 24 * 60 * 60;
}
# Entry point of the version check: decides which instances are due for a
# check, pings the Percona server over HTTPS, prints any update advice,
# and records the check time. All failures are swallowed (debug-logged
# only) so a version-check problem never breaks the calling tool.
sub version_check {
    my (%args) = @_;

    my $instances = $args{instances} || [];
    my $instances_to_check;

    PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin);
    if ( !$args{force} ) {
        # Running from a bzr working tree implies a developer; skip.
        if ( $FindBin::Bin
             && (-d "$FindBin::Bin/../.bzr" || -d "$FindBin::Bin/../../.bzr") ) {
            PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check");
            return;
        }
    }

    eval {
        # Tag every MySQL instance with a stable (name, md5 id) pair, and
        # add a pseudo-instance (id 0) representing the local system.
        foreach my $instance ( @$instances ) {
            my ($name, $id) = get_instance_id($instance);
            $instance->{name} = $name;
            $instance->{id}   = $id;
        }

        push @$instances, { name => 'system', id => 0 };

        $instances_to_check = get_instances_to_check(
            instances => $instances,
            vc_file   => $args{vc_file},  # testing
            now       => $args{now},      # testing
        );
        PTDEBUG && _d(scalar @$instances_to_check, 'instances to check');
        return unless @$instances_to_check;

        # The check is HTTPS-only; bail out quietly without SSL support.
        my $protocol = 'https';
        eval { require IO::Socket::SSL; };
        if ( $EVAL_ERROR ) {
            PTDEBUG && _d($EVAL_ERROR);
            PTDEBUG && _d("SSL not available, won't run version_check");
            return;
        }
        PTDEBUG && _d('Using', $protocol);

        my $advice = pingback(
            instances => $instances_to_check,
            protocol  => $protocol,
            url       => $args{url}                       # testing
                       || $ENV{PERCONA_VERSION_CHECK_URL} # testing
                       || "$protocol://v.percona.com",
        );
        if ( $advice ) {
            PTDEBUG && _d('Advice:', Dumper($advice));
            if ( scalar @$advice > 1) {
                print "\n# " . scalar @$advice . " software updates are "
                    . "available:\n";
            }
            else {
                print "\n# A software update is available:\n";
            }
            print join("\n", map { "# * $_" } @$advice), "\n\n";
        }
    };
    if ( $EVAL_ERROR ) {
        PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }

    # Record the attempt even if the ping failed, so we don't retry on
    # every invocation.
    if ( @$instances_to_check ) {
        eval {
            update_check_times(
                instances => $instances_to_check,
                vc_file   => $args{vc_file},  # testing
                now       => $args{now},      # testing
            );
        };
        if ( $EVAL_ERROR ) {
            PTDEBUG && _d('Error updating version check file:', $EVAL_ERROR);
        }
    }

    # Test hook: make the whole process exit after the check.
    if ( $ENV{PTDEBUG_VERSION_CHECK} ) {
        warn "Exiting because the PTDEBUG_VERSION_CHECK "
            . "environment variable is defined.\n";
        exit 255;
    }

    return;
}
# Filter the given instances down to those whose last recorded check is
# older than version_check_time_limit() (or that were never checked).
# The state file stores one "<id>,<epoch>" pair per line.
#
# Fix: "Intsance" typo in the debug output.
sub get_instances_to_check {
    my (%args) = @_;

    my $instances = $args{instances};
    my $now       = $args{now}     || int(time);
    my $vc_file   = $args{vc_file} || version_check_file();

    # No state file yet: every instance is due for a check.
    if ( !-f $vc_file ) {
        PTDEBUG && _d('Version check file', $vc_file, 'does not exist;',
            'version checking all instances');
        return $instances;
    }

    open my $fh, '<', $vc_file or die "Cannot open $vc_file: $OS_ERROR";
    chomp(my $file_contents = do { local $/ = undef; <$fh> });
    PTDEBUG && _d('Version check file', $vc_file, 'contents:', $file_contents);
    close $fh;
    my %last_check_time_for = $file_contents =~ /^([^,]+),(.+)$/mg;

    my $check_time_limit = version_check_time_limit();
    my @instances_to_check;
    foreach my $instance ( @$instances ) {
        my $last_check_time = $last_check_time_for{ $instance->{id} };
        PTDEBUG && _d('Instance', $instance->{id}, 'last checked',
            $last_check_time, 'now', $now, 'diff', $now - ($last_check_time || 0),
            'hours until next check',
            sprintf '%.2f',
            ($check_time_limit - ($now - ($last_check_time || 0))) / 3600);
        if ( !defined $last_check_time
             || ($now - $last_check_time) >= $check_time_limit ) {
            PTDEBUG && _d('Time to check', Dumper($instance));
            push @instances_to_check, $instance;
        }
    }

    return \@instances_to_check;
}
# Rewrite the version-check state file: stamp the given instances with
# the current time, while preserving the original timestamps of any
# instances already in the file that were not checked this run.
sub update_check_times {
    my (%args) = @_;

    my $instances = $args{instances};
    my $now       = $args{now}     || int(time);
    my $vc_file   = $args{vc_file} || version_check_file();
    PTDEBUG && _d('Updating last check time:', $now);

    # Start with the instances just checked, all stamped "now".
    my %all_instances = map {
        $_->{id} => { name => $_->{name}, ts => $now }
    } @$instances;

    # Merge in previously recorded instances we did not check this time.
    if ( -f $vc_file ) {
        open my $fh, '<', $vc_file or die "Cannot read $vc_file: $OS_ERROR";
        my $contents = do { local $/ = undef; <$fh> };
        close $fh;

        foreach my $line ( split("\n", ($contents || '')) ) {
            my ($id, $ts) = split(',', $line);
            if ( !exists $all_instances{$id} ) {
                $all_instances{$id} = { ts => $ts };  # original ts, not updated
            }
        }
    }

    # Rewrite the whole file in sorted-id order.
    open my $fh, '>', $vc_file or die "Cannot write to $vc_file: $OS_ERROR";
    foreach my $id ( sort keys %all_instances ) {
        PTDEBUG && _d('Updated:', $id, Dumper($all_instances{$id}));
        print { $fh } $id . ',' . $all_instances{$id}->{ts} . "\n";
    }
    close $fh;

    return;
}
# Derive a human-readable name and a stable md5 id for a MySQL instance.
# Prefers CONCAT(@@hostname, @@port); falls back to @@hostname + SHOW
# VARIABLES 'port' (older servers), then to the DSN's host/port.
# Returns ($name, $id).
sub get_instance_id {
    my ($instance) = @_;

    my $dbh = $instance->{dbh};
    my $dsn = $instance->{dsn};

    my $sql = q{SELECT CONCAT(@@hostname, @@port)};
    PTDEBUG && _d($sql);
    my ($name) = eval { $dbh->selectrow_array($sql) };
    if ( $EVAL_ERROR ) {
        # @@port may not be selectable this way on old servers; try
        # hostname alone, then fetch the port separately.
        PTDEBUG && _d($EVAL_ERROR);
        $sql = q{SELECT @@hostname};
        PTDEBUG && _d($sql);
        ($name) = eval { $dbh->selectrow_array($sql) };
        if ( $EVAL_ERROR ) {
            # No server-side identity at all: build one from the DSN.
            PTDEBUG && _d($EVAL_ERROR);
            $name = ($dsn->{h} || 'localhost') . ($dsn->{P} || 3306);
        }
        else {
            $sql = q{SHOW VARIABLES LIKE 'port'};
            PTDEBUG && _d($sql);
            my (undef, $port) = eval { $dbh->selectrow_array($sql) };
            PTDEBUG && _d('port:', $port);
            $name .= $port || '';
        }
    }
    my $id = md5_hex($name);

    PTDEBUG && _d('MySQL instance:', $id, $name, Dumper($dsn));

    return $name, $id;
}
# Perform the two-phase exchange with the version-check server:
# GET the list of items the server wants versions for, collect those
# versions locally, POST them back, and return the server's suggestions
# (arrayref of advice strings) or nothing.
#
# Fix: "arugment" typo in the missing-argument die message.
sub pingback {
    my (%args) = @_;
    my @required_args = qw(url instances);
    foreach my $arg ( @required_args ) {
        die "I need a $arg argument" unless $args{$arg};
    }
    my $url       = $args{url};
    my $instances = $args{instances};

    my $ua = $args{ua} || HTTPMicro->new( timeout => 3 );

    # Phase 1: ask the server what it wants to know about.
    my $response = $ua->request('GET', $url);
    PTDEBUG && _d('Server response:', Dumper($response));
    die "No response from GET $url"
        if !$response;
    die("GET on $url returned HTTP status $response->{status}; expected 200\n",
        ($response->{content} || '')) if $response->{status} != 200;
    die("GET on $url did not return any programs to check")
        if !$response->{content};

    my $items = parse_server_response(
        response => $response->{content}
    );
    die "Failed to parse server requested programs: $response->{content}"
        if !scalar keys %$items;

    my $versions = get_versions(
        items     => $items,
        instances => $instances,
    );
    die "Failed to get any program versions; should have at least gotten Perl"
        if !scalar keys %$versions;

    # Phase 2: POST the collected versions back, keyed by an anonymized
    # (md5 of hostname) id.
    my $client_content = encode_client_response(
        items      => $items,
        versions   => $versions,
        general_id => md5_hex( hostname() ),
    );

    my $client_response = {
        headers => { "X-Percona-Toolkit-Tool" => File::Basename::basename($0) },
        content => $client_content,
    };
    PTDEBUG && _d('Client response:', Dumper($client_response));

    $response = $ua->request('POST', $url, $client_response);
    PTDEBUG && _d('Server suggestions:', Dumper($response));
    die "No response from POST $url $client_response"
        if !$response;
    die "POST $url returned HTTP status $response->{status}; expected 200"
        if $response->{status} != 200;

    # An empty body means "no advice".
    return unless $response->{content};

    $items = parse_server_response(
        response   => $response->{content},
        split_vars => 0,
    );
    die "Failed to parse server suggestions: $response->{content}"
        if !scalar keys %$items;
    my @suggestions = map { $_->{vars} }
                      sort { $a->{item} cmp $b->{item} }
                      values %$items;

    return \@suggestions;
}
# Serialize collected versions into the wire format the server expects:
# one "id;item;version" line per entry. Hash-valued versions (per-MySQL-
# instance) use the instance id; scalar versions use the general host id.
# Items the server asked about but we have no version for are skipped.
#
# Fix: "arugment" typo in the missing-argument die message.
sub encode_client_response {
    my (%args) = @_;
    my @required_args = qw(items versions general_id);
    foreach my $arg ( @required_args ) {
        die "I need a $arg argument" unless $args{$arg};
    }
    my ($items, $versions, $general_id) = @args{@required_args};

    my @lines;
    foreach my $item ( sort keys %$items ) {
        next unless exists $versions->{$item};
        if ( ref($versions->{$item}) eq 'HASH' ) {
            # Per-instance versions (e.g. MySQL): one line per instance id.
            my $mysql_versions = $versions->{$item};
            for my $id ( sort keys %$mysql_versions ) {
                push @lines, join(';', $id, $item, $mysql_versions->{$id});
            }
        }
        else {
            push @lines, join(';', $general_id, $item, $versions->{$item});
        }
    }

    my $client_response = join("\n", @lines) . "\n";
    return $client_response;
}
# Parse the server's newline-separated "item;type;vars" payload into a
# hash of { item => { item, type, vars } }. By default the vars field is
# split on commas into an arrayref; pass split_vars => 0 to keep it raw
# (used for the server's free-form suggestions).
#
# Fix: "arugment" typo in the missing-argument die message.
sub parse_server_response {
    my (%args) = @_;
    my @required_args = qw(response);
    foreach my $arg ( @required_args ) {
        die "I need a $arg argument" unless $args{$arg};
    }
    my ($response) = @args{@required_args};

    my %items = map {
        my ($item, $type, $vars) = split(";", $_);
        if ( !defined $args{split_vars} || $args{split_vars} ) {
            $vars = [ split(",", ($vars || '')) ];
        }
        $item => {
            item => $item,
            type => $type,
            vars => $vars,
        };
    } split("\n", $response);

    PTDEBUG && _d('Items:', Dumper(\%items));

    return \%items;
}
# Dispatch table: maps a server-requested item type to the local sub that
# can fetch that kind of version information.
my %sub_for_type = (
    os_version          => \&get_os_version,
    perl_version        => \&get_perl_version,
    perl_module_version => \&get_perl_module_version,
    mysql_variable      => \&get_mysql_variable,
);

# Return 1 if we know how to handle the item's type, 0 if not, and
# nothing at all for a false/undef item.
sub valid_item {
    my ($item) = @_;
    return unless $item;
    if ( !exists $sub_for_type{ $item->{type} } ) {
        PTDEBUG && _d('Invalid type:', $item->{type});
        return 0;
    }
    return 1;
}
# Collect versions for every requested item via the %sub_for_type
# dispatch table. Unrecognized item types are skipped and individual
# lookup failures are debug-logged, never fatal. Returns a hashref of
# { item_name => version } (version may itself be a per-instance hash).
#
# Fix: "arugment" typo in the missing-argument die message.
sub get_versions {
    my (%args) = @_;
    my @required_args = qw(items);
    foreach my $arg ( @required_args ) {
        die "I need a $arg argument" unless $args{$arg};
    }
    my ($items) = @args{@required_args};

    my %versions;
    foreach my $item ( values %$items ) {
        next unless valid_item($item);
        eval {
            my $version = $sub_for_type{ $item->{type} }->(
                item      => $item,
                instances => $args{instances},
            );
            if ( $version ) {
                # Hash refs (per-instance versions) must not be chomped.
                chomp $version unless ref($version);
                $versions{$item->{item}} = $version;
            }
        };
        if ( $EVAL_ERROR ) {
            PTDEBUG && _d('Error getting version for', Dumper($item), $EVAL_ERROR);
        }
    }

    return \%versions;
}
# Best-effort human-readable OS/distribution description. Probes a long
# list of distro release files and tools (shelling out via backticks),
# falling back to `uname` output and finally $OSNAME.
sub get_os_version {
    # Windows has a dedicated API; nothing below applies.
    if ( $OSNAME eq 'MSWin32' ) {
        require Win32;
        return Win32::GetOSDisplayName();
    }

    chomp(my $platform = `uname -s`);
    PTDEBUG && _d('platform:', $platform);
    return $OSNAME unless $platform;

    chomp(my $lsb_release
            = `which lsb_release 2>/dev/null | awk '{print \$1}'` || '');
    PTDEBUG && _d('lsb_release:', $lsb_release);

    my $release = "";

    # Linux: try distro-specific release files first, then lsb_release,
    # then generic /etc/*release fallbacks.
    if ( $platform eq 'Linux' ) {
        if ( -f "/etc/fedora-release" ) {
            $release = `cat /etc/fedora-release`;
        }
        elsif ( -f "/etc/redhat-release" ) {
            $release = `cat /etc/redhat-release`;
        }
        elsif ( -f "/etc/system-release" ) {
            $release = `cat /etc/system-release`;
        }
        elsif ( $lsb_release ) {
            $release = `$lsb_release -ds`;
        }
        elsif ( -f "/etc/lsb-release" ) {
            $release = `grep DISTRIB_DESCRIPTION /etc/lsb-release`;
            $release =~ s/^\w+="([^"]+)".+/$1/;
        }
        elsif ( -f "/etc/debian_version" ) {
            chomp(my $rel = `cat /etc/debian_version`);
            $release = "Debian $rel";
            if ( -f "/etc/apt/sources.list" ) {
                # Scrape the most common suite name out of sources.list to
                # recover the codename (e.g. "wheezy").
                chomp(my $code_name = `awk '/^deb/ {print \$3}' /etc/apt/sources.list | awk -F/ '{print \$1}'| awk 'BEGIN {FS="|"} {print \$1}' | sort | uniq -c | sort -rn | head -n1 | awk '{print \$2}'`);
                $release .= " ($code_name)" if $code_name;
            }
        }
        elsif ( -f "/etc/os-release" ) { # openSUSE
            chomp($release = `grep PRETTY_NAME /etc/os-release`);
            $release =~ s/^PRETTY_NAME="(.+)"$/$1/;
        }
        elsif ( `ls /etc/*release 2>/dev/null` ) {
            if ( `grep DISTRIB_DESCRIPTION /etc/*release 2>/dev/null` ) {
                $release = `grep DISTRIB_DESCRIPTION /etc/*release | head -n1`;
            }
            else {
                $release = `cat /etc/*release | head -n1`;
            }
        }
    }
    # BSD variants and macOS (Darwin): platform + kernel release.
    elsif ( $platform =~ m/(?:BSD|^Darwin)$/ ) {
        my $rel = `uname -r`;
        $release = "$platform $rel";
    }
    # Solaris.
    elsif ( $platform eq "SunOS" ) {
        my $rel = `head -n1 /etc/release` || `uname -r`;
        $release = "$platform $rel";
    }

    if ( !$release ) {
        PTDEBUG && _d('Failed to get the release, using platform');
        $release = $platform;
    }
    chomp($release);

    $release =~ s/^"|"$//g;  # strip surrounding quotes, if any

    PTDEBUG && _d('OS version =', $release);
    return $release;
}
# Return the running interpreter's version as a dotted string (e.g.
# "5.10.1"); returns nothing when no item is given.
sub get_perl_version {
    my (%args) = @_;
    return unless $args{item};

    my $version = sprintf '%vd', $PERL_VERSION;
    PTDEBUG && _d('Perl version', $version);
    return $version;
}
# Load the Perl module named by the item and return its $VERSION.
# NOTE(review): uses string eval on the server-supplied module name;
# returns undef (with $@ set) when the module cannot be loaded.
sub get_perl_module_version {
    my (%args) = @_;
    my $item = $args{item};
    return unless $item;
    my $var     = '$' . $item->{item} . '::VERSION';
    my $version = eval "use $item->{item}; $var;";
    PTDEBUG && _d('Perl version for', $var, '=', $version);
    return $version;
}
# Convenience wrapper: fetch a server variable via SHOW VARIABLES.
sub get_mysql_variable {
    my @passthrough = @_;
    return get_from_mysql(show => 'VARIABLES', @passthrough);
}
# Run "SHOW $show" on every MySQL instance (skipping the pseudo system
# instance, id 0) and return { instance_id => "joined variable values" }.
# The special item "MySQL" is expanded to version_comment + version.
#
# Fix: removed a dead outer "my @versions" that was immediately shadowed
# by the per-instance declaration inside the loop and never used.
sub get_from_mysql {
    my (%args) = @_;
    my $show      = $args{show};
    my $item      = $args{item};
    my $instances = $args{instances};
    return unless $show && $item;

    if ( !$instances || !@$instances ) {
        PTDEBUG && _d('Cannot check', $item,
            'because there are no MySQL instances');
        return;
    }

    # "MySQL" is shorthand for the full server version string.
    if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') {
        $item->{vars} = ['version_comment', 'version'];
    }

    my %version_for;
    foreach my $instance ( @$instances ) {
        next unless $instance->{id};  # special system instance has id=0
        my $dbh = $instance->{dbh};
        local $dbh->{FetchHashKeyName} = 'NAME_lc';
        my $sql = qq/SHOW $show/;
        PTDEBUG && _d($sql);
        my $rows = $dbh->selectall_hashref($sql, 'variable_name');

        my @versions;
        foreach my $var ( @{$item->{vars}} ) {
            $var = lc($var);
            my $version = $rows->{$var}->{value};
            PTDEBUG && _d('MySQL version for', $item->{item}, '=', $version,
                'on', $instance->{name});
            push @versions, $version;
        }
        $version_for{ $instance->{id} } = join(' ', @versions);
    }

    return \%version_for;
}
# Debug printer: prefixes each message with "# <package>:<line> <pid>",
# stringifies undefs, and re-prefixes embedded newlines so multi-line
# values stay comment-formatted on STDERR.
sub _d {
    my ($package, undef, $line) = caller 0;
    @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
         map { defined $_ ? $_ : 'undef' }
         @_;
    print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}
1;
}
# ###########################################################################
# End VersionCheck package
# ###########################################################################
#
# parse_connection_options() subroutine parses connection-related command line
# options
#
# Build the DBI DSN (and credentials) for the xtrabackup connection from
# the option_* environment variables, mutating the passed-in hashref.
sub parse_connection_options {
    my $con = shift;

    $con->{dsn} = 'dbi:mysql:';

    # The defaults-file options must come first so that later, more
    # specific settings take precedence.
    for my $file_opt (qw(option_defaults_file option_defaults_extra_file)) {
        if ($ENV{$file_opt}) {
            $con->{dsn} .= ";mysql_read_default_file=$ENV{$file_opt}";
        }
    }

    $con->{dsn} .= ";mysql_read_default_group=xtrabackup";

    # Credentials are passed to DBI->connect separately from the DSN.
    $con->{dsn_password} = "$ENV{option_mysql_password}" if $ENV{option_mysql_password};
    $con->{dsn_user}     = "$ENV{option_mysql_user}"     if $ENV{option_mysql_user};

    # Endpoint options are appended to the DSN itself.
    $con->{dsn} .= ";host=$ENV{option_mysql_host}"            if $ENV{option_mysql_host};
    $con->{dsn} .= ";port=$ENV{option_mysql_port}"            if $ENV{option_mysql_port};
    $con->{dsn} .= ";mysql_socket=$ENV{option_mysql_socket}"  if $ENV{option_mysql_socket};
}
#
# mysql_connect subroutine connects to MySQL server
#
# Connect to the MySQL server using the env-derived DSN and return a
# %con hash with dbh/dsn/credentials (or connect_error on failure).
# With abort_on_error => 1 (the default) a failed connection dies;
# relies on file globals $now, $prefix and $dbd_mysql_installed.
sub mysql_connect {
    my %con;
    my %args = (
        # Defaults
        abort_on_error => 1,
        @_
    );

    $con{abort_on_error} = $args{abort_on_error};

    parse_connection_options(\%con);

    $now = current_time();

    print STDERR "$now $prefix Connecting to MySQL server with DSN '$con{dsn}'" .
        (defined($con{dsn_user}) ? " as '$con{dsn_user}' " : "") .
        " (using password: ";
    if (defined($con{dsn_password})) {
        print STDERR "YES).\n";
    } else {
        print STDERR "NO).\n";
    }

    eval {
        $con{dbh}=DBI->connect($con{dsn}, $con{dsn_user},
                               $con{dsn_password}, { RaiseError => 1 });
    };

    if ($EVAL_ERROR) {
        $con{connect_error}=$EVAL_ERROR;
    } else {
        $now = current_time();
        print STDERR "$now $prefix Connected to MySQL server\n";
    }

    if ($args{abort_on_error}) {
        if (!$dbd_mysql_installed) {
            # A missing driver explains any failure better than DBI's error.
            die "Failed to connect to MySQL server as " .
                "DBD::mysql module is not installed";
        } else {
            if (!$con{dbh}) {
                die "Failed to connect to MySQL server: " .
                    $con{connect_error};
            }
        }
    }

    if ($con{dbh}) {
        # Keep the session alive for very long copy phases (~24.8 days max).
        $con{dbh}->do("SET SESSION wait_timeout=2147483");
    }

    return %con;
}
#
# return current local time as string in form "070816 12:23:15"
#
# Current local time formatted as "YYMMDD HH:MM:SS", e.g. "070816 12:23:15".
sub current_time {
    my @now = localtime();
    return strftime("%y%m%d %H:%M:%S", @now);
}
# Top-level driver: connect, run the Percona version check against this
# server, then report completion. Uses file globals %mysql, $now, $prefix.
%mysql = mysql_connect(abort_on_error => 1);

$now = current_time();

print STDERR
    "$now $prefix Executing a version check against the server...\n";

# Redirect STDOUT to STDERR, as VersionCheck prints alerts to STDOUT
select STDERR;

# force => 1 skips the developer (.bzr checkout) opt-out inside
# VersionCheck::version_check.
VersionCheck::version_check(
    force => 1,
    instances => [ {
        dbh => $mysql{dbh},
        dsn => $mysql{dsn}
    }
                 ]
    );

# Restore STDOUT as the default filehandle
select STDOUT;

$now = current_time();
print STDERR "$now $prefix Done.\n";
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2013 Percona LLC and/or its affiliates.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Page write filters implementation */
#include <my_base.h>
#include "common.h"
#include "write_filt.h"
#include "fil_cur.h"
#include "xtrabackup.h"
/************************************************************************
Write-through page write filter: copies pages to the destination
unmodified. It needs no finalize/deinit steps, hence the NULL slots. */
static my_bool wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
			  xb_fil_cur_t *cursor);
static my_bool wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile);

xb_write_filt_t wf_write_through = {
	&wf_wt_init,
	&wf_wt_process,
	NULL,		/* no finalize step */
	NULL		/* no deinit step */
};
/************************************************************************
Incremental page write filter: writes only pages whose LSN is newer than
incremental_lsn, batching them into a delta buffer with a page-number
index, and emits a .meta file plus a .delta data file. */
static my_bool wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
				   xb_fil_cur_t *cursor);
static my_bool wf_incremental_process(xb_write_filt_ctxt_t *ctxt,
				      ds_file_t *dstfile);
static my_bool wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt,
				       ds_file_t *dstfile);
static void wf_incremental_deinit(xb_write_filt_ctxt_t *ctxt);

xb_write_filt_t wf_incremental = {
	&wf_incremental_init,
	&wf_incremental_process,
	&wf_incremental_finalize,
	&wf_incremental_deinit
};
/************************************************************************
Initialize incremental page write filter: allocate and zero the delta
buffer, write the <dst>.meta delta-info file, rename the target to
<dst>.delta, and stamp the buffer's first slot with the "xtra" magic.
@return TRUE on success, FALSE on error. */
static my_bool
wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
		    xb_fil_cur_t *cursor)
{
	char				meta_name[FN_REFLEN];
	xb_delta_info_t			info;
	ulint				buf_size;
	xb_wf_incremental_ctxt_t	*cp =
		&(ctxt->u.wf_incremental_ctxt);

	ctxt->cursor = cursor;

	/* Allocate the delta buffer: UNIV_PAGE_SIZE_MAX / 4 page slots
	plus one extra page for alignment below. (The first slot holds
	the magic + page-number index, not page data.) */
	buf_size = (UNIV_PAGE_SIZE_MAX / 4 + 1) * UNIV_PAGE_SIZE_MAX;
	cp->delta_buf_base = static_cast<byte *>(ut_malloc(buf_size));
	memset(cp->delta_buf_base, 0, buf_size);
	cp->delta_buf = static_cast<byte *>
		(ut_align(cp->delta_buf_base, UNIV_PAGE_SIZE_MAX));

	/* write delta meta info */
	snprintf(meta_name, sizeof(meta_name), "%s%s", dst_name,
		 XB_DELTA_INFO_SUFFIX);
	info.page_size = cursor->page_size;
	info.zip_size = cursor->zip_size;
	info.space_id = cursor->space_id;
	if (!xb_write_delta_metadata(meta_name, &info)) {
		msg("[%02u] xtrabackup: Error: "
		    "failed to write meta info for %s\n",
		    cursor->thread_n, cursor->rel_path);
		return(FALSE);
	}

	/* change the target file name, since we are only going to write
	delta pages */
	strcat(dst_name, ".delta");

	/* npages starts at 1 because slot 0 is the magic/index block. */
	mach_write_to_4(cp->delta_buf, 0x78747261UL); /*"xtra"*/
	cp->npages = 1;

	return(TRUE);
}
/************************************************************************
Run the next batch of pages through incremental page write filter:
pages with LSN <= incremental_lsn are skipped; changed pages have their
page number recorded in the index block and their contents appended to
the delta buffer, which is flushed when full.
@return TRUE on success, FALSE on error. */
static my_bool
wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
{
	ulint				i;
	xb_fil_cur_t			*cursor = ctxt->cursor;
	ulint				page_size = cursor->page_size;
	byte				*page;
	xb_wf_incremental_ctxt_t	*cp = &(ctxt->u.wf_incremental_ctxt);

	for (i = 0, page = cursor->buf; i < cursor->buf_npages;
	     i++, page += page_size) {

		/* Unchanged since the base backup: skip. */
		if (incremental_lsn >= mach_read_from_8(page + FIL_PAGE_LSN)) {

			continue;
		}

		/* updated page */
		if (cp->npages == page_size / 4) {
			/* flush buffer */
			if (ds_write(dstfile, cp->delta_buf,
				     cp->npages * page_size)) {
				return(FALSE);
			}

			/* clear buffer */
			memset(cp->delta_buf, 0, page_size / 4 * page_size);
			/*"xtra"*/
			mach_write_to_4(cp->delta_buf, 0x78747261UL);
			cp->npages = 1;
		}

		/* Record the page number in slot 0's index, then append
		the page data at the matching buffer position. */
		mach_write_to_4(cp->delta_buf + cp->npages * 4,
				cursor->buf_page_no + i);
		memcpy(cp->delta_buf + cp->npages * page_size, page,
		       page_size);

		cp->npages++;
	}

	return(TRUE);
}
/************************************************************************
Flush the incremental page write filter's buffer: terminate the page
index with 0xFFFFFFFF when the buffer is not full, mark the block as
final by rewriting the magic as "XTRA", and write it out.
@return TRUE on success, FALSE on error. */
static my_bool
wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
{
	xb_fil_cur_t			*cursor = ctxt->cursor;
	ulint				page_size = cursor->page_size;
	xb_wf_incremental_ctxt_t	*cp = &(ctxt->u.wf_incremental_ctxt);

	if (cp->npages != page_size / 4) {
		/* Partial block: terminate the page-number index. */
		mach_write_to_4(cp->delta_buf + cp->npages * 4, 0xFFFFFFFFUL);
	}

	/* Mark the final block */
	mach_write_to_4(cp->delta_buf, 0x58545241UL); /*"XTRA"*/

	/* flush buffer */
	if (ds_write(dstfile, cp->delta_buf, cp->npages * page_size)) {
		return(FALSE);
	}

	return(TRUE);
}
/************************************************************************
Free the incremental page write filter's buffer. Safe to call when init
never allocated (delta_buf_base left NULL). */
static void
wf_incremental_deinit(xb_write_filt_ctxt_t *ctxt)
{
	xb_wf_incremental_ctxt_t	*cp = &(ctxt->u.wf_incremental_ctxt);

	if (cp->delta_buf_base != NULL) {
		ut_free(cp->delta_buf_base);
	}
}
/************************************************************************
Initialize the write-through page write filter: just remembers the
source cursor; dst_name is unused because the target is not renamed.
@return TRUE on success, FALSE on error. */
static my_bool
wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)),
	   xb_fil_cur_t *cursor)
{
	ctxt->cursor = cursor;

	return(TRUE);
}
/************************************************************************
Write the next batch of pages to the destination datasink, exactly as
read from the source file (no filtering).
@return TRUE on success, FALSE on error. */
static my_bool
wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
{
	const xb_fil_cur_t	*src = ctxt->cursor;

	/* ds_write() returns nonzero on failure. */
	return(ds_write(dstfile, src->buf, src->buf_read) ? FALSE : TRUE);
}
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2013 Percona LLC and/or its affiliates.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* Page write filter interface */
#ifndef XB_WRITE_FILT_H
#define XB_WRITE_FILT_H

#include "fil_cur.h"
#include "datasink.h"
#include "compact.h"

/* Incremental page filter context */
typedef struct {
	byte		*delta_buf_base;	/* raw allocation (freed) */
	byte		*delta_buf;		/* page-aligned view of it */
	ulint		npages;			/* slots used, incl. index block */
} xb_wf_incremental_ctxt_t;

/* Page filter context used as an opaque structure by callers */
typedef struct {
	xb_fil_cur_t	*cursor;	/* source file being backed up */
	/* per-filter private state; only the member matching the active
	filter is valid */
	union {
		xb_wf_incremental_ctxt_t	wf_incremental_ctxt;
		xb_wf_compact_ctxt_t		wf_compact_ctxt;
	} u;
} xb_write_filt_ctxt_t;

/* A page write filter: init/process are mandatory; finalize/deinit may
be NULL for filters without buffered state. */
typedef struct {
	my_bool	(*init)(xb_write_filt_ctxt_t *ctxt, char *dst_name,
			xb_fil_cur_t *cursor);
	my_bool	(*process)(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile);
	my_bool	(*finalize)(xb_write_filt_ctxt_t *, ds_file_t *dstfile);
	void	(*deinit)(xb_write_filt_ctxt_t *);
} xb_write_filt_t;

extern xb_write_filt_t wf_write_through;
extern xb_write_filt_t wf_incremental;
extern xb_write_filt_t wf_compact;
#endif /* XB_WRITE_FILT_H */
/******************************************************
Percona XtraBackup: hot backup tool for InnoDB
(c) 2009-2014 Percona LLC and/or its affiliates
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************
This file incorporates work covered by the following copyright and
permission notice:
Copyright 2010 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include <handler.h>
#include <trx0sys.h>
#include "common.h"
/* Magic prefix identifying a wsrep XID inside the XID 'data' field */
#define WSREP_XID_PREFIX "WSREPXid"
#define WSREP_XID_PREFIX_LEN MYSQL_XID_PREFIX_LEN
/* Layout of the wsrep XID 'data' field: the prefix at offset 0, the
cluster UUID at offset 8, and the sequence number directly after the
UUID; the gtrid spans all three parts. */
#define WSREP_XID_UUID_OFFSET 8
#define WSREP_XID_SEQNO_OFFSET (WSREP_XID_UUID_OFFSET + sizeof(wsrep_uuid_t))
#define WSREP_XID_GTRID_LEN (WSREP_XID_SEQNO_OFFSET + sizeof(wsrep_seqno_t))

/*! undefined seqno */
#define WSREP_SEQNO_UNDEFINED (-1)

/*! Name of file where Galera info is stored on recovery */
#define XB_GALERA_INFO_FILENAME "xtrabackup_galera_info"

/* Galera UUID type - for all unique IDs */
typedef struct wsrep_uuid {
	uint8_t data[16];	/* raw 128-bit UUID */
} wsrep_uuid_t;

/* sequence number of a writeset, etc. */
typedef int64_t wsrep_seqno_t;

/* Undefined UUID (all-zero bytes) */
static const wsrep_uuid_t WSREP_UUID_UNDEFINED = {{0,}};
/***********************************************************************//**
Check whether the given XID was generated by wsrep (Galera): formatID
must be 1, the gtrid must be exactly WSREP_XID_GTRID_LEN bytes with no
bqual part, and the data must start with the WSREP_XID_PREFIX magic.
@return true if the XID is a wsrep XID. */
static
bool
wsrep_is_wsrep_xid(
/*===============*/
	const void*	xid_ptr)
{
	const XID*	xid = static_cast<const XID*>(xid_ptr);

	if (xid->formatID != 1) {
		return(false);
	}

	if (xid->gtrid_length != WSREP_XID_GTRID_LEN
	    || xid->bqual_length != 0) {
		return(false);
	}

	return(memcmp(xid->data, WSREP_XID_PREFIX,
		      WSREP_XID_PREFIX_LEN) == 0);
}
/***********************************************************************//**
Retrieve binary WSREP UUID from XID.
@return binary WSREP UUID representation if the XID is a valid wsrep XID,
or WSREP_UUID_UNDEFINED otherwise. */
static
const wsrep_uuid_t*
wsrep_xid_uuid(
/*===========*/
	const XID*	xid)
{
	if (!wsrep_is_wsrep_xid(xid)) {
		return(&WSREP_UUID_UNDEFINED);
	}

	/* the UUID is stored in-place inside the XID data blob */
	return(reinterpret_cast<const wsrep_uuid_t*>(
		       xid->data + WSREP_XID_UUID_OFFSET));
}
/***********************************************************************//**
Retrieve WSREP seqno from XID.
@return WSREP seqno if the XID is a valid wsrep XID, or
WSREP_SEQNO_UNDEFINED otherwise. */
wsrep_seqno_t wsrep_xid_seqno(
/*==========================*/
	const XID*	xid)
{
	wsrep_seqno_t	seqno = WSREP_SEQNO_UNDEFINED;

	if (wsrep_is_wsrep_xid(xid)) {
		/* copy byte-wise: the value may be unaligned inside the
		XID data blob */
		memcpy(&seqno, xid->data + WSREP_XID_SEQNO_OFFSET,
		       sizeof(seqno));
	}

	return(seqno);
}
/***********************************************************************//**
Write UUID to string in the canonical 8-4-4-4-12 hex form.
@return length of UUID string representation or -EMSGSIZE if string is
too short (36 characters plus the terminating NUL are required). */
static
int
wsrep_uuid_print(
/*=============*/
	const wsrep_uuid_t*	uuid,
	char*			str,
	size_t			str_len)
{
	const unsigned char*	u;

	if (str_len <= 36) {
		/* not enough room for 36 chars + NUL */
		return -EMSGSIZE;
	}

	u = uuid->data;

	return snprintf(str, str_len,
			"%02x%02x%02x%02x-%02x%02x-%02x%02x-"
			"%02x%02x-%02x%02x%02x%02x%02x%02x",
			u[ 0], u[ 1], u[ 2], u[ 3], u[ 4], u[ 5], u[ 6],
			u[ 7], u[ 8], u[ 9], u[10], u[11], u[12], u[13],
			u[14], u[15]);
}
/***********************************************************************
Store Galera checkpoint info in the 'xtrabackup_galera_info' file, if that
information is present in the trx system header. Otherwise, do nothing.
The file contains a single "<uuid>:<seqno>" line. Exits the process on
any I/O failure while writing the file. */
void
xb_write_galera_info(bool incremental_prepare)
/*==================*/
{
	FILE*		fp;
	XID		xid;
	char		uuid_str[40];
	wsrep_seqno_t	seqno;
	MY_STAT		statinfo;

	/* Do not overwrite an existing file, to be compatible with
	older server versions */
	if (!incremental_prepare &&
	    my_stat(XB_GALERA_INFO_FILENAME, &statinfo, MYF(0)) != NULL) {

		return;
	}

	memset(&xid, 0, sizeof(xid));
	xid.formatID = -1;

	/* nothing to do if the trx system header has no wsrep checkpoint */
	if (!trx_sys_read_wsrep_checkpoint(&xid)) {

		return;
	}

	if (wsrep_uuid_print(wsrep_xid_uuid(&xid), uuid_str,
			     sizeof(uuid_str)) < 0) {
		return;
	}

	fp = fopen(XB_GALERA_INFO_FILENAME, "w");
	if (fp == NULL) {

		msg("xtrabackup: error: "
		    "could not create " XB_GALERA_INFO_FILENAME
		    ", errno = %d\n",
		    errno);
		exit(EXIT_FAILURE);
	}

	seqno = wsrep_xid_seqno(&xid);

	msg("xtrabackup: Recovered WSREP position: %s:%lld\n",
	    uuid_str, (long long) seqno);

	if (fprintf(fp, "%s:%lld", uuid_str, (long long) seqno) < 0) {

		msg("xtrabackup: error: "
		    "could not write to " XB_GALERA_INFO_FILENAME
		    ", errno = %d\n",
		    errno);

		fclose(fp);
		exit(EXIT_FAILURE);
	}

	/* fclose() flushes the buffered data; a failure here would leave
	a truncated galera info file that silently breaks later restores,
	so treat it like a write error */
	if (fclose(fp) != 0) {

		msg("xtrabackup: error: "
		    "could not write to " XB_GALERA_INFO_FILENAME
		    ", errno = %d\n",
		    errno);
		exit(EXIT_FAILURE);
	}
}
/******************************************************
Percona XtraBackup: hot backup tool for InnoDB
(c) 2009-2014 Percona LLC and/or its affiliates
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef WSREP_H
#define WSREP_H
/***********************************************************************
Store Galera checkpoint info in the 'xtrabackup_galera_info' file, if that
information is present in the trx system header. Otherwise, do nothing. */
void
xb_write_galera_info(bool incremental_prepare);
/*==================*/
#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
/* This file is required to abstract away regex(3) calls so that
my_regex is used on Windows and native calls are used on POSIX platforms. */
#ifndef XB_REGEX_H
#define XB_REGEX_H
#ifdef _WIN32
#include <my_regex.h>
typedef my_regex_t xb_regex_t;
typedef my_regmatch_t xb_regmatch_t;
#define xb_regex_init() my_regex_init(&my_charset_latin1)
#define xb_regexec(preg,string,nmatch,pmatch,eflags) \
my_regexec(preg, string, nmatch, pmatch, eflags)
#define xb_regerror(errcode,preg,errbuf,errbuf_size) \
my_regerror(errcode, preg, errbuf, errbuf_size)
#define xb_regcomp(preg,regex,cflags) \
my_regcomp(preg, regex, cflags, &my_charset_latin1)
#define xb_regfree(preg) my_regfree(preg)
#define xb_regex_end() my_regex_end()
#else /* ! _WIN32 */
#include <regex.h>
typedef regex_t xb_regex_t;
typedef regmatch_t xb_regmatch_t;
#define xb_regex_init() do { } while(0)
#define xb_regexec(preg,string,nmatch,pmatch,eflags) \
regexec(preg, string, nmatch, pmatch, eflags)
#define xb_regerror(errcode,preg,errbuf,errbuf_size) \
regerror(errcode, preg, errbuf, errbuf_size)
#define xb_regcomp(preg,regex,cflags) \
regcomp(preg, regex, cflags)
#define xb_regfree(preg) regfree(preg)
#define xb_regex_end() do { } while (0)
#endif /* _WIN32 */
#endif /* XB_REGEX_H */
/******************************************************
Copyright (c) 2014 Percona LLC and/or its affiliates.
The xbstream utility: serialize/deserialize files in the XBSTREAM format.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_global.h>
#include <my_default.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <curl/curl.h>
#include <ev.h>
#include <unistd.h>
#include <errno.h>
#include <gcrypt.h>
#include <assert.h>
#include <my_sys.h>
#include <my_dir.h>
#include <my_getopt.h>
#include <algorithm>
#include <map>
#include <string>
#include <jsmn.h>
#include "xbstream.h"
using std::min;
using std::max;
using std::map;
using std::string;
#define XBCLOUD_VERSION "1.0"
#define SWIFT_MAX_URL_SIZE 8192
#define SWIFT_MAX_HDR_SIZE 8192
#define SWIFT_CHUNK_SIZE 11 * 1024 * 1024
#if ((LIBCURL_VERSION_MAJOR >= 7) && (LIBCURL_VERSION_MINOR >= 16))
#define OLD_CURL_MULTI 0
#else
#define OLD_CURL_MULTI 1
#endif
/*****************************************************************************/

/* Forward typedefs so the structs below can reference each other. */
typedef struct swift_auth_info_struct swift_auth_info;
typedef struct connection_info_struct connection_info;
typedef struct socket_info_struct socket_info;
typedef struct global_io_info_struct global_io_info;
typedef struct slo_chunk_struct slo_chunk;
typedef struct container_list_struct container_list;
typedef struct object_info_struct object_info;

/* Result of a successful Swift authentication: the object-store endpoint
URL and the token to send in X-Auth-Token headers (filled in by
swift_auth_header_read_cb()). */
struct swift_auth_info_struct {
	char url[SWIFT_MAX_URL_SIZE];
	char token[SWIFT_MAX_HDR_SIZE];
};

/* Shared state for the libev + libcurl-multi event loop that drives the
parallel chunk uploads. */
struct global_io_info_struct {
	struct ev_loop *loop;		/* libev event loop */
	struct ev_io input_event;	/* watcher on the input stream */
	struct ev_timer timer_event;	/* curl-requested timeout */
	CURLM *multi;			/* curl multi handle */
	int still_running;		/* transfers still in flight */
	int eof;			/* input stream hit EOF */
	curl_socket_t input_fd;		/* fd the backup stream is read from */
	connection_info **connections;	/* pool of opt_parallel connections */
	long chunk_no;
	connection_info *current_connection; /* connection being filled */
	const char *url;
	const char *container;
	const char *token;
	const char *backup_name;
};

/* Per-socket bookkeeping linking a curl socket to its libev watcher. */
struct socket_info_struct {
	curl_socket_t sockfd;
	CURL *easy;
	int action;		/* last CURL_POLL_* action requested */
	long timeout;
	struct ev_io ev;	/* the libev watcher itself */
	int evset;		/* non-zero once 'ev' has been initialized */
	global_io_info *global;
};

/* State of one parallel upload connection: the buffered chunk being
uploaded and its progress/acknowledgement flags. */
struct connection_info_struct {
	CURL *easy;			/* easy handle of the active transfer */
	global_io_info *global;
	char *buffer;			/* chunk staging buffer */
	size_t buffer_size;
	size_t filled_size;		/* bytes read from input so far */
	size_t upload_size;		/* bytes already handed to libcurl */
	bool chunk_uploaded;		/* chunk fully uploaded and acked */
	bool chunk_acked;		/* server returned a matching ETag */
	char error[CURL_ERROR_SIZE];
	struct curl_slist *slist;	/* request headers for this transfer */
	char *name;
	size_t name_len;
	char hash[33];			/* MD5 of the chunk, hex + NUL */
	size_t chunk_no;
	bool magic_verified;
	size_t chunk_path_len;
	xb_chunk_type_t chunk_type;
	size_t payload_size;
	size_t chunk_size;		/* total size of the current chunk */
	int retry_count;
	bool upload_started;
	ulong global_idx;		/* index in global->connections */
};

/* One entry of a Swift Static Large Object manifest. */
struct slo_chunk_struct {
	char name[SWIFT_MAX_URL_SIZE];
	char md5[33];		/* hex MD5 + NUL */
	int idx;
	size_t size;
};

/* One object as reported by a container listing. */
struct object_info_struct {
	char hash[33];
	char name[SWIFT_MAX_URL_SIZE];
	size_t bytes;
};

/* Buffered JSON response of a container listing plus the parsed
object array. */
struct container_list_struct {
	size_t content_length;
	size_t content_bufsize;
	char *content_json;
	size_t object_count;
	size_t idx;
	object_info *objects;
	bool final;		/* no more pages to fetch */
};
enum {SWIFT, S3};
const char *storage_names[] =
{ "SWIFT", "S3", NullS};
static my_bool opt_verbose = 0;
static ulong opt_storage = SWIFT;
static const char *opt_swift_user = NULL;
static const char *opt_swift_user_id = NULL;
static const char *opt_swift_password = NULL;
static const char *opt_swift_tenant = NULL;
static const char *opt_swift_tenant_id = NULL;
static const char *opt_swift_project = NULL;
static const char *opt_swift_project_id = NULL;
static const char *opt_swift_domain = NULL;
static const char *opt_swift_domain_id = NULL;
static const char *opt_swift_region = NULL;
static const char *opt_swift_container = NULL;
static const char *opt_swift_storage_url = NULL;
static const char *opt_swift_auth_url = NULL;
static const char *opt_swift_key = NULL;
static const char *opt_swift_auth_version = NULL;
static const char *opt_name = NULL;
static const char *opt_cacert = NULL;
static ulong opt_parallel = 1;
static my_bool opt_insecure = 0;
static enum {MODE_GET, MODE_PUT, MODE_DELETE} opt_mode;
static char **file_list = NULL;
static int file_list_size = 0;
TYPELIB storage_typelib =
{array_elements(storage_names)-1, "", storage_names, NULL};
enum {
OPT_STORAGE = 256,
OPT_SWIFT_CONTAINER,
OPT_SWIFT_AUTH_URL,
OPT_SWIFT_KEY,
OPT_SWIFT_USER,
OPT_SWIFT_USER_ID,
OPT_SWIFT_PASSWORD,
OPT_SWIFT_TENANT,
OPT_SWIFT_TENANT_ID,
OPT_SWIFT_PROJECT,
OPT_SWIFT_PROJECT_ID,
OPT_SWIFT_DOMAIN,
OPT_SWIFT_DOMAIN_ID,
OPT_SWIFT_REGION,
OPT_SWIFT_STORAGE_URL,
OPT_SWIFT_AUTH_VERSION,
OPT_PARALLEL,
OPT_CACERT,
OPT_INSECURE,
OPT_VERBOSE
};
/* Command-line option table for my_getopt / handle_options(); the help
text printed by usage() is generated from this table. */
static struct my_option my_long_options[] =
{
	{"help", '?', "Display this help and exit.",
	 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},

	{"storage", OPT_STORAGE, "Specify storage type S3/SWIFT.",
	 &opt_storage, &opt_storage, &storage_typelib,
	 GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},

	/* typo fix: "verison" -> "version" in the user-visible help text */
	{"swift-auth-version", OPT_SWIFT_AUTH_VERSION,
	 "Swift authentication version to use.",
	 &opt_swift_auth_version, &opt_swift_auth_version, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-container", OPT_SWIFT_CONTAINER,
	 "Swift container to store backups into.",
	 &opt_swift_container, &opt_swift_container, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-user", OPT_SWIFT_USER,
	 "Swift user name.",
	 &opt_swift_user, &opt_swift_user, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-user-id", OPT_SWIFT_USER_ID,
	 "Swift user ID.",
	 &opt_swift_user_id, &opt_swift_user_id, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-auth-url", OPT_SWIFT_AUTH_URL,
	 "Base URL of SWIFT authentication service.",
	 &opt_swift_auth_url, &opt_swift_auth_url, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-storage-url", OPT_SWIFT_STORAGE_URL,
	 "URL of object-store endpoint. Usually received from authentication "
	 "service. Specify to override this value.",
	 &opt_swift_storage_url, &opt_swift_storage_url, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-key", OPT_SWIFT_KEY,
	 "Swift key.",
	 &opt_swift_key, &opt_swift_key, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-tenant", OPT_SWIFT_TENANT,
	 "The tenant name. Both the --swift-tenant and --swift-tenant-id "
	 "options are optional, but should not be specified together.",
	 &opt_swift_tenant, &opt_swift_tenant, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-tenant-id", OPT_SWIFT_TENANT_ID,
	 "The tenant ID. Both the --swift-tenant and --swift-tenant-id "
	 "options are optional, but should not be specified together.",
	 &opt_swift_tenant_id, &opt_swift_tenant_id, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-project", OPT_SWIFT_PROJECT,
	 "The project name.",
	 &opt_swift_project, &opt_swift_project, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-project-id", OPT_SWIFT_PROJECT_ID,
	 "The project ID.",
	 &opt_swift_project_id, &opt_swift_project_id, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-domain", OPT_SWIFT_DOMAIN,
	 "The domain name.",
	 &opt_swift_domain, &opt_swift_domain, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-domain-id", OPT_SWIFT_DOMAIN_ID,
	 "The domain ID.",
	 &opt_swift_domain_id, &opt_swift_domain_id, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-password", OPT_SWIFT_PASSWORD,
	 "The password of the user.",
	 &opt_swift_password, &opt_swift_password, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"swift-region", OPT_SWIFT_REGION,
	 "The region object-store endpoint.",
	 &opt_swift_region, &opt_swift_region, 0,
	 GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"parallel", OPT_PARALLEL,
	 "Number of parallel chunk uploads.",
	 &opt_parallel, &opt_parallel, 0, GET_ULONG, REQUIRED_ARG,
	 1, 0, 0, 0, 0, 0},

	{"cacert", OPT_CACERT,
	 "CA certificate file.",
	 &opt_cacert, &opt_cacert, 0, GET_STR_ALLOC, REQUIRED_ARG,
	 0, 0, 0, 0, 0, 0},

	{"insecure", OPT_INSECURE,
	 "Do not verify server SSL certificate.",
	 &opt_insecure, &opt_insecure, 0, GET_BOOL, NO_ARG,
	 0, 0, 0, 0, 0, 0},

	{"verbose", OPT_VERBOSE,
	 "Turn ON cURL tracing.",
	 &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG,
	 0, 0, 0, 0, 0, 0},

	{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
/* The values of these arguments should be masked
on the command line */
static const char * const masked_args[] = {
	"--swift-password",
	"--swift-key",
	"--swift-auth-url",
	"--swift-storage-url",
	"--swift-container",
	"--swift-user",
	"--swift-tenant",
	"--swift-user-id",
	"--swift-tenant-id",
	0	/* terminator; mask_args() iterates until it hits this */
};

/* Per-file chunk counter keyed by file name; presumably tracks how many
chunks of each file have been seen — confirm against the upload/download
code that uses it. */
static map<string, ulonglong> file_chunk_count;
/* Print the tool name/version banner to stdout. */
static
void
print_version()
{
	fprintf(stdout, "%s Ver %s for %s (%s)\n", my_progname,
		XBCLOUD_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
}
/* Print the full help text: version banner, license blurb, supported
commands, and the option list generated from my_long_options. */
static
void
usage()
{
	print_version();
	puts("Copyright (C) 2015 Percona LLC and/or its affiliates.");
	puts("This software comes with ABSOLUTELY NO WARRANTY. "
	     "This is free software,\nand you are welcome to modify and "
	     "redistribute it under the GPL license.\n");

	puts("Manage backups on Cloud services.\n");

	puts("Usage: ");
	printf("  %s -c put [OPTIONS...] <NAME> upload backup from STDIN into "
	       "the cloud service with given name.\n", my_progname);
	printf("  %s -c get [OPTIONS...] <NAME> [FILES...] stream specified "
	       "backup or individual files from cloud service into STDOUT.\n",
	       my_progname);

	puts("\nOptions:");
	my_print_help(my_long_options);
}
/* my_getopt per-option callback: only '?' (help) needs special handling;
it prints the usage text and exits. Everything else is stored into the
option variables by handle_options() itself.
@return FALSE to continue option processing. */
static
my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
	       char *argument __attribute__((unused)))
{
	if (optid == '?') {
		usage();
		exit(0);
	}

	return(FALSE);
}
static const char *load_default_groups[]=
{ "xbcloud", 0 };
/*********************************************************************//**
Overwrite the values of sensitive command-line options (those listed in
masked_args) with 'x' characters, so that secrets do not show up in
process listings. argv is modified in place.
NOTE(review): the outer loop stops at argc-1, so the very last argument
is never masked — confirm this is intentional. */
static
void
mask_args(int argc, char **argv)
{
	int	i;

	for (i = 0; i < argc - 1; i++) {
		int	j;

		if (argv[i] == NULL) {
			continue;
		}

		for (j = 0; masked_args[j]; j++) {
			char	*val;

			val = strstr(argv[i], masked_args[j]);
			if (val == NULL) {
				continue;
			}

			/* skip past the option name to the '=' separator,
			then blank out everything after it */
			val += strlen(masked_args[j]);
			while (*val && *val != '=') {
				val++;
			}
			if (*val == '=') {
				val++;
				while (*val) {
					*val++ = 'x';
				}
			}
		}
	}
}
/* Parse the command line: the first positional argument is the command
(put, get or delete), then option-file defaults and options are applied,
then the mandatory backup name and an optional list of file names.
Exits the process on any usage error.
@return 0 on success (error paths never return). */
static
int parse_args(int argc, char **argv)
{
	const char *command;

	if (argc < 2) {
		fprintf(stderr, "Command isn't specified. "
			"Supported commands are put and get\n");
		usage();
		exit(EXIT_FAILURE);
	}

	command = argv[1];
	argc--; argv++;

	if (strcasecmp(command, "put") == 0) {
		opt_mode = MODE_PUT;
	} else if (strcasecmp(command, "get") == 0) {
		opt_mode = MODE_GET;
	} else if (strcasecmp(command, "delete") == 0) {
		opt_mode = MODE_DELETE;
	} else {
		fprintf(stderr, "Unknown command %s. "
			"Supported commands are put and get\n", command);
		usage();
		exit(EXIT_FAILURE);
	}

	/* merge [xbcloud] settings from my.cnf-style files into argv */
	if (load_defaults("my", load_default_groups, &argc, &argv)) {
		exit(EXIT_FAILURE);
	}

	if (handle_options(&argc, &argv, my_long_options, get_one_option)) {
		exit(EXIT_FAILURE);
	}

	/* make sure name is specified */
	if (argc < 1) {
		fprintf(stderr, "Backup name is required argument\n");
		exit(EXIT_FAILURE);
	}
	opt_name = argv[0];
	argc--; argv++;

	/* validate arguments */
	if (opt_storage == SWIFT) {
		if (opt_swift_user == NULL) {
			fprintf(stderr, "Swift user is not specified\n");
			exit(EXIT_FAILURE);
		}
		if (opt_swift_container == NULL) {
			fprintf(stderr,
				"Swift container is not specified\n");
			exit(EXIT_FAILURE);
		}
		if (opt_swift_auth_url == NULL) {
			fprintf(stderr, "Swift auth URL is not specified\n");
			exit(EXIT_FAILURE);
		}
	} else {
		/* NOTE(review): a non-SWIFT storage type only warns here
		and execution continues — confirm that is intended */
		fprintf(stderr, "Swift is only supported storage API\n");
	}

	/* any remaining arguments are individual files to operate on */
	if (argc > 0) {
		file_list = argv;
		file_list_size = argc;
	}

	return(0);
}
/* Render a 16-byte MD5 digest as a 32-character lowercase hex string.
'out' must have room for 33 bytes (the trailing sprintf NUL terminates
the string).
@return out, for call-chaining convenience. */
static char *hex_md5(const unsigned char *hash, char *out)
{
	enum { hash_len = 16 };
	int	i;

	for (i = 0; i < hash_len; i++) {
		sprintf(out + i * 2, "%02x", hash[i]);
	}

	return out;
}
/* If the buffer starts with the given header prefix, copy the header's
value (up to but not including the terminating CR, truncated to fit
out_size - 1 characters) into 'out'. The prefix match is
case-insensitive, as HTTP header names are.
@return 1 if the prefix matched and the value was copied, 0 otherwise. */
static
int get_http_header(const char *prefix, const char *buffer,
		    char *out, size_t out_size)
{
	const char	*beg, *end;
	size_t		len, prefix_len;

	prefix_len = strlen(prefix);

	if (strncasecmp(buffer, prefix, prefix_len) == 0) {
		beg = buffer + prefix_len;
		end = strchr(beg, '\r');

		/* Guard against a header line without a terminating CR:
		strchr() returns NULL there, and 'end - beg' would be
		undefined (previously this read past the buffer). */
		if (end == NULL) {
			end = beg + strlen(beg);
		}

		len = std::min<size_t>(end - beg, out_size - 1);

		strncpy(out, beg, len);
		out[len] = 0;

		return 1;
	}

	return 0;
}
/* libcurl header callback for the authentication request: each invocation
receives one response header line; scan it for X-Storage-Url and
X-Auth-Token and store their values into the swift_auth_info passed via
CURLOPT_HEADERDATA.
@return number of bytes consumed, as libcurl requires. */
static
size_t swift_auth_header_read_cb(char *ptr, size_t size, size_t nmemb,
				 void *data)
{
	swift_auth_info *info = (swift_auth_info*)(data);

	get_http_header("X-Storage-Url: ", ptr,
			info->url, array_elements(info->url));
	get_http_header("X-Auth-Token: ", ptr,
			info->token, array_elements(info->token));

	return nmemb * size;
}
/*********************************************************************//**
Authenticate against Swift TempAuth. Fills swift_auth_info struct.
Uses credentials provided as global variables.
@returns true if access is granted and token received. */
static
bool
swift_temp_auth(const char *auth_url, swift_auth_info *info)
{
	CURL *curl;
	CURLcode res;
	long http_code;
	char *hdr_buf = NULL;
	struct curl_slist *slist = NULL;

	/* TempAuth needs both a user and a key */
	if (opt_swift_user == NULL) {
		fprintf(stderr, "Swift user must be specified for TempAuth.\n");
		return(false);
	}

	if (opt_swift_key == NULL) {
		fprintf(stderr, "Swift key must be specified for TempAuth.\n");
		return(false);
	}

	curl = curl_easy_init();

	if (curl != NULL) {

		/* scratch buffer reused for both headers; 14 covers the
		longer prefix "X-Auth-User: " (13 chars) plus the NUL */
		hdr_buf = (char *)(calloc(14 + max(strlen(opt_swift_user),
						   strlen(opt_swift_key)), 1));

		if (!hdr_buf) {
			res = CURLE_FAILED_INIT;
			goto cleanup;
		}

		sprintf(hdr_buf, "X-Auth-User: %s", opt_swift_user);
		slist = curl_slist_append(slist, hdr_buf);

		sprintf(hdr_buf, "X-Auth-Key: %s", opt_swift_key);
		slist = curl_slist_append(slist, hdr_buf);

		/* the response headers carry the token and storage URL;
		swift_auth_header_read_cb() extracts them into *info */
		curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
		curl_easy_setopt(curl, CURLOPT_URL, auth_url);
		curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
		curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
				 swift_auth_header_read_cb);
		curl_easy_setopt(curl, CURLOPT_HEADERDATA, info);
		curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
		if (opt_cacert != NULL)
			curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
		if (opt_insecure)
			curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);

		res = curl_easy_perform(curl);

		if (res != CURLE_OK) {
			fprintf(stderr, "error: authentication failed: "
				"curl_easy_perform(): %s\n",
				curl_easy_strerror(res));
			goto cleanup;
		}
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
		if (http_code != 200 &&
		    http_code != 204) {
			fprintf(stderr, "error: authentication failed "
				"with response code: %ld\n", http_code);
			res = CURLE_LOGIN_DENIED;
			goto cleanup;
		}
	} else {
		res = CURLE_FAILED_INIT;
		fprintf(stderr, "error: curl_easy_init() failed\n");
		goto cleanup;
	}

cleanup:
	if (hdr_buf) {
		free(hdr_buf);
	}

	if (slist) {
		curl_slist_free_all(slist);
	}

	if (curl) {
		curl_easy_cleanup(curl);
	}

	if (res == CURLE_OK) {
		/* check that we received token and storage URL */
		if (*info->url == 0) {
			fprintf(stderr, "error: malformed response: "
				"X-Storage-Url is missing\n");
			return(false);
		}
		if (*info->token == 0) {
			fprintf(stderr, "error: malformed response: "
				"X-Auth-Token is missing\n");
			return(false);
		}

		return(true);
	}

	return(false);
}
/* libcurl write callback used where the response body is not interesting:
the payload is dumped to stderr (useful when CURLOPT_VERBOSE is on).
@return number of items written, so libcurl treats the data as consumed. */
static
size_t
write_null_cb(char *buffer, size_t size, size_t nmemb, void *stream)
{
	size_t	written;

	written = fwrite(buffer, size, nmemb, stderr);

	return written;
}
/* libcurl read callback that supplies no request body at all: always
reports zero bytes available. */
static
size_t
read_null_cb(char *ptr, size_t size, size_t nmemb, void *data)
{
	(void) ptr;
	(void) size;
	(void) nmemb;
	(void) data;

	return 0;
}
/* Create (or touch) the Swift container 'name' by issuing an empty PUT
against <storage-url>/<name>. HTTP 201 (created) and 202 (accepted:
container already exists) both count as success.
@return CURLE_OK on success, another CURLcode on failure. */
static
int
swift_create_container(swift_auth_info *info, const char *name)
{
	char url[SWIFT_MAX_URL_SIZE];
	char auth_token[SWIFT_MAX_HDR_SIZE];
	CURLcode res;
	long http_code;
	CURL *curl;
	struct curl_slist *slist = NULL;

	snprintf(url, array_elements(url), "%s/%s", info->url, name);
	snprintf(auth_token, array_elements(auth_token), "X-Auth-Token: %s",
		 info->token);

	curl = curl_easy_init();

	if (curl != NULL) {
		slist = curl_slist_append(slist, auth_token);
		slist = curl_slist_append(slist, "Content-Length: 0");

		curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
		curl_easy_setopt(curl, CURLOPT_URL, url);
		curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
		curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
		curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_null_cb);
		curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_null_cb);
		curl_easy_setopt(curl, CURLOPT_INFILESIZE, 0L);
		/* NOTE(review): CURLOPT_PUT is deprecated in newer libcurl
		in favour of CURLOPT_UPLOAD; kept for old-curl
		compatibility. */
		curl_easy_setopt(curl, CURLOPT_PUT, 1L);
		if (opt_cacert != NULL)
			curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
		if (opt_insecure)
			curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);

		res = curl_easy_perform(curl);

		if (res != CURLE_OK) {
			fprintf(stderr,
				"error: curl_easy_perform() failed: %s\n",
				curl_easy_strerror(res));
			goto cleanup;
		}
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
		if (http_code != 201 && /* created */
		    http_code != 202 /* accepted (already exists) */) {
			fprintf(stderr, "error: request failed "
				"with response code: %ld\n", http_code);
			res = CURLE_LOGIN_DENIED;
			goto cleanup;
		}
	} else {
		res = CURLE_FAILED_INIT;
		fprintf(stderr, "error: curl_easy_init() failed\n");
		goto cleanup;
	}

cleanup:
	if (slist) {
		curl_slist_free_all(slist);
	}
	if (curl) {
		curl_easy_cleanup(curl);
	}

	return res;
}
/*********************************************************************//**
Delete object with given url.
Sends an HTTP DELETE with the auth token from 'info'; HTTP 200 and 204
both count as success.
@returns true if object deleted successfully. */
static
bool
swift_delete_object(swift_auth_info *info, const char *url)
{
	char auth_token[SWIFT_MAX_HDR_SIZE];
	CURLcode res;
	long http_code;
	CURL *curl;
	struct curl_slist *slist = NULL;
	bool ret = false;

	snprintf(auth_token, array_elements(auth_token), "X-Auth-Token: %s",
		 info->token);

	curl = curl_easy_init();

	if (curl != NULL) {
		slist = curl_slist_append(slist, auth_token);

		curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
		curl_easy_setopt(curl, CURLOPT_URL, url);
		curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
		curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
		/* turn the default GET into a DELETE request */
		curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
		if (opt_cacert != NULL)
			curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
		if (opt_insecure)
			curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);

		res = curl_easy_perform(curl);

		if (res != CURLE_OK) {
			fprintf(stderr,
				"error: curl_easy_perform() failed: %s\n",
				curl_easy_strerror(res));
			goto cleanup;
		}
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
		if (http_code != 200 && /* OK */
		    http_code != 204 /* no content */) {
			fprintf(stderr, "error: request failed "
				"with response code: %ld\n", http_code);
			goto cleanup;
		}
		ret = true;
	} else {
		fprintf(stderr, "error: curl_easy_init() failed\n");
		goto cleanup;
	}

cleanup:
	if (slist) {
		curl_slist_free_all(slist);
	}
	if (curl) {
		curl_easy_cleanup(curl);
	}

	return ret;
}
static int conn_upload_init(connection_info *conn);
static void conn_buffer_updated(connection_info *conn);
static connection_info *conn_new(global_io_info *global, ulong global_idx);
static void conn_cleanup(connection_info *conn);
static void conn_upload_retry(connection_info *conn);
/* Check for completed transfers, and remove their easy handles.
Drains the multi handle's message queue: for each finished transfer, the
easy handle is removed and cleaned up; if the chunk was acknowledged by
the server (matching ETag seen in upload_header_read_cb()) it is marked
uploaded, otherwise the upload is re-queued via conn_upload_retry(). */
static void check_multi_info(global_io_info *g)
{
	char *eff_url;
	CURLMsg *msg;
	int msgs_left;
	connection_info *conn;
	CURL *easy;

	while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
		if (msg->msg == CURLMSG_DONE) {
			easy = msg->easy_handle;
			curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
			curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL,
					  &eff_url);
			curl_multi_remove_handle(g->multi, easy);
			curl_easy_cleanup(easy);
			conn->easy = NULL;
			if (conn->chunk_acked) {
				conn->chunk_uploaded = true;
				fprintf(stderr, "%s is done\n", conn->hash);
			} else {
				/* connection closed before the server acked
				the chunk: retry the upload */
				fprintf(stderr, "error: chunk %zu '%s' %s "
					"is not uploaded, but socket closed "
					"(%zu bytes of %zu left to upload)\n",
					conn->chunk_no,
					conn->name,
					conn->hash,
					conn->chunk_size - conn->upload_size,
					conn->chunk_size);
				conn_upload_retry(conn);
			}
		}
	}
}
/* Die if we get a bad CURLMcode somewhere: translate the code into its
symbolic name and report it on stderr. CURLM_BAD_SOCKET is reported but
deliberately ignored; any other non-OK code aborts via assert(0). */
static void mcode_or_die(const char *where, CURLMcode code)
{
	const char *name;

	if (code == CURLM_OK) {
		return;
	}

	switch (code) {
	case CURLM_BAD_HANDLE:
		name = "CURLM_BAD_HANDLE";
		break;
	case CURLM_BAD_EASY_HANDLE:
		name = "CURLM_BAD_EASY_HANDLE";
		break;
	case CURLM_OUT_OF_MEMORY:
		name = "CURLM_OUT_OF_MEMORY";
		break;
	case CURLM_INTERNAL_ERROR:
		name = "CURLM_INTERNAL_ERROR";
		break;
	case CURLM_UNKNOWN_OPTION:
		name = "CURLM_UNKNOWN_OPTION";
		break;
	case CURLM_BAD_SOCKET:
		name = "CURLM_BAD_SOCKET";
		break;
	case CURLM_LAST:
		name = "CURLM_LAST";
		break;
	default:
		name = "CURLM_unknown";
		break;
	}

	fprintf(stderr, "error: %s returns (%d) %s\n",
		where, code, name);

	if (code == CURLM_BAD_SOCKET) {
		/* ignore this error */
		return;
	}

	assert(0);
}
/* Called by libev when we get action on a multi socket: translate the
libev readiness flags into curl's CURL_POLL_* bitmask and let libcurl
make progress, then reap finished transfers. */
static void event_cb(EV_P_ struct ev_io *w, int revents)
{
	global_io_info *global = (global_io_info*)(w->data);
	CURLMcode rc;

#if !(OLD_CURL_MULTI)
	int action = (revents & EV_READ ? CURL_POLL_IN : 0) |
		(revents & EV_WRITE ? CURL_POLL_OUT : 0);

	do {
		rc = curl_multi_socket_action(global->multi, w->fd, action,
					      &global->still_running);
	} while (rc == CURLM_CALL_MULTI_PERFORM);
#else
	/* libcurl older than 7.16: no per-action socket API */
	do {
		rc = curl_multi_socket(global->multi, w->fd,
				       &global->still_running);
	} while (rc == CURLM_CALL_MULTI_PERFORM);
#endif
	mcode_or_die("error: event_cb: curl_multi_socket_action", rc);

	check_multi_info(global);

	/* nothing in flight any more: the timeout watcher is not needed */
	if (global->still_running <= 0) {
		ev_timer_stop(global->loop, &global->timer_event);
	}
}
/* Tear down the per-socket state for a curl socket that libcurl no longer
needs: stop its libev watcher (if armed) and free the bookkeeping struct. */
static void remsock(curl_socket_t s, socket_info *fdp, global_io_info *global)
{
	if (fdp == NULL) {
		return;
	}

	if (fdp->evset) {
		ev_io_stop(global->loop, &fdp->ev);
	}

	free(fdp);
}
/* Arm (or re-arm) the libev watcher for curl socket 's' according to the
CURL_POLL_* action libcurl requested. */
static void setsock(socket_info *fdp, curl_socket_t s, CURL *easy, int action,
		    global_io_info *global)
{
	/* map CURL_POLL_* bits onto libev event bits */
	int kind = (action & CURL_POLL_IN ? (int)(EV_READ) : 0) |
		(action & CURL_POLL_OUT ? (int)(EV_WRITE) : 0);

	fdp->sockfd = s;
	fdp->action = action;
	fdp->easy = easy;
	/* a watcher must be stopped before ev_io_init() re-initializes it */
	if (fdp->evset)
		ev_io_stop(global->loop, &fdp->ev);
	ev_io_init(&fdp->ev, event_cb, fdp->sockfd, kind);
	fdp->ev.data = global;
	fdp->evset = 1;
	ev_io_start(global->loop, &fdp->ev);
}
/* Register a new curl socket with libev: allocate the per-socket state,
arm its watcher via setsock(), and attach the state to the socket with
curl_multi_assign() so subsequent sock_cb() calls receive it.
A failed allocation is now reported and fatal — previously the NULL
result of calloc() was dereferenced unconditionally. */
static void addsock(curl_socket_t s, CURL *easy, int action,
		    global_io_info *global)
{
	socket_info *fdp = (socket_info *)(calloc(1, sizeof(socket_info)));

	if (fdp == NULL) {
		fprintf(stderr, "error: out of memory\n");
		exit(EXIT_FAILURE);
	}

	fdp->global = global;
	setsock(fdp, s, easy, action, global);
	curl_multi_assign(global->multi, s, fdp);
}
/* CURLMOPT_SOCKETFUNCTION callback: libcurl tells us which events it
wants on which socket. Create, update, or remove the corresponding
libev watcher.
@return 0, as libcurl requires. */
static int sock_cb(CURL *easy, curl_socket_t s, int what, void *cbp,
		   void *sockp)
{
	global_io_info	*global = (global_io_info*)(cbp);
	socket_info	*fdp = (socket_info*)(sockp);

	if (what == CURL_POLL_REMOVE) {
		remsock(s, fdp, global);
	} else if (fdp == NULL) {
		/* first time we hear about this socket */
		addsock(s, easy, what, global);
	} else {
		setsock(fdp, s, easy, what, global);
	}

	return 0;
}
/* Called by libev when our timeout expires: kick libcurl with
CURL_SOCKET_TIMEOUT so it can handle its internal timeouts, then reap
finished transfers. */
static void timer_cb(EV_P_ struct ev_timer *w, int revents)
{
	global_io_info *io_global = (global_io_info*)(w->data);
	CURLMcode rc;

#if !(OLD_CURL_MULTI)
	do {
		rc = curl_multi_socket_action(io_global->multi,
					      CURL_SOCKET_TIMEOUT, 0,
					      &io_global->still_running);
	} while (rc == CURLM_CALL_MULTI_PERFORM);
#else
	/* libcurl older than 7.16: poke all sockets */
	do {
		rc = curl_multi_socket_all(io_global->multi,
					   &io_global->still_running);
	} while (rc == CURLM_CALL_MULTI_PERFORM);
#endif
	mcode_or_die("timer_cb: curl_multi_socket_action", rc);
	check_multi_info(io_global);
}
/* Pick the connection whose buffer should receive the next input bytes:
keep using the current connection while its chunk is still incomplete,
otherwise claim the first idle connection (one whose chunk was uploaded,
or which never received data) and reset it for a new chunk.
@return the selected connection, or NULL if all are busy. */
static connection_info *get_current_connection(global_io_info *global)
{
	connection_info	*candidate = global->current_connection;
	ulong		idx;

	if (candidate != NULL && candidate->filled_size < candidate->chunk_size) {
		return candidate;
	}

	for (idx = 0; idx < opt_parallel; idx++) {
		candidate = global->connections[idx];
		if (candidate->chunk_uploaded || candidate->filled_size == 0) {
			global->current_connection = candidate;
			conn_upload_init(candidate);
			return candidate;
		}
	}

	return NULL;
}
/* This gets called whenever data is received from the input.
Reads from the input fd into the current connection's chunk buffer. A
zero-byte read marks EOF and stops the watcher; EAGAIN/EINTR are
tolerated, any other read error is fatal. */
static void input_cb(EV_P_ struct ev_io *w, int revents)
{
	global_io_info *io_global = (global_io_info *)(w->data);
	connection_info *conn = get_current_connection(io_global);

	/* all connections busy: leave the data in the pipe for now */
	if (conn == NULL)
		return;

	if (conn->filled_size < conn->chunk_size) {
		if (revents & EV_READ) {
			ssize_t nbytes = read(io_global->input_fd,
					      conn->buffer + conn->filled_size,
					      conn->chunk_size -
					      conn->filled_size);
			if (nbytes > 0) {
				conn->filled_size += nbytes;
				conn_buffer_updated(conn);
			} else if (nbytes < 0) {
				if (errno != EAGAIN && errno != EINTR) {
					char error[200];
					my_strerror(error, sizeof(error),
						    errno);
					fprintf(stderr, "error: failed to read "
						"input stream (%s)\n", error);
					/* failed to read input */
					exit(1);
				}
			} else {
				/* zero-byte read: end of input stream */
				io_global->eof = 1;
				ev_io_stop(io_global->loop, w);
			}
		}
	}

	assert(conn->filled_size <= conn->chunk_size);
}
/* libcurl read callback feeding chunk data to an in-progress upload.
If the buffer has been fully consumed but the chunk is not complete,
block on the input fd (retrying on EAGAIN) to refill the buffer before
handing bytes to libcurl.
NOTE(review): declared as returning int while libcurl expects a size_t
return; harmless for the chunk sizes used here, but worth confirming.
@return number of bytes copied into 'ptr'. */
static int swift_upload_read_cb(char *ptr, size_t size, size_t nmemb,
				void *data)
{
	size_t realsize;

	connection_info *conn = (connection_info*)(data);

	/* buffer drained and chunk incomplete: pull more input */
	if (conn->filled_size == conn->upload_size &&
	    conn->upload_size < conn->chunk_size && !conn->global->eof) {
		ssize_t nbytes;
		assert(conn->global->current_connection == conn);
		do {
			nbytes = read(conn->global->input_fd,
				      conn->buffer + conn->filled_size,
				      conn->chunk_size - conn->filled_size);
		} while (nbytes == -1 && errno == EAGAIN);
		if (nbytes > 0) {
			conn->filled_size += nbytes;
			conn_buffer_updated(conn);
		} else {
			conn->global->eof = 1;
		}
	}

	/* hand libcurl as much buffered-but-unsent data as fits */
	realsize = min(size * nmemb, conn->filled_size - conn->upload_size);

	memcpy(ptr, conn->buffer + conn->upload_size, realsize);
	conn->upload_size += realsize;

	assert(conn->filled_size <= conn->chunk_size);
	assert(conn->upload_size <= conn->filled_size);

	return realsize;
}
/* cURL header callback for chunk-upload responses: when an Etag header
arrives, verify it against the locally computed MD5 of the chunk and
mark the chunk acknowledged.  Exits the process on a mismatch. */
static
size_t upload_header_read_cb(char *ptr, size_t size, size_t nmemb,
			     void *data)
{
	connection_info *conn = (connection_info *)(data);
	char etag[33];

	if (!get_http_header("Etag: ", ptr, etag, array_elements(etag)))
		return nmemb * size;

	if (strcmp(conn->hash, etag) != 0) {
		fprintf(stderr, "error: ETag mismatch\n");
		exit(EXIT_FAILURE);
	}

	fprintf(stderr, "acked chunk %s\n", etag);
	conn->chunk_acked = true;

	return nmemb * size;
}
/* Reset a connection's per-chunk state so it can receive the next chunk
from the input stream.  Always returns 0. */
static int conn_upload_init(connection_info *conn)
{
	conn->filled_size = 0;
	conn->upload_size = 0;
	conn->chunk_uploaded = false;
	conn->chunk_acked = false;
	/* until the header is parsed we only know to expect the
	constant-size header prefix */
	conn->chunk_size = CHUNK_HEADER_CONSTANT_LEN;
	conn->magic_verified = false;
	conn->chunk_path_len = 0;
	conn->chunk_type = XB_CHUNK_TYPE_UNKNOWN;
	conn->payload_size = 0;
	conn->upload_started = false;
	conn->retry_count = 0;

	if (conn->name != NULL)
		conn->name[0] = 0;

	/* NOTE(review): the easy handle is merely forgotten here, not
	cleaned up -- presumably disposed of after transfer completion
	elsewhere; confirm there is no handle leak */
	conn->easy = 0;

	if (conn->slist != NULL) {
		curl_slist_free_all(conn->slist);
		conn->slist = NULL;
	}

	return 0;
}
/* Compute the MD5 digest of the chunk currently buffered on the
connection and store its hex form in conn->hash for ETag checking. */
static void conn_upload_prepare(connection_info *conn)
{
	gcry_md_hd_t digest;

	gcry_md_open(&digest, GCRY_MD_MD5, 0);
	gcry_md_write(digest, conn->buffer, conn->chunk_size);
	hex_md5(gcry_md_read(digest, GCRY_MD_MD5), conn->hash);
	gcry_md_close(digest);
}
/* Build the object URL and HTTP headers for the buffered chunk, create
a cURL easy handle for the PUT and hand it to the multi stack.
Returns 0 on success, 1 when the easy handle cannot be created. */
static int conn_upload_start(connection_info *conn)
{
	char token_header[SWIFT_MAX_HDR_SIZE];
	char object_url[SWIFT_MAX_URL_SIZE];
	char content_len[200], etag[200];
	global_io_info *global;
	CURLMcode rc;

	global = conn->global;

	fprintf(stderr, "uploading chunk %s/%s/%s.%020zu "
		"(md5: %s, size: %zu)\n",
		global->container, global->backup_name, conn->name,
		conn->chunk_no, conn->hash, conn->chunk_size);

	/* object name: <url>/<container>/<backup>/<file>.<chunk no> */
	snprintf(object_url, array_elements(object_url), "%s/%s/%s/%s.%020zu",
		 global->url, global->container, global->backup_name,
		 conn->name, conn->chunk_no);

	snprintf(content_len, sizeof(content_len), "Content-Length: %lu",
		 (ulong)(conn->chunk_size));

	/* the server verifies the body MD5 against this ETag */
	snprintf(etag, sizeof(etag), "ETag: %s", conn->hash);

	snprintf(token_header, array_elements(token_header),
		 "X-Auth-Token: %s", global->token);

	conn->slist = curl_slist_append(conn->slist, token_header);
	conn->slist = curl_slist_append(conn->slist,
					"Connection: keep-alive");
	conn->slist = curl_slist_append(conn->slist,
					"Content-Type: "
					"application/octet-stream");
	conn->slist = curl_slist_append(conn->slist, content_len);
	conn->slist = curl_slist_append(conn->slist, etag);

	conn->easy = curl_easy_init();
	if (!conn->easy) {
		fprintf(stderr, "error: curl_easy_init() failed\n");
		return 1;
	}
	curl_easy_setopt(conn->easy, CURLOPT_URL, object_url);
	curl_easy_setopt(conn->easy, CURLOPT_READFUNCTION,
			 swift_upload_read_cb);
	curl_easy_setopt(conn->easy, CURLOPT_READDATA, conn);
	curl_easy_setopt(conn->easy, CURLOPT_VERBOSE, opt_verbose);
	curl_easy_setopt(conn->easy, CURLOPT_ERRORBUFFER, conn->error);
	curl_easy_setopt(conn->easy, CURLOPT_PRIVATE, conn);
	curl_easy_setopt(conn->easy, CURLOPT_NOPROGRESS, 1L);
	/* abort stalled transfers: < 1 KB/s for 5 seconds */
	curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_TIME, 5L);
	curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_LIMIT, 1024L);
	curl_easy_setopt(conn->easy, CURLOPT_PUT, 1L);
	curl_easy_setopt(conn->easy, CURLOPT_HTTPHEADER, conn->slist);
	curl_easy_setopt(conn->easy, CURLOPT_HEADERFUNCTION,
			 upload_header_read_cb);
	curl_easy_setopt(conn->easy, CURLOPT_HEADERDATA, conn);
	curl_easy_setopt(conn->easy, CURLOPT_INFILESIZE,
			 (long) conn->chunk_size);
	if (opt_cacert != NULL)
		curl_easy_setopt(conn->easy, CURLOPT_CAINFO, opt_cacert);
	if (opt_insecure)
		curl_easy_setopt(conn->easy, CURLOPT_SSL_VERIFYPEER, FALSE);

	rc = curl_multi_add_handle(conn->global->multi, conn->easy);
	/* fixed: the diagnostic previously named conn_upload_init */
	mcode_or_die("conn_upload_start: curl_multi_add_handle", rc);

#if (OLD_CURL_MULTI)
	/* old libcurl needs a manual kick to start the transfer */
	do {
		rc = curl_multi_socket_all(global->multi,
					   &global->still_running);
	} while(rc == CURLM_CALL_MULTI_PERFORM);
#endif

	conn->upload_started = true;

	return 0;
}
/* Release everything owned by a connection object and the object
itself.  Safe to call with NULL. */
static void conn_cleanup(connection_info *conn)
{
	if (conn == NULL)
		return;

	free(conn->name);
	free(conn->buffer);

	if (conn->slist != NULL) {
		curl_slist_free_all(conn->slist);
		conn->slist = NULL;
	}
	if (conn->easy != NULL) {
		curl_easy_cleanup(conn->easy);
		conn->easy = NULL;
	}

	free(conn);
}
/* Re-send the current chunk after a failed transfer.  Terminates the
process once the retry budget is exhausted. */
static void conn_upload_retry(connection_info *conn)
{
	/* the failed easy handle was already closed by cURL */
	conn->easy = NULL;

	if (conn->slist != NULL) {
		curl_slist_free_all(conn->slist);
		conn->slist = NULL;
	}

	if (conn->retry_count++ > 3) {
		fprintf(stderr, "error: retry count limit reached\n");
		exit(EXIT_FAILURE);
	}

	fprintf(stderr, "warning: retrying to upload chunk %zu of '%s'\n",
		conn->chunk_no, conn->name);

	/* replay the already-buffered chunk from its first byte */
	conn->upload_size = 0;
	conn_upload_start(conn);
}
/* Allocate and initialize one upload connection with an initial chunk
buffer of SWIFT_CHUNK_SIZE bytes.  Terminates the process on allocation
failure, so the return value is always non-NULL. */
static connection_info *conn_new(global_io_info *global, ulong global_idx)
{
	connection_info *conn =
		(connection_info *)(calloc(1, sizeof(connection_info)));

	if (conn != NULL) {
		conn->global = global;
		conn->global_idx = global_idx;
		conn->buffer_size = SWIFT_CHUNK_SIZE;
		conn->buffer = (char *)(calloc(conn->buffer_size, 1));
		if (conn->buffer != NULL)
			return conn;
		conn_cleanup(conn);
	}

	fprintf(stderr, "error: out of memory\n");
	exit(EXIT_FAILURE);

	return NULL;
}
/*********************************************************************//**
Handle input buffer updates. Parse chunk header and set appropriate
buffer size. */
static
void
conn_buffer_updated(connection_info *conn)
{
	bool ready_for_upload = false;

	/* chunk header: once the constant-size prefix is in, verify the
	xbstream magic and learn the path length and chunk type, which
	determine how many more bytes belong to this chunk */
	if (!conn->magic_verified &&
	    conn->filled_size >= CHUNK_HEADER_CONSTANT_LEN) {
		if (strncmp(XB_STREAM_CHUNK_MAGIC, conn->buffer,
			    sizeof(XB_STREAM_CHUNK_MAGIC) - 1) != 0) {
			fprintf(stderr, "Error: magic expected\n");
			exit(EXIT_FAILURE);
		}

		conn->magic_verified = true;
		conn->chunk_path_len = uint4korr(conn->buffer
						 + PATH_LENGTH_OFFSET);
		conn->chunk_type = (xb_chunk_type_t)
			(conn->buffer[CHUNK_TYPE_OFFSET]);
		conn->chunk_size = CHUNK_HEADER_CONSTANT_LEN +
			conn->chunk_path_len;
		if (conn->chunk_type != XB_CHUNK_TYPE_EOF) {
			/* 16 more header bytes follow the path --
			presumably payload length (8) plus offset (8);
			TODO confirm against the xbstream format */
			conn->chunk_size += 16;
		}
	}

	/* ordinary chunk: once the path and the 16 trailing header bytes
	arrived, read the payload length and grow chunk_size/buffer to
	cover the complete chunk */
	if (conn->magic_verified &&
	    conn->payload_size == 0 &&
	    conn->chunk_type != XB_CHUNK_TYPE_EOF &&
	    conn->filled_size >= CHUNK_HEADER_CONSTANT_LEN
	    + conn->chunk_path_len + 16) {
		conn->payload_size = uint8korr(conn->buffer +
					       CHUNK_HEADER_CONSTANT_LEN +
					       conn->chunk_path_len);
		conn->chunk_size = conn->payload_size + 4 + 16 +
			conn->chunk_path_len +
			CHUNK_HEADER_CONSTANT_LEN;

		/* remember the file name this chunk belongs to;
		NOTE(review): malloc/realloc results are unchecked */
		if (conn->name == NULL) {
			conn->name = (char*)(malloc(conn->chunk_path_len + 1));
		} else if (conn->name_len < conn->chunk_path_len + 1) {
			conn->name = (char*)(realloc(conn->name,
						     conn->chunk_path_len + 1));
		}
		conn->name_len = conn->chunk_path_len + 1;

		memcpy(conn->name, conn->buffer + CHUNK_HEADER_CONSTANT_LEN,
		       conn->chunk_path_len);
		conn->name[conn->chunk_path_len] = 0;

		if (conn->buffer_size < conn->chunk_size) {
			conn->buffer =
				(char *)(realloc(conn->buffer, conn->chunk_size));
			conn->buffer_size = conn->chunk_size;
		}
	}

	/* EOF chunk has no payload */
	if (conn->magic_verified &&
	    conn->chunk_type == XB_CHUNK_TYPE_EOF &&
	    conn->filled_size >= CHUNK_HEADER_CONSTANT_LEN
	    + conn->chunk_path_len) {
		if (conn->name == NULL) {
			conn->name = (char*)(malloc(conn->chunk_path_len + 1));
		} else if (conn->name_len < conn->chunk_path_len + 1) {
			conn->name = (char*)(realloc(conn->name,
						     conn->chunk_path_len + 1));
		}
		conn->name_len = conn->chunk_path_len + 1;

		memcpy(conn->name, conn->buffer + CHUNK_HEADER_CONSTANT_LEN,
		       conn->chunk_path_len);
		conn->name[conn->chunk_path_len] = 0;
	}

	if (conn->filled_size > 0 && conn->filled_size == conn->chunk_size) {
		ready_for_upload = true;
	}

	/* start upload once received the size of the chunk */
	if (!conn->upload_started && ready_for_upload) {
		/* per-file chunk numbering */
		conn->chunk_no = file_chunk_count[conn->name]++;
		conn_upload_prepare(conn);
		conn_upload_start(conn);
	}
}
/* Register stdin with the event loop so input_cb fires whenever backup
stream data becomes readable.  Always returns 0. */
static int init_input(global_io_info *io_global)
{
	ev_io_init(&io_global->input_event, input_cb, STDIN_FILENO, EV_READ);
	/* let the callback reach the global state via the watcher */
	io_global->input_event.data = io_global;
	ev_io_start(io_global->loop, &io_global->input_event);
	return 0;
}
/* Update the event timer after curl_multi library calls: re-arm the
libev timer to fire in timeout_ms milliseconds, or act immediately for
a zero/negative timeout.  Always returns 0. */
static int multi_timer_cb(CURLM *multi, long timeout_ms, global_io_info *global)
{
	ev_timer_stop(global->loop, &global->timer_event);

	if (timeout_ms <= 0) {
		/* no delay requested -- drive curl right away */
		timer_cb(global->loop, &global->timer_event, 0);
		return 0;
	}

	ev_timer_init(&global->timer_event, timer_cb,
		      (double)timeout_ms / 1000.0, 0.);
	ev_timer_start(global->loop, &global->timer_event);

	return 0;
}
/* Stream the backup from stdin to Swift as numbered chunk objects using
opt_parallel concurrent connections driven by libev + curl_multi.
Returns 0 on success, EXIT_FAILURE when any connection still holds
unsent data after the event loop drains. */
static
int swift_upload_parts(swift_auth_info *auth, const char *container,
		       const char *name)
{
	global_io_info io_global;
	ulong i;
#if (OLD_CURL_MULTI)
	long timeout;
#endif
	CURLMcode rc;
	int n_dirty_buffers;

	memset(&io_global, 0, sizeof(io_global));

	io_global.loop = ev_default_loop(0);
	init_input(&io_global);
	io_global.multi = curl_multi_init();
	ev_timer_init(&io_global.timer_event, timer_cb, 0., 0.);
	io_global.timer_event.data = &io_global;
	/* Fix: this is an array of pointers, so the element size is
	sizeof(connection_info *), not sizeof(connection_info) (the old
	code merely over-allocated, but the intent was wrong). */
	io_global.connections = (connection_info **)
		(calloc(opt_parallel, sizeof(connection_info *)));
	io_global.url = auth->url;
	io_global.container = container;
	io_global.backup_name = name;
	io_global.token = auth->token;

	/* conn_new() exits on failure, so every slot is populated */
	for (i = 0; i < opt_parallel; i++) {
		io_global.connections[i] = conn_new(&io_global, i);
	}

	/* setup the generic multi interface options we want */
	curl_multi_setopt(io_global.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
	curl_multi_setopt(io_global.multi, CURLMOPT_SOCKETDATA, &io_global);
#if !(OLD_CURL_MULTI)
	curl_multi_setopt(io_global.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
	curl_multi_setopt(io_global.multi, CURLMOPT_TIMERDATA, &io_global);
	do {
		rc = curl_multi_socket_action(io_global.multi,
					      CURL_SOCKET_TIMEOUT, 0,
					      &io_global.still_running);
	} while (rc == CURLM_CALL_MULTI_PERFORM);
#else
	curl_multi_timeout(io_global.multi, &timeout);
	if (timeout >= 0) {
		multi_timer_cb(io_global.multi, timeout, &io_global);
	}
	do {
		rc = curl_multi_socket_all(io_global.multi, &io_global.still_running);
	} while(rc == CURLM_CALL_MULTI_PERFORM);
#endif

	/* run until all uploads finish and stdin reaches EOF */
	ev_loop(io_global.loop, 0);
	check_multi_info(&io_global);
	curl_multi_cleanup(io_global.multi);

	/* a buffer with unsent bytes means the upload failed */
	n_dirty_buffers = 0;
	for (i = 0; i < opt_parallel; i++) {
		connection_info *conn = io_global.connections[i];
		if (conn && conn->upload_size != conn->filled_size) {
			fprintf(stderr, "error: upload failed: %lu bytes left "
				"in the buffer %s (uploaded = %d)\n",
				(ulong)(conn->filled_size - conn->upload_size),
				conn->name, conn->chunk_uploaded);
			++n_dirty_buffers;
		}
	}

	for (i = 0; i < opt_parallel; i++) {
		if (io_global.connections[i] != NULL) {
			conn_cleanup(io_global.connections[i]);
		}
	}
	free(io_global.connections);

	if (n_dirty_buffers > 0) {
		return(EXIT_FAILURE);
	}

	return 0;
}
/* State shared between the cURL write/header callbacks while fetching a
single HTTP response body into a growable in-memory buffer. */
struct download_buffer_info {
	off_t offset;		/* current write position inside buf */
	size_t size;		/* allocated capacity of buf */
	size_t result_len;	/* body length per Content-Length header */
	char *buf;		/* heap buffer; may be (re)allocated by
				fetch_buffer_header_cb */
	/* optional user hook invoked for every response header line */
	curl_read_callback custom_header_callback;
	void *custom_header_callback_data;
};
/*********************************************************************//**
Callback to parse header of GET request on swift contaier.  On a
Content-Length header, make sure the destination buffer can hold the
announced body, then forward the raw header line to the optional
user-supplied header callback. */
static
size_t fetch_buffer_header_cb(char *ptr, size_t size, size_t nmemb,
			      void *data)
{
	download_buffer_info *buffer_info = (download_buffer_info*)(data);
	char content_length_str[100];

	if (get_http_header("Content-Length: ", ptr,
			    content_length_str,
			    sizeof(content_length_str))) {
		char *endptr;
		size_t announced =
			strtoull(content_length_str, &endptr, 10);

		if (buffer_info->buf == NULL) {
			buffer_info->buf = (char*)(malloc(announced));
			buffer_info->size = announced;
		} else if (announced > buffer_info->size) {
			buffer_info->buf = (char*)
				(realloc(buffer_info->buf, announced));
			buffer_info->size = announced;
		}
		buffer_info->result_len = announced;
	}

	if (buffer_info->custom_header_callback != NULL) {
		buffer_info->custom_header_callback(ptr, size, nmemb,
			buffer_info->custom_header_callback_data);
	}

	return nmemb * size;
}
/*********************************************************************//**
Write contents into string buffer */
static
size_t
fetch_buffer_cb(char *buffer, size_t size, size_t nmemb, void *out_buffer)
{
download_buffer_info *buffer_info = (download_buffer_info*)(out_buffer);
assert(buffer_info->size >= buffer_info->offset + size * nmemb);
memcpy(buffer_info->buf + buffer_info->offset, buffer, size * nmemb);
buffer_info->offset += size * nmemb;
return size * nmemb;
}
/*********************************************************************//**
Downloads contents of URL into buffer. Caller is responsible for
deallocating the buffer.
@return pointer to a buffer or NULL */
static
char *
swift_fetch_into_buffer(swift_auth_info *auth, const char *url,
			char **buf, size_t *buf_size, size_t *result_len,
			curl_read_callback header_callback,
			void *header_callback_data)
{
	char auth_token[SWIFT_MAX_HDR_SIZE];
	download_buffer_info buffer_info;
	struct curl_slist *slist = NULL;
	long http_code;
	CURL *curl;
	CURLcode res;

	/* reuse the caller's buffer; header callback grows it to fit
	the announced Content-Length */
	memset(&buffer_info, 0, sizeof(buffer_info));
	buffer_info.buf = *buf;
	buffer_info.size = *buf_size;
	buffer_info.custom_header_callback = header_callback;
	buffer_info.custom_header_callback_data = header_callback_data;

	snprintf(auth_token, array_elements(auth_token), "X-Auth-Token: %s",
		 auth->token);

	curl = curl_easy_init();

	if (curl != NULL) {
		slist = curl_slist_append(slist, auth_token);

		curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
		curl_easy_setopt(curl, CURLOPT_URL, url);
		curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
		curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
		curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fetch_buffer_cb);
		curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buffer_info);
		curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
				 fetch_buffer_header_cb);
		curl_easy_setopt(curl, CURLOPT_HEADERDATA,
				 &buffer_info);

		if (opt_cacert != NULL)
			curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
		if (opt_insecure)
			curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);

		res = curl_easy_perform(curl);

		if (res != CURLE_OK) {
			fprintf(stderr,
				"error: curl_easy_perform() failed: %s\n",
				curl_easy_strerror(res));
			goto cleanup;
		}
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
		/* any non-2xx status counts as failure */
		if (http_code < 200 || http_code >= 300) {
			fprintf(stderr, "error: request failed "
				"with response code: %ld\n", http_code);
			res = CURLE_LOGIN_DENIED;
			goto cleanup;
		}
	} else {
		res = CURLE_FAILED_INIT;
		fprintf(stderr, "error: curl_easy_init() failed\n");
		goto cleanup;
	}

cleanup:
	if (slist) {
		curl_slist_free_all(slist);
	}
	if (curl) {
		curl_easy_cleanup(curl);
	}

	/* success: hand the (possibly reallocated) buffer back */
	if (res == CURLE_OK) {
		*buf = buffer_info.buf;
		*buf_size = buffer_info.size;
		*result_len = buffer_info.result_len;
		return(buffer_info.buf);
	}

	/* failure: the buffer is freed, so clear the caller's pointer */
	free(buffer_info.buf);
	*buf = NULL;
	*buf_size = 0;
	*result_len = 0;

	return(NULL);
}
/* Allocate a container_list with an initial capacity of 1000 objects.
@return new list, or NULL on allocation failure */
static
container_list *
container_list_new()
{
	container_list *list =
		(container_list *)(calloc(1, sizeof(container_list)));

	/* fix: the list itself was dereferenced without a NULL check */
	if (list == NULL) {
		fprintf(stderr, "error: out of memory\n");
		return(NULL);
	}
	list->object_count = 1000;
	list->objects = (object_info*)
		(calloc(list->object_count, sizeof(object_info)));
	if (list->objects == NULL) {
		fprintf(stderr, "error: out of memory\n");
		free(list);
		return(NULL);
	}
	return(list);
}
/* Release a container list, its JSON backing buffer and object array.
Now tolerates NULL so error paths can free unconditionally. */
static
void
container_list_free(container_list *list)
{
	if (list == NULL) {
		return;
	}
	free(list->content_json);
	free(list->objects);
	free(list);
}
/* Append one object entry (name, hash, size) to the list, growing the
array in steps of 1000 entries as needed.  Exits on allocation failure
(matching the OOM policy of conn_new). */
static
void
container_list_add_object(container_list *list, const char *name,
			  const char *hash, size_t bytes)
{
	const size_t object_count_step = 1000;

	if (list->idx >= list->object_count) {
		/* fix: the realloc result was used unchecked; a failure
		would have made memset write through a NULL pointer and
		leaked the old array */
		object_info *grown = (object_info*)
			realloc(list->objects,
				(list->object_count + object_count_step) *
				sizeof(object_info));
		if (grown == NULL) {
			fprintf(stderr, "error: out of memory\n");
			exit(EXIT_FAILURE);
		}
		list->objects = grown;
		/* zero the freshly added tail */
		memset(list->objects + list->object_count, 0,
		       object_count_step * sizeof(object_info));
		list->object_count += object_count_step;
	}
	assert(list->idx <= list->object_count);
	/* NOTE(review): strcpy assumes name/hash fit the fixed-size
	fields of object_info -- confirm the field widths */
	strcpy(list->objects[list->idx].name, name);
	strcpy(list->objects[list->idx].hash, hash);
	list->objects[list->idx].bytes = bytes;
	++list->idx;
}
/*********************************************************************//**
Tokenize json string. Return array of tokens. Caller is responsible for
deallocating the array.  Grows the token array geometrically while the
parser reports JSMN_ERROR_NOMEM; exits on heap exhaustion. */
jsmntok_t *
json_tokenise(char *json, size_t len, int initial_tokens)
{
	jsmn_parser parser;
	jsmn_init(&parser);

	unsigned int n = initial_tokens;
	jsmntok_t *tokens = (jsmntok_t *)(malloc(sizeof(jsmntok_t) * n));
	/* fix: allocation results were used unchecked */
	if (tokens == NULL) {
		fprintf(stderr, "error: out of memory\n");
		exit(EXIT_FAILURE);
	}

	int ret = jsmn_parse(&parser, json, len, tokens, n);

	while (ret == JSMN_ERROR_NOMEM)
	{
		n = n * 2 + 1;
		jsmntok_t *grown =
			(jsmntok_t*)(realloc(tokens, sizeof(jsmntok_t) * n));
		if (grown == NULL) {
			fprintf(stderr, "error: out of memory\n");
			exit(EXIT_FAILURE);
		}
		tokens = grown;
		ret = jsmn_parse(&parser, json, len, tokens, n);
	}

	/* malformed input is reported but the partial token array is
	still returned, as before */
	if (ret == JSMN_ERROR_INVAL) {
		fprintf(stderr, "error: invalid JSON string\n");
	}
	if (ret == JSMN_ERROR_PART) {
		fprintf(stderr, "error: truncated JSON string\n");
	}

	return tokens;
}
/*********************************************************************//**
Return true if token representation equal to given string. */
static
bool
json_token_eq(const char *buf, jsmntok_t *t, const char *s)
{
	const size_t want = strlen(s);
	const size_t have = (size_t)(t->end - t->start);

	assert(t->end > t->start);

	return(have == want && strncmp(buf + t->start, s, want) == 0);
}
/*********************************************************************//**
Copy given token as string.  The copy is truncated to out_size - 1
characters and always NUL-terminated.  Always returns true. */
static
bool
json_token_str(const char *buf, jsmntok_t *t, char *out, int out_size)
{
	const size_t len = min(t->end - t->start, out_size - 1);

	memcpy(out, buf + t->start, len);
	out[len] = 0;

	return(true);
}
/*********************************************************************//**
Parse SWIFT container list response and fill output array with values
sorted by object name. */
static
bool
swift_parse_container_list(container_list *list)
{
	enum {MAX_DEPTH=20};
	enum label_t {NONE, OBJECT};

	/* scratch fields collected while walking one object entry;
	NOTE(review): if a listing entry lacks one of the keys these
	retain the previous entry's (or uninitialized) value -- confirm
	the server always sends name/hash/bytes */
	char name[SWIFT_MAX_URL_SIZE];
	char hash[33];
	char bytes[30];

	char *response = list->content_json;

	/* explicit stack tracking the currently open arrays/objects and
	how many tokens of each remain unconsumed */
	struct stack_t {
		jsmntok_t *t;
		int n_items;
		label_t label;
	};
	stack_t stack[MAX_DEPTH];
	jsmntok_t *tokens;
	int level;
	size_t count = 0;

	tokens = json_tokenise(list->content_json, list->content_length, 200);

	stack[0].t = &tokens[0];
	stack[0].label = NONE;
	stack[0].n_items = 1;
	level = 0;

	/* j counts tokens still expected; each container token adds its
	children to the budget */
	for (size_t i = 0, j = 1; j > 0; i++, j--) {
		jsmntok_t *t = &tokens[i];

		assert(t->start != -1 && t->end != -1);
		assert(level >= 0);

		--stack[level].n_items;

		switch (t->type) {
		case JSMN_ARRAY:
		case JSMN_OBJECT:
			if (level < MAX_DEPTH - 1) {
				level++;
			}
			stack[level].t = t;
			stack[level].label = NONE;
			if (t->type == JSMN_ARRAY) {
				stack[level].n_items = t->size;
				j += t->size;
			} else {
				/* objects count key+value pairs */
				stack[level].n_items = t->size * 2;
				j += t->size * 2;
			}
			break;
		case JSMN_PRIMITIVE:
		case JSMN_STRING:
			if (stack[level].t->type == JSMN_OBJECT &&
			    stack[level].n_items % 2 == 1) {
				/* key */
				if (json_token_eq(response, t, "name")) {
					json_token_str(response, &tokens[i + 1],
						       name, sizeof(name));
				}
				if (json_token_eq(response, t, "hash")) {
					json_token_str(response, &tokens[i + 1],
						       hash, sizeof(hash));
				}
				if (json_token_eq(response, t, "bytes")) {
					json_token_str(response, &tokens[i + 1],
						       bytes, sizeof(bytes));
				}
			}
			break;
		}

		/* pop every container we just finished; a level-2 object
		is one listing entry -- flush it to the output array */
		while (stack[level].n_items == 0 && level > 0) {
			if (stack[level].t->type == JSMN_OBJECT
			    && level == 2) {
				char *endptr;
				container_list_add_object(list, name, hash,
					strtoull(bytes, &endptr, 10));
				++count;
			}
			--level;
		}
	}

	/* an empty page means pagination is complete */
	if (count == 0) {
		list->final = true;
	}

	free(tokens);

	return(true);
}
/*********************************************************************//**
List swift container with given name. Return list of objects sorted by
object name.  Pages through the listing 1000 objects at a time using the
last seen object name as the "marker" parameter, until an empty page
sets list->final. */
static
container_list *
swift_list(swift_auth_info *auth, const char *container, const char *path)
{
	container_list *list;
	char url[SWIFT_MAX_URL_SIZE];

	list = container_list_new();

	while (!list->final) {
		/* download the list in json format; "prefix" filters by
		the optional path, "marker" resumes after the last object
		of the previous page */
		snprintf(url, array_elements(url),
			 "%s/%s?format=json&limit=1000%s%s%s%s",
			 auth->url, container, path ? "&prefix=" : "",
			 path ? path : "", list->idx > 0 ? "&marker=" : "",
			 list->idx > 0 ?
				list->objects[list->idx - 1].name : "");

		list->content_json = swift_fetch_into_buffer(auth, url,
				&list->content_json, &list->content_bufsize,
				&list->content_length, NULL, NULL);

		if (list->content_json == NULL) {
			container_list_free(list);
			return(NULL);
		}

		/* parse downloaded list */
		if (!swift_parse_container_list(list)) {
			fprintf(stderr, "error: unable to parse "
					"container list\n");
			container_list_free(list);
			return(NULL);
		}
	}

	return(list);
}
/*********************************************************************//**
Return true if chunk is a part of backup with given name.  A chunk of
backup "b" is named "b/<path>.<number>": the backup name must be a
proper prefix immediately followed by '/'. */
static
bool
chunk_belongs_to(const char *chunk_name, const char *backup_name)
{
	const size_t prefix_len = strlen(backup_name);

	if (strlen(chunk_name) <= prefix_len) {
		return(false);
	}
	if (chunk_name[prefix_len] != '/') {
		return(false);
	}

	return(strncmp(chunk_name, backup_name, prefix_len) == 0);
}
/*********************************************************************//**
Return true if chunk is in given list.  Chunk names have the form
"<backup>/<file>.<20-digit chunk number>"; a chunk matches a list entry
when its "<file>" component equals that entry.  An empty list matches
everything. */
static
bool
chunk_in_list(const char *chunk_name, char **list, int list_size)
{
	size_t chunk_name_len;

	if (list_size == 0) {
		return(true);
	}

	chunk_name_len = strlen(chunk_name);

	if (chunk_name_len < 20) {
		return(false);
	}

	for (int i = 0; i < list_size; i++) {
		size_t item_len = strlen(list[i]);

		/* Fix: skip entries that cannot fit.  The old code
		evaluated chunk_name[chunk_name_len - item_len - 22]
		even when the offset was negative, reading out of
		bounds. */
		if (item_len + 22 > chunk_name_len) {
			continue;
		}
		if ((strncmp(chunk_name + chunk_name_len - item_len - 21,
			     list[i], item_len) == 0)
		    && (chunk_name[chunk_name_len - 21] == '.')
		    && (chunk_name[chunk_name_len - item_len - 22] == '/')) {
			return(true);
		}
	}

	return(false);
}
/* Fetch every chunk of the named backup (optionally filtered by the
global file_list) from the container and write the raw chunk bytes to
stdout in listing order.  Returns CURLE_OK or CURLE_FAILED_INIT. */
static
int swift_download(swift_auth_info *auth, const char *container,
		   const char *name)
{
	container_list *list;
	char *chunk_buf = NULL;
	size_t chunk_buf_size = 0;
	size_t chunk_len = 0;

	if ((list = swift_list(auth, container, name)) == NULL) {
		return(CURLE_FAILED_INIT);
	}

	for (size_t i = 0; i < list->idx; i++) {
		const char *chunk_name = list->objects[i].name;
		char url[SWIFT_MAX_URL_SIZE];

		if (!chunk_belongs_to(chunk_name, name)
		    || !chunk_in_list(chunk_name, file_list,
				      file_list_size)) {
			continue;
		}

		snprintf(url, sizeof(url), "%s/%s/%s",
			 auth->url, container, chunk_name);

		/* the buffer is reused (and grown) across chunks */
		chunk_buf = swift_fetch_into_buffer(auth, url, &chunk_buf,
						    &chunk_buf_size,
						    &chunk_len, NULL, NULL);
		if (chunk_buf == NULL) {
			fprintf(stderr, "error: failed to download "
				"chunk %s\n", chunk_name);
			container_list_free(list);
			return(CURLE_FAILED_INIT);
		}

		fwrite(chunk_buf, 1, chunk_len, stdout);
	}

	free(chunk_buf);
	container_list_free(list);

	return(CURLE_OK);
}
/*********************************************************************//**
Delete backup with given name from given container.
@return true if backup deleted successfully
NOTE(review): despite the bool signature, the function actually returns
CURLE_OK (0) on success and CURLE_FAILED_INIT (non-zero) on failure --
i.e. the opposite truth value of the contract above.  Kept as-is to stay
call-compatible; callers must be audited before changing it. */
static
bool swift_delete(swift_auth_info *auth, const char *container,
		  const char *name)
{
	container_list *list;

	if ((list = swift_list(auth, container, name)) == NULL) {
		return(CURLE_FAILED_INIT);
	}

	/* fix: iterate only over the filled entries (idx) instead of the
	whole allocated capacity (object_count); the zeroed tail entries
	never matched, but were scanned needlessly */
	for (size_t i = 0; i < list->idx; i++) {
		const char *chunk_name = list->objects[i].name;

		if (chunk_belongs_to(chunk_name, name)) {
			char url[SWIFT_MAX_URL_SIZE];

			snprintf(url, sizeof(url), "%s/%s/%s",
				 auth->url, container, chunk_name);

			fprintf(stderr, "delete %s\n", chunk_name);

			if (!swift_delete_object(auth, url)) {
				fprintf(stderr, "error: failed to delete "
					"chunk %s\n", chunk_name);
				container_list_free(list);
				return(CURLE_FAILED_INIT);
			}
		}
	}

	container_list_free(list);

	return(CURLE_OK);
}
/*********************************************************************//**
Check if backup with given name exists.
@return true if backup exists.  Exits the process if the container
cannot be listed at all. */
static
bool swift_backup_exists(swift_auth_info *auth, const char *container,
			 const char *backup_name)
{
	container_list *list;

	if ((list = swift_list(auth, container, backup_name)) == NULL) {
		fprintf(stderr, "error: unable to list container %s\n",
			container);
		exit(EXIT_FAILURE);
	}

	/* fix: iterate only over the filled entries (idx) instead of the
	whole allocated capacity (object_count) */
	for (size_t i = 0; i < list->idx; i++) {
		if (chunk_belongs_to(list->objects[i].name, backup_name)) {
			container_list_free(list);
			return(true);
		}
	}

	container_list_free(list);

	return(false);
}
/*********************************************************************//**
Fills auth_info with response from keystone response.
@return true is response parsed successfully */
static
bool
swift_parse_keystone_response_v2(char *response, size_t response_length,
swift_auth_info *auth_info)
{
enum {MAX_DEPTH=20};
enum label_t {NONE, ACCESS, CATALOG, ENDPOINTS, TOKEN};
char filtered_url[SWIFT_MAX_URL_SIZE];
char public_url[SWIFT_MAX_URL_SIZE];
char region[SWIFT_MAX_URL_SIZE];
char id[SWIFT_MAX_URL_SIZE];
char token_id[SWIFT_MAX_URL_SIZE];
char type[SWIFT_MAX_URL_SIZE];
struct stack_t {
jsmntok_t *t;
int n_items;
label_t label;
};
stack_t stack[MAX_DEPTH];
jsmntok_t *tokens;
int level;
tokens = json_tokenise(response, response_length, 200);
stack[0].t = &tokens[0];
stack[0].label = NONE;
stack[0].n_items = 1;
level = 0;
for (size_t i = 0, j = 1; j > 0; i++, j--) {
jsmntok_t *t = &tokens[i];
assert(t->start != -1 && t->end != -1);
assert(level >= 0);
--stack[level].n_items;
switch (t->type) {
case JSMN_ARRAY:
case JSMN_OBJECT:
if (level < MAX_DEPTH - 1) {
level++;
}
stack[level].t = t;
stack[level].label = NONE;
if (t->type == JSMN_ARRAY) {
stack[level].n_items = t->size;
j += t->size;
} else {
stack[level].n_items = t->size * 2;
j += t->size * 2;
}
break;
case JSMN_PRIMITIVE:
case JSMN_STRING:
if (stack[level].t->type == JSMN_OBJECT &&
stack[level].n_items % 2 == 1) {
/* key */
if (json_token_eq(response, t, "access")) {
stack[level].label = ACCESS;
}
if (json_token_eq(response, t,
"serviceCatalog")) {
stack[level].label = CATALOG;
}
if (json_token_eq(response, t, "endpoints")) {
stack[level].label = ENDPOINTS;
}
if (json_token_eq(response, t, "token")) {
stack[level].label = TOKEN;
}
if (json_token_eq(response, t, "id")) {
json_token_str(response, &tokens[i + 1],
id, sizeof(id));
}
if (json_token_eq(response, t, "id")
&& stack[level - 1].label == TOKEN) {
json_token_str(response, &tokens[i + 1],
token_id, sizeof(token_id));
}
if (json_token_eq(response, t, "region")) {
json_token_str(response, &tokens[i + 1],
region, sizeof(region));
}
if (json_token_eq(response, t, "publicURL")) {
json_token_str(response, &tokens[i + 1],
public_url, sizeof(public_url));
}
if (json_token_eq(response, t, "type")) {
json_token_str(response, &tokens[i + 1],
type, sizeof(type));
}
}
break;
}
while (stack[level].n_items == 0 && level > 0) {
if (stack[level].t->type == JSMN_OBJECT
&& level == 6
&& stack[level - 1].t->type == JSMN_ARRAY
&& stack[level - 2].label == ENDPOINTS) {
if (opt_swift_region == NULL
|| strcmp(opt_swift_region, region) == 0) {
strncpy(filtered_url, public_url,
sizeof(filtered_url));
}
}
if (stack[level].t->type == JSMN_OBJECT &&
level == 4 &&
stack[level - 1].t->type == JSMN_ARRAY &&
stack[level - 2].label == CATALOG) {
if (strcmp(type, "object-store") == 0) {
strncpy(auth_info->url, filtered_url,
sizeof(auth_info->url));
}
}
--level;
}
}
free(tokens);
strncpy(auth_info->token, token_id, sizeof(auth_info->token));
assert(level == 0);
if (*auth_info->token == 0) {
fprintf(stderr, "error: can not receive token from response\n");
return(false);
}
if (*auth_info->url == 0) {
fprintf(stderr, "error: can not get URL from response\n");
return(false);
}
return(true);
}
/*********************************************************************//**
Authenticate against Swift TempAuth. Fills swift_auth_info struct.
Uses creadentials privided as global variables.
@returns true if access is granted and token received. */
static
bool
swift_keystone_auth_v2(const char *auth_url, swift_auth_info *info)
{
	char tenant_arg[SWIFT_MAX_URL_SIZE];
	char payload[SWIFT_MAX_URL_SIZE];
	struct curl_slist *slist = NULL;
	download_buffer_info buf_info;
	long http_code;
	CURLcode res;
	CURL *curl;
	bool auth_res = false;

	memset(&buf_info, 0, sizeof(buf_info));

	/* fixed: the messages used to say "both --swift-user is
	required" / "both --swift-password is required", a copy-paste
	from the tenant/tenant-id conflict error below */
	if (opt_swift_user == NULL) {
		fprintf(stderr, "error: --swift-user is required "
			"for keystone authentication.\n");
		return(false);
	}

	if (opt_swift_password == NULL) {
		fprintf(stderr, "error: --swift-password is required "
			"for keystone authentication.\n");
		return(false);
	}

	if (opt_swift_tenant != NULL && opt_swift_tenant_id != NULL) {
		fprintf(stderr, "error: both --swift-tenant and "
			"--swift-tenant-id specified for keystone "
			"authentication.\n");
		return(false);
	}

	if (opt_swift_tenant != NULL) {
		snprintf(tenant_arg, sizeof(tenant_arg), ",\"%s\":\"%s\"",
			 "tenantName", opt_swift_tenant);
	} else if (opt_swift_tenant_id != NULL) {
		snprintf(tenant_arg, sizeof(tenant_arg), ",\"%s\":\"%s\"",
			 "tenantId", opt_swift_tenant_id);
	} else {
		*tenant_arg = 0;
	}

	snprintf(payload, sizeof(payload), "{\"auth\": "
		 "{\"passwordCredentials\": {\"username\":\"%s\","
		 "\"password\":\"%s\"}%s}}",
		 opt_swift_user, opt_swift_password, tenant_arg);

	curl = curl_easy_init();

	if (curl != NULL) {
		slist = curl_slist_append(slist,
					  "Content-Type: application/json");
		slist = curl_slist_append(slist,
					  "Accept: application/json");

		curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
		curl_easy_setopt(curl, CURLOPT_POST, 1L);
		curl_easy_setopt(curl, CURLOPT_URL, auth_url);
		curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
		curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload);
		curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fetch_buffer_cb);
		curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf_info);
		curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
				 fetch_buffer_header_cb);
		curl_easy_setopt(curl, CURLOPT_HEADERDATA,
				 &buf_info);
		curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);

		if (opt_cacert != NULL)
			curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
		if (opt_insecure)
			curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);

		res = curl_easy_perform(curl);

		if (res != CURLE_OK) {
			fprintf(stderr,
				"error: curl_easy_perform() failed: %s\n",
				curl_easy_strerror(res));
			goto cleanup;
		}
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
		if (http_code < 200 || http_code >= 300) {
			fprintf(stderr, "error: request failed "
				"with response code: %ld\n", http_code);
			res = CURLE_LOGIN_DENIED;
			goto cleanup;
		}
	} else {
		res = CURLE_FAILED_INIT;
		fprintf(stderr, "error: curl_easy_init() failed\n");
		goto cleanup;
	}

	/* NOTE(review): this passes the buffer capacity (size), not the
	body length (result_len), to the parser -- they coincide when the
	buffer was sized from Content-Length, but confirm */
	if (!swift_parse_keystone_response_v2(buf_info.buf,
					      buf_info.size, info)) {
		goto cleanup;
	}

	auth_res = true;

cleanup:
	if (slist) {
		curl_slist_free_all(slist);
	}
	if (curl) {
		curl_easy_cleanup(curl);
	}

	free(buf_info.buf);

	return(auth_res);
}
/*********************************************************************//**
Fills auth_info with response from keystone response.
@return true is response parsed successfully */
static
bool
swift_parse_keystone_response_v3(char *response, size_t response_length,
swift_auth_info *auth_info)
{
enum {MAX_DEPTH=20};
enum label_t {NONE, TOKEN, CATALOG, ENDPOINTS};
char url[SWIFT_MAX_URL_SIZE];
char filtered_url[SWIFT_MAX_URL_SIZE];
char region[SWIFT_MAX_URL_SIZE];
char interface[SWIFT_MAX_URL_SIZE];
char type[SWIFT_MAX_URL_SIZE];
struct stack_t {
jsmntok_t *t;
int n_items;
label_t label;
};
stack_t stack[MAX_DEPTH];
jsmntok_t *tokens;
int level;
tokens = json_tokenise(response, response_length, 200);
stack[0].t = &tokens[0];
stack[0].label = NONE;
stack[0].n_items = 1;
level = 0;
for (size_t i = 0, j = 1; j > 0; i++, j--) {
jsmntok_t *t = &tokens[i];
assert(t->start != -1 && t->end != -1);
assert(level >= 0);
--stack[level].n_items;
switch (t->type) {
case JSMN_ARRAY:
case JSMN_OBJECT:
if (level < MAX_DEPTH - 1) {
level++;
}
stack[level].t = t;
stack[level].label = NONE;
if (t->type == JSMN_ARRAY) {
stack[level].n_items = t->size;
j += t->size;
} else {
stack[level].n_items = t->size * 2;
j += t->size * 2;
}
break;
case JSMN_PRIMITIVE:
case JSMN_STRING:
if (stack[level].t->type == JSMN_OBJECT &&
stack[level].n_items % 2 == 1) {
/* key */
if (json_token_eq(response, t, "token")) {
stack[level].label = TOKEN;
fprintf(stderr, "token\n");
}
if (json_token_eq(response, t,
"catalog")) {
stack[level].label = CATALOG;
fprintf(stderr, "catalog\n");
}
if (json_token_eq(response, t, "endpoints")) {
stack[level].label = ENDPOINTS;
}
if (json_token_eq(response, t, "region")) {
json_token_str(response, &tokens[i + 1],
region, sizeof(region));
}
if (json_token_eq(response, t, "url")) {
json_token_str(response, &tokens[i + 1],
url, sizeof(url));
}
if (json_token_eq(response, t, "interface")) {
json_token_str(response, &tokens[i + 1],
interface, sizeof(interface));
}
if (json_token_eq(response, t, "type")) {
json_token_str(response, &tokens[i + 1],
type, sizeof(type));
}
}
break;
}
while (stack[level].n_items == 0 && level > 0) {
if (stack[level].t->type == JSMN_OBJECT
&& level == 6
&& stack[level - 1].t->type == JSMN_ARRAY
&& stack[level - 2].label == ENDPOINTS) {
if ((opt_swift_region == NULL
|| strcmp(opt_swift_region, region) == 0)
&& strcmp(interface, "public") == 0) {
strncpy(filtered_url, url,
sizeof(filtered_url));
}
}
if (stack[level].t->type == JSMN_OBJECT &&
level == 4 &&
stack[level - 1].t->type == JSMN_ARRAY &&
stack[level - 2].label == CATALOG) {
if (strcmp(type, "object-store") == 0) {
strncpy(auth_info->url, filtered_url,
sizeof(auth_info->url));
}
}
--level;
}
}
free(tokens);
assert(level == 0);
if (*auth_info->url == 0) {
fprintf(stderr, "error: can not get URL from response\n");
return(false);
}
return(true);
}
/*********************************************************************//**
Captures X-Subject-Token header. */
static
size_t keystone_v3_header_cb(char *ptr, size_t size, size_t nmemb, void *data)
{
swift_auth_info *info = (swift_auth_info*)(data);
get_http_header("X-Subject-Token: ", ptr,
info->token, array_elements(info->token));
return nmemb * size;
}
/*********************************************************************//**
Authenticate against Swift Keystone (identity API v3). Fills the
swift_auth_info struct. Uses credentials provided as global variables.
@returns true if access is granted and a token was received. */
static
bool
swift_keystone_auth_v3(const char *auth_url, swift_auth_info *info)
{
	char scope[SWIFT_MAX_URL_SIZE];
	char domain[SWIFT_MAX_URL_SIZE];
	char payload[SWIFT_MAX_URL_SIZE];
	struct curl_slist *slist = NULL;
	download_buffer_info buf_info;
	long http_code;
	CURLcode res;
	CURL *curl;
	bool auth_res = false;

	memset(&buf_info, 0, sizeof(buf_info));
	/* The token arrives in the X-Subject-Token response header, so hook
	a header callback to capture it into info->token. */
	buf_info.custom_header_callback = keystone_v3_header_cb;
	buf_info.custom_header_callback_data = info;

	if (opt_swift_user == NULL) {
		/* message fix: the original said "both --swift-user" */
		fprintf(stderr, "error: --swift-user is required "
			"for keystone authentication.\n");
		return(false);
	}

	if (opt_swift_password == NULL) {
		fprintf(stderr, "error: --swift-password is required "
			"for keystone authentication.\n");
		return(false);
	}

	/* Project and domain selectors are mutually exclusive. */
	if (opt_swift_project_id != NULL && opt_swift_project != NULL) {
		fprintf(stderr, "error: both --swift-project and "
			"--swift-project-id specified for keystone "
			"authentication.\n");
		return(false);
	}

	if (opt_swift_domain_id != NULL && opt_swift_domain != NULL) {
		fprintf(stderr, "error: both --swift-domain and "
			"--swift-domain-id specified for keystone "
			"authentication.\n");
		return(false);
	}

	if (opt_swift_project_id != NULL && opt_swift_domain != NULL) {
		fprintf(stderr, "error: both --swift-project-id and "
			"--swift-domain specified for keystone "
			"authentication.\n");
		return(false);
	}

	if (opt_swift_project_id != NULL && opt_swift_domain_id != NULL) {
		fprintf(stderr, "error: both --swift-project-id and "
			"--swift-domain-id specified for keystone "
			"authentication.\n");
		return(false);
	}

	scope[0] = 0; domain[0] = 0;

	/* Optional domain sub-object, appended to the project scope. */
	if (opt_swift_domain != NULL) {
		snprintf(domain, sizeof(domain),
			 ",{\"domain\":{\"name\":\"%s\"}}",
			 opt_swift_domain);
	} else if (opt_swift_domain_id != NULL) {
		snprintf(domain, sizeof(domain),
			 ",{\"domain\":{\"id\":\"%s\"}}",
			 opt_swift_domain_id);
	}

	if (opt_swift_project_id != NULL) {
		snprintf(scope, sizeof(scope),
			 ",\"scope\":{\"project\":{\"id\":\"%s\"}}",
			 opt_swift_project_id);
	} else if (opt_swift_project != NULL) {
		/* BUG FIX: this branch formerly passed opt_swift_project_id,
		which is always NULL here, instead of opt_swift_project —
		scoping by project name never worked. */
		snprintf(scope, sizeof(scope),
			 ",\"scope\":{\"project\":{\"name\":\"%s\"%s}}",
			 opt_swift_project, domain);
	}

	/* v3 password-method auth request; fall back to the "default"
	domain when no project scope is given. */
	snprintf(payload, sizeof(payload), "{\"auth\":{\"identity\":"
		 "{\"methods\":[\"password\"],\"password\":{\"user\":"
		 "{\"name\":\"%s\",\"password\":\"%s\"%s}}}%s}}",
		 opt_swift_user, opt_swift_password,
		 *scope ? "" : ",\"domain\":{\"id\":\"default\"}",
		 scope);

	curl = curl_easy_init();

	if (curl != NULL) {

		slist = curl_slist_append(slist,
					  "Content-Type: application/json");
		slist = curl_slist_append(slist,
					  "Accept: application/json");

		curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
		curl_easy_setopt(curl, CURLOPT_POST, 1L);
		curl_easy_setopt(curl, CURLOPT_URL, auth_url);
		curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
		curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload);
		curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fetch_buffer_cb);
		curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf_info);
		curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
				 fetch_buffer_header_cb);
		curl_easy_setopt(curl, CURLOPT_HEADERDATA,
				 &buf_info);
		curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
		if (opt_cacert != NULL)
			curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
		if (opt_insecure)
			curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);

		res = curl_easy_perform(curl);

		if (res != CURLE_OK) {
			fprintf(stderr,
				"error: curl_easy_perform() failed: %s\n",
				curl_easy_strerror(res));
			goto cleanup;
		}
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
		if (http_code < 200 || http_code >= 300) {
			fprintf(stderr, "error: request failed "
				"with response code: %ld\n", http_code);
			res = CURLE_LOGIN_DENIED;
			goto cleanup;
		}
	} else {
		res = CURLE_FAILED_INIT;
		fprintf(stderr, "error: curl_easy_init() failed\n");
		goto cleanup;
	}

	if (!swift_parse_keystone_response_v3(buf_info.buf,
					      buf_info.size, info)) {
		goto cleanup;
	}

	auth_res = true;

cleanup:
	if (slist) {
		curl_slist_free_all(slist);
	}
	if (curl) {
		curl_easy_cleanup(curl);
	}
	free(buf_info.buf);

	return(auth_res);
}
/* Entry point of the swift client: authenticate with the selected auth
backend, then run the PUT/GET/DELETE operation from the command line. */
int main(int argc, char **argv)
{
	swift_auth_info info;
	char auth_url[SWIFT_MAX_URL_SIZE];

	MY_INIT(argv[0]);

	/* Zero the auth info so later checks of info.url never read
	indeterminate memory. */
	memset(&info, 0, sizeof(info));

	/* handle_options in parse_args is destructive so
	* we make a copy of our argument pointers so we can
	* mask the sensitive values afterwards */
	char **mask_argv = (char **)malloc(sizeof(char *) * (argc - 1));
	if (mask_argv == NULL && argc > 1) {
		fprintf(stderr, "error: out of memory\n");
		return(EXIT_FAILURE);
	}
	if (argc > 1) {
		memcpy(mask_argv, argv + 1, sizeof(char *) * (argc - 1));
	}

	if (parse_args(argc, argv)) {
		return(EXIT_FAILURE);
	}

	mask_args(argc, mask_argv);	/* mask args on cmdline */

	curl_global_init(CURL_GLOBAL_ALL);

	if (opt_swift_auth_version == NULL || *opt_swift_auth_version == '1') {
		/* TempAuth */
		snprintf(auth_url, SWIFT_MAX_URL_SIZE, "%sauth/v%s/",
			 opt_swift_auth_url, opt_swift_auth_version ?
			 opt_swift_auth_version : "1.0");
		if (!swift_temp_auth(auth_url, &info)) {
			fprintf(stderr, "error: failed to authenticate\n");
			return(EXIT_FAILURE);
		}

	} else if (*opt_swift_auth_version == '2') {
		/* Keystone v2 */
		snprintf(auth_url, SWIFT_MAX_URL_SIZE, "%sv%s/tokens",
			 opt_swift_auth_url, opt_swift_auth_version);
		if (!swift_keystone_auth_v2(auth_url, &info)) {
			fprintf(stderr, "error: failed to authenticate\n");
			return(EXIT_FAILURE);
		}

	} else if (*opt_swift_auth_version == '3') {
		/* Keystone v3 */
		snprintf(auth_url, SWIFT_MAX_URL_SIZE, "%sv%s/auth/tokens",
			 opt_swift_auth_url, opt_swift_auth_version);
		if (!swift_keystone_auth_v3(auth_url, &info)) {
			fprintf(stderr, "error: failed to authenticate\n");
			return(EXIT_FAILURE);
		}
	} else {
		/* Previously fell through here with info uninitialized. */
		fprintf(stderr, "error: unsupported authentication "
			"version: %s\n", opt_swift_auth_version);
		return(EXIT_FAILURE);
	}

	/* An explicit storage URL overrides whatever auth returned. */
	if (opt_swift_storage_url != NULL) {
		snprintf(info.url, sizeof(info.url), "%s",
			 opt_swift_storage_url);
	}

	fprintf(stderr, "Object store URL: %s\n", info.url);

	if (opt_mode == MODE_PUT) {

		if (swift_create_container(&info, opt_swift_container) != 0) {
			fprintf(stderr, "error: failed to create "
				"container %s\n",
				opt_swift_container);
			return(EXIT_FAILURE);
		}

		if (swift_backup_exists(&info, opt_swift_container, opt_name)) {
			fprintf(stderr, "error: backup named '%s' "
				"already exists!\n",
				opt_name);
			return(EXIT_FAILURE);
		}

		if (swift_upload_parts(&info, opt_swift_container,
				       opt_name) != 0) {
			fprintf(stderr, "error: upload failed\n");
			return(EXIT_FAILURE);
		}

	} else if (opt_mode == MODE_GET) {

		if (swift_download(&info, opt_swift_container, opt_name)
		    != CURLE_OK) {
			fprintf(stderr, "error: download failed\n");
			return(EXIT_FAILURE);
		}

	} else if (opt_mode == MODE_DELETE) {

		if (swift_delete(&info, opt_swift_container, opt_name)
		    != CURLE_OK) {
			fprintf(stderr, "error: delete failed\n");
			return(EXIT_FAILURE);
		}

	} else {
		fprintf(stderr, "Unknown command supplied.\n");
		return(EXIT_FAILURE);
	}

	curl_global_cleanup();

	return(EXIT_SUCCESS);
}
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
The xbcrypt utility: decrypt files in the XBCRYPT format.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_base.h>
#include <my_getopt.h>
#include "common.h"
#include "xbcrypt.h"
#include <gcrypt.h>
#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
GCRY_THREAD_OPTION_PTHREAD_IMPL;
#endif
/* Version string reported by print_version(). */
#define XBCRYPT_VERSION "1.1"

/* Operating mode; encryption is the default, -d/--decrypt switches. */
typedef enum {
	RUN_MODE_NONE,
	RUN_MODE_ENCRYPT,
	RUN_MODE_DECRYPT
} run_mode_t;

/* Names accepted by --encrypt-algo; order must match encrypt_algos[]. */
const char *xbcrypt_encrypt_algo_names[] =
{ "NONE", "AES128", "AES192", "AES256", NullS};
TYPELIB xbcrypt_encrypt_algo_typelib=
{array_elements(xbcrypt_encrypt_algo_names)-1,"",
 xbcrypt_encrypt_algo_names, NULL};

/* Command-line option values, filled in by get_options(). */
static run_mode_t opt_run_mode = RUN_MODE_ENCRYPT;
static char *opt_input_file = NULL;
static char *opt_output_file = NULL;
static ulong opt_encrypt_algo;
static char *opt_encrypt_key_file = NULL;
static void *opt_encrypt_key = NULL;
static ulonglong opt_encrypt_chunk_size = 0;
static my_bool opt_verbose = FALSE;

/* libgcrypt cipher ids matching xbcrypt_encrypt_algo_names[]. */
static uint encrypt_algos[] = { GCRY_CIPHER_NONE,
				GCRY_CIPHER_AES128,
				GCRY_CIPHER_AES192,
				GCRY_CIPHER_AES256 };
/* Cipher parameters resolved in main() from the options above. */
static int encrypt_algo = 0;
static int encrypt_mode = GCRY_CIPHER_MODE_CTR;
static uint encrypt_key_len = 0;
static size_t encrypt_iv_len = 0;
/* Option table consumed by handle_options(); the parsed values are stored
in the opt_* globals above. See usage() for the generated help text. */
static struct my_option my_long_options[] =
{
	{"help", '?', "Display this help and exit.",
	 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},

	{"decrypt", 'd', "Decrypt data input to output.",
	 0, 0, 0,
	 GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},

	{"input", 'i', "Optional input file. If not specified, input"
	 " will be read from standard input.",
	 &opt_input_file, &opt_input_file, 0,
	 GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},

	{"output", 'o', "Optional output file. If not specified, output"
	 " will be written to standard output.",
	 &opt_output_file, &opt_output_file, 0,
	 GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},

	{"encrypt-algo", 'a', "Encryption algorithm.",
	 &opt_encrypt_algo, &opt_encrypt_algo, &xbcrypt_encrypt_algo_typelib,
	 GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},

	{"encrypt-key", 'k', "Encryption key.",
	 &opt_encrypt_key, &opt_encrypt_key, 0,
	 GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},

	{"encrypt-key-file", 'f', "File which contains encryption key.",
	 &opt_encrypt_key_file, &opt_encrypt_key_file, 0,
	 GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},

	{"encrypt-chunk-size", 's', "Size of working buffer for encryption in"
	 " bytes. The default value is 64K.",
	 &opt_encrypt_chunk_size, &opt_encrypt_chunk_size, 0,
	 GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},

	{"verbose", 'v', "Display verbose status output.",
	 &opt_verbose, &opt_verbose,
	 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},

	{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
/* Forward declarations for the helpers defined below. */
static
int
get_options(int *argc, char ***argv);

static
my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
	       char *argument __attribute__((unused)));

static
void
print_version(void);

static
void
usage(void);

static
int
mode_decrypt(File filein, File fileout);

static
int
mode_encrypt(File filein, File fileout);
/* Entry point of xbcrypt: parse options, initialize libgcrypt, resolve the
encryption key (inline or from a file), open input/output streams and run
the selected encrypt/decrypt mode. Returns EXIT_SUCCESS, or exits with
EXIT_FAILURE on any error. */
int
main(int argc, char **argv)
{
#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
	gcry_error_t gcry_error;
#endif
	File filein = 0;
	File fileout = 0;

	MY_INIT(argv[0]);

	if (get_options(&argc, &argv)) {
		goto err;
	}

	/* According to gcrypt docs (and my testing), setting up the threading
	callbacks must be done first, so, lets give it a shot */
#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
	gcry_error = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
	if (gcry_error) {
		msg("%s: unable to set libgcrypt thread cbs - "
		    "%s : %s\n", my_progname,
		    gcry_strsource(gcry_error),
		    gcry_strerror(gcry_error));
		return 1;
	}
#endif

	/* Version check should be the very first call because it
	makes sure that important subsystems are initialized. */
	if (!gcry_control(GCRYCTL_ANY_INITIALIZATION_P)) {
		const char *gcrypt_version;
		gcrypt_version = gcry_check_version(NULL);
		/* No other library has already initialized libgcrypt. */
		if (!gcrypt_version) {
			msg("%s: failed to initialize libgcrypt\n",
			    my_progname);
			return 1;
		} else if (opt_verbose) {
			msg("%s: using gcrypt %s\n", my_progname,
			    gcrypt_version);
		}
	}
	gcry_control(GCRYCTL_DISABLE_SECMEM, 0);
	gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);

	/* Determine the algorithm */
	encrypt_algo = encrypt_algos[opt_encrypt_algo];

	/* Set up the iv length */
	encrypt_iv_len = gcry_cipher_get_algo_blklen(encrypt_algo);

	/* Now set up the key: exactly one of --encrypt-key and
	--encrypt-key-file must be given. */
	if (opt_encrypt_key == NULL && opt_encrypt_key_file == NULL) {
		msg("%s: no encryption key or key file specified.\n",
		    my_progname);
		return 1;
	} else if (opt_encrypt_key && opt_encrypt_key_file) {
		msg("%s: both encryption key and key file specified.\n",
		    my_progname);
		return 1;
	} else if (opt_encrypt_key_file) {
		if (!xb_crypt_read_key_file(opt_encrypt_key_file,
					    &opt_encrypt_key,
					    &encrypt_key_len)) {
			/* BUG FIX: the program name and key file name
			arguments were swapped, garbling this message. */
			msg("%s: unable to read encryption key file \"%s\".\n",
			    my_progname, opt_encrypt_key_file);
			return 1;
		}
	} else {
		encrypt_key_len = strlen(opt_encrypt_key);
	}

	/* Open input: a regular file if given, otherwise stdin. */
	if (opt_input_file) {
		MY_STAT mystat;

		if (opt_verbose)
			msg("%s: input file \"%s\".\n", my_progname,
			    opt_input_file);

		if (my_stat(opt_input_file, &mystat, MYF(MY_WME)) == NULL) {
			goto err;
		}
		if (!MY_S_ISREG(mystat.st_mode)) {
			msg("%s: \"%s\" is not a regular file, exiting.\n",
			    my_progname, opt_input_file);
			goto err;
		}
		if ((filein = my_open(opt_input_file, O_RDONLY, MYF(MY_WME)))
		    < 0) {
			msg("%s: failed to open \"%s\".\n", my_progname,
			    opt_input_file);
			goto err;
		}
	} else {
		if (opt_verbose)
			msg("%s: input from standard input.\n", my_progname);
		filein = fileno(stdin);
	}

	/* Open output: create the file (O_EXCL, so never clobber) if given,
	otherwise stdout. */
	if (opt_output_file) {
		if (opt_verbose)
			msg("%s: output file \"%s\".\n", my_progname,
			    opt_output_file);

		if ((fileout = my_create(opt_output_file, 0,
					 O_WRONLY|O_BINARY|O_EXCL|O_NOFOLLOW,
					 MYF(MY_WME))) < 0) {
			msg("%s: failed to create output file \"%s\".\n",
			    my_progname, opt_output_file);
			goto err;
		}
	} else {
		if (opt_verbose)
			msg("%s: output to standard output.\n", my_progname);
		fileout = fileno(stdout);
	}

	if (opt_run_mode == RUN_MODE_DECRYPT
	    && mode_decrypt(filein, fileout)) {
		goto err;
	} else if (opt_run_mode == RUN_MODE_ENCRYPT
		   && mode_encrypt(filein, fileout)) {
		goto err;
	}

	if (opt_input_file && filein) {
		my_close(filein, MYF(MY_WME));
	}
	if (opt_output_file && fileout) {
		my_close(fileout, MYF(MY_WME));
	}

	my_cleanup_options(my_long_options);
	my_end(0);

	return EXIT_SUCCESS;
err:
	if (opt_input_file && filein) {
		my_close(filein, MYF(MY_WME));
	}
	if (opt_output_file && fileout) {
		my_close(fileout, MYF(MY_WME));
	}

	my_cleanup_options(my_long_options);
	my_end(0);

	exit(EXIT_FAILURE);
}
static
size_t
my_xb_crypt_read_callback(void *userdata, void *buf, size_t len)
{
File* file = (File *) userdata;
return xb_read_full(*file, buf, len);
}
/* Read the XBCRYPT stream from filein, decrypt each chunk, verify the
appended plaintext hash when present, and write the plaintext to fileout.
Returns 0 on success, 1 on error. */
static
int
mode_decrypt(File filein, File fileout)
{
	xb_rcrypt_t *xbcrypt_file = NULL;
	void *chunkbuf = NULL;
	size_t chunksize;
	size_t originalsize;
	void *ivbuf = NULL;
	size_t ivsize;
	void *decryptbuf = NULL;
	size_t decryptbufsize = 0;
	ulonglong ttlchunksread = 0;
	ulonglong ttlbytesread = 0;
	xb_rcrypt_result_t result;
	gcry_cipher_hd_t cipher_handle;
	gcry_error_t gcry_error;
	my_bool hash_appended;

	if (encrypt_algo != GCRY_CIPHER_NONE) {
		gcry_error = gcry_cipher_open(&cipher_handle,
					      encrypt_algo,
					      encrypt_mode, 0);
		if (gcry_error) {
			msg("%s:decrypt: unable to open libgcrypt"
			    " cipher - %s : %s\n", my_progname,
			    gcry_strsource(gcry_error),
			    gcry_strerror(gcry_error));
			return 1;
		}

		gcry_error = gcry_cipher_setkey(cipher_handle,
						opt_encrypt_key,
						encrypt_key_len);
		if (gcry_error) {
			/* message fix: was "cipher""key" -> "cipherkey" */
			msg("%s:decrypt: unable to set libgcrypt cipher"
			    " key - %s : %s\n", my_progname,
			    gcry_strsource(gcry_error),
			    gcry_strerror(gcry_error));
			goto err;
		}
	}

	/* Initialize the xb_crypt format reader */
	xbcrypt_file = xb_crypt_read_open(&filein, my_xb_crypt_read_callback);
	if (xbcrypt_file == NULL) {
		msg("%s:decrypt: xb_crypt_read_open() failed.\n", my_progname);
		goto err;
	}

	/* Walk the encrypted chunks, decrypting them and writing out */
	while ((result = xb_crypt_read_chunk(xbcrypt_file, &chunkbuf,
					     &originalsize, &chunksize,
					     &ivbuf, &ivsize, &hash_appended))
	       == XB_CRYPT_READ_CHUNK) {
		if (encrypt_algo != GCRY_CIPHER_NONE) {
			gcry_error = gcry_cipher_reset(cipher_handle);
			if (gcry_error) {
				msg("%s:decrypt: unable to reset libgcrypt"
				    " cipher - %s : %s\n", my_progname,
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				goto err;
			}

			if (ivsize) {
				gcry_error = gcry_cipher_setctr(cipher_handle,
								ivbuf,
								ivsize);
			}
			if (gcry_error) {
				msg("%s:decrypt: unable to set cipher iv - "
				    "%s : %s\n", my_progname,
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				/* BUG FIX: was "continue", which silently
				skipped the chunk and corrupted the output;
				treat it as a hard error instead. */
				goto err;
			}

			if (decryptbufsize < originalsize) {
				decryptbuf = my_realloc(decryptbuf,
							originalsize,
					MYF(MY_WME | MY_ALLOW_ZERO_PTR));
				if (decryptbuf == NULL) {
					goto err;
				}
				decryptbufsize = originalsize;
			}

			/* Try to decrypt it */
			gcry_error = gcry_cipher_decrypt(cipher_handle,
							 decryptbuf,
							 originalsize,
							 chunkbuf,
							 chunksize);
			if (gcry_error) {
				msg("%s:decrypt: unable to decrypt chunk - "
				    "%s : %s\n", my_progname,
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				/* BUG FIX: do not close cipher_handle here;
				the err path closes it and the inline close
				caused a double close. */
				goto err;
			}
		} else {
			/* no cipher: the chunk already is the plaintext */
			decryptbuf = chunkbuf;
		}

		if (hash_appended) {
			uchar hash[XB_CRYPT_HASH_LEN];

			originalsize -= XB_CRYPT_HASH_LEN;

			/* ensure that XB_CRYPT_HASH_LEN is the correct length
			of XB_CRYPT_HASH hashing algorithm output */
			assert(gcry_md_get_algo_dlen(XB_CRYPT_HASH) ==
			       XB_CRYPT_HASH_LEN);

			gcry_md_hash_buffer(XB_CRYPT_HASH, hash, decryptbuf,
					    originalsize);
			if (memcmp(hash, (char *) decryptbuf + originalsize,
				   XB_CRYPT_HASH_LEN) != 0) {
				msg("%s:%s invalid plaintext hash. "
				    "Wrong encryption key specified?\n",
				    my_progname, __FUNCTION__);
				result = XB_CRYPT_READ_ERROR;
				goto err;
			}
		}

		/* Write it out */
		if (my_write(fileout, (const uchar *) decryptbuf, originalsize,
			     MYF(MY_WME | MY_NABP))) {
			msg("%s:decrypt: unable to write output chunk.\n",
			    my_progname);
			goto err;
		}
		ttlchunksread++;
		ttlbytesread += chunksize;
		if (opt_verbose)
			msg("%s:decrypt: %llu chunks read, %llu bytes read\n.",
			    my_progname, ttlchunksread, ttlbytesread);
	}

	/* BUG FIX: a read error formerly fell out of the loop and the
	function still reported success. */
	if (result == XB_CRYPT_READ_ERROR) {
		goto err;
	}

	xb_crypt_read_close(xbcrypt_file);

	if (encrypt_algo != GCRY_CIPHER_NONE)
		gcry_cipher_close(cipher_handle);

	if (decryptbuf && decryptbufsize)
		my_free(decryptbuf);

	if (opt_verbose)
		msg("\n%s:decrypt: done\n", my_progname);

	return 0;
err:
	if (xbcrypt_file)
		xb_crypt_read_close(xbcrypt_file);
	if (encrypt_algo != GCRY_CIPHER_NONE)
		gcry_cipher_close(cipher_handle);
	if (decryptbuf && decryptbufsize)
		my_free(decryptbuf);
	return 1;
}
static
ssize_t
my_xb_crypt_write_callback(void *userdata, const void *buf, size_t len)
{
File* file = (File *) userdata;
ssize_t ret = my_write(*file, buf, len, MYF(MY_WME));
posix_fadvise(*file, 0, 0, POSIX_FADV_DONTNEED);
return ret;
}
/* Read plaintext from filein in opt_encrypt_chunk_size pieces, append the
plaintext hash to each chunk, encrypt it and write it to fileout in XBCRYPT
format. Returns 0 on success, 1 on error. */
static
int
mode_encrypt(File filein, File fileout)
{
	size_t bytesread;
	size_t chunkbuflen;
	uchar *chunkbuf = NULL;
	void *ivbuf = NULL;
	size_t encryptbuflen = 0;
	size_t encryptedlen = 0;
	void *encryptbuf = NULL;
	ulonglong ttlchunkswritten = 0;
	ulonglong ttlbyteswritten = 0;
	xb_wcrypt_t *xbcrypt_file = NULL;
	gcry_cipher_hd_t cipher_handle;
	gcry_error_t gcry_error;

	if (encrypt_algo != GCRY_CIPHER_NONE) {
		gcry_error = gcry_cipher_open(&cipher_handle,
					      encrypt_algo,
					      encrypt_mode, 0);
		if (gcry_error) {
			msg("%s:encrypt: unable to open libgcrypt cipher - "
			    "%s : %s\n", my_progname,
			    gcry_strsource(gcry_error),
			    gcry_strerror(gcry_error));
			return 1;
		}

		gcry_error = gcry_cipher_setkey(cipher_handle,
						opt_encrypt_key,
						encrypt_key_len);
		if (gcry_error) {
			msg("%s:encrypt: unable to set libgcrypt cipher key - "
			    "%s : %s\n", my_progname,
			    gcry_strsource(gcry_error),
			    gcry_strerror(gcry_error));
			goto err;
		}
	}

	/* hint the kernel that the input will be read sequentially */
	posix_fadvise(filein, 0, 0, POSIX_FADV_SEQUENTIAL);

	xbcrypt_file = xb_crypt_write_open(&fileout,
					   my_xb_crypt_write_callback);
	if (xbcrypt_file == NULL) {
		msg("%s:encrypt: xb_crypt_write_open() failed.\n",
		    my_progname);
		goto err;
	}

	ivbuf = my_malloc(encrypt_iv_len, MYF(MY_FAE));

	/* now read in data in chunk size, encrypt and write out */
	chunkbuflen = opt_encrypt_chunk_size + XB_CRYPT_HASH_LEN;
	chunkbuf = (uchar *) my_malloc(chunkbuflen, MYF(MY_FAE));
	/* NOTE(review): my_read() returns MY_FILE_ERROR ((size_t)-1) on
	failure, which this condition treats as data — confirm before
	changing the loop. */
	while ((bytesread = my_read(filein, chunkbuf, opt_encrypt_chunk_size,
				    MYF(MY_WME))) > 0) {

		size_t origbuflen = bytesread + XB_CRYPT_HASH_LEN;

		/* ensure that XB_CRYPT_HASH_LEN is the correct length
		of XB_CRYPT_HASH hashing algorithm output */
		assert(XB_CRYPT_HASH_LEN ==
		       gcry_md_get_algo_dlen(XB_CRYPT_HASH));

		/* append the plaintext hash so decryption can verify the
		key used */
		gcry_md_hash_buffer(XB_CRYPT_HASH, chunkbuf + bytesread,
				    chunkbuf, bytesread);

		if (encrypt_algo != GCRY_CIPHER_NONE) {
			gcry_error = gcry_cipher_reset(cipher_handle);

			if (gcry_error) {
				msg("%s:encrypt: unable to reset cipher - "
				    "%s : %s\n", my_progname,
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				goto err;
			}

			xb_crypt_create_iv(ivbuf, encrypt_iv_len);
			gcry_error = gcry_cipher_setctr(cipher_handle,
							ivbuf,
							encrypt_iv_len);

			if (gcry_error) {
				msg("%s:encrypt: unable to set cipher iv - "
				    "%s : %s\n", my_progname,
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				/* BUG FIX: was "continue", which silently
				dropped the chunk from the output stream;
				treat it as a hard error instead. */
				goto err;
			}

			if (encryptbuflen < origbuflen) {
				encryptbuf = my_realloc(encryptbuf, origbuflen,
					MYF(MY_WME | MY_ALLOW_ZERO_PTR));
				if (encryptbuf == NULL) {
					goto err;
				}
				encryptbuflen = origbuflen;
			}

			gcry_error = gcry_cipher_encrypt(cipher_handle,
							 encryptbuf,
							 encryptbuflen,
							 chunkbuf,
							 origbuflen);

			encryptedlen = origbuflen;

			if (gcry_error) {
				msg("%s:encrypt: unable to encrypt chunk - "
				    "%s : %s\n", my_progname,
				    gcry_strsource(gcry_error),
				    gcry_strerror(gcry_error));
				/* BUG FIX: do not close cipher_handle here;
				the err path closes it and the inline close
				caused a double close. */
				goto err;
			}
		} else {
			encryptedlen = origbuflen;
			encryptbuf = chunkbuf;
		}

		if (xb_crypt_write_chunk(xbcrypt_file, encryptbuf,
					 bytesread + XB_CRYPT_HASH_LEN,
					 encryptedlen, ivbuf,
					 encrypt_iv_len)) {
			/* message fix: said "abcrypt_write_chunk" */
			msg("%s:encrypt: xb_crypt_write_chunk() failed.\n",
			    my_progname);
			goto err;
		}

		ttlchunkswritten++;
		ttlbyteswritten += encryptedlen;

		if (opt_verbose)
			msg("%s:encrypt: %llu chunks written, %llu bytes "
			    "written\n.", my_progname, ttlchunkswritten,
			    ttlbyteswritten);
	}

	my_free(ivbuf);
	my_free(chunkbuf);

	if (encryptbuf && encryptbuflen)
		my_free(encryptbuf);

	xb_crypt_write_close(xbcrypt_file);

	if (encrypt_algo != GCRY_CIPHER_NONE)
		gcry_cipher_close(cipher_handle);

	if (opt_verbose)
		msg("\n%s:encrypt: done\n", my_progname);

	return 0;
err:
	/* BUG FIX: ivbuf was leaked on the error path. */
	if (ivbuf)
		my_free(ivbuf);
	if (chunkbuf)
		my_free(chunkbuf);
	if (encryptbuf && encryptbuflen)
		my_free(encryptbuf);
	if (xbcrypt_file)
		xb_crypt_write_close(xbcrypt_file);
	if (encrypt_algo != GCRY_CIPHER_NONE)
		gcry_cipher_close(cipher_handle);
	return 1;
}
/* Parse the command line into the opt_* globals; exits the process on
invalid options, returns 0 otherwise. */
static
int
get_options(int *argc, char ***argv)
{
	int ho_error = handle_options(argc, argv, my_long_options,
				      get_one_option);
	if (ho_error != 0) {
		exit(EXIT_FAILURE);
	}
	return 0;
}
/* Per-option callback for handle_options(): -d switches to decrypt mode,
-? prints the help text and exits. */
static
my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
	       char *argument __attribute__((unused)))
{
	if (optid == 'd') {
		opt_run_mode = RUN_MODE_DECRYPT;
	} else if (optid == '?') {
		usage();
		exit(0);
	}
	return FALSE;
}
/* Print the tool name, version and build platform to stdout. */
static
void
print_version(void)
{
	fprintf(stdout, "%s Ver %s for %s (%s)\n", my_progname,
		XBCRYPT_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
}
/* Print the --help text: version banner, license blurb and the option
table generated from my_long_options. */
static
void
usage(void)
{
	print_version();
	puts("Copyright (C) 2011 Percona Inc.");
	puts("This software comes with ABSOLUTELY NO WARRANTY. "
	     "This is free software,\nand you are welcome to modify and "
	     "redistribute it under the GPL license.\n");
	puts("Encrypt or decrypt files in the XBCRYPT format.\n");
	puts("Usage: ");
	printf("  %s [OPTIONS...]"
	       " # read data from specified input, encrypting or decrypting "
	       " and writing the result to the specified output.\n",
	       my_progname);
	puts("\nOptions:");
	my_print_help(my_long_options);
}
/******************************************************
Copyright (c) 2011 Percona LLC and/or its affiliates.
Encryption interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XBCRYPT_H
#define XBCRYPT_H

#include <my_base.h>
#include "common.h"

/* Chunk header magics; a new magic is introduced whenever the on-disk
chunk format changes. All magics must have the same length. */
#define XB_CRYPT_CHUNK_MAGIC1 "XBCRYP01"
#define XB_CRYPT_CHUNK_MAGIC2 "XBCRYP02"
#define XB_CRYPT_CHUNK_MAGIC3 "XBCRYP03" /* must be same size as ^^ */
#define XB_CRYPT_CHUNK_MAGIC_CURRENT XB_CRYPT_CHUNK_MAGIC3
#define XB_CRYPT_CHUNK_MAGIC_SIZE (sizeof(XB_CRYPT_CHUNK_MAGIC1)-1)

/* Hash appended to each plaintext chunk so decryption can detect a wrong
key; XB_CRYPT_HASH_LEN must equal the digest length of XB_CRYPT_HASH. */
#define XB_CRYPT_HASH GCRY_MD_SHA256
#define XB_CRYPT_HASH_LEN 32

/******************************************************************************
Write interface */
typedef struct xb_wcrypt_struct xb_wcrypt_t;

/* Callback on write for i/o, must return # of bytes written or -1 on error */
typedef ssize_t xb_crypt_write_callback(void *userdata,
					const void *buf, size_t len);

xb_wcrypt_t *xb_crypt_write_open(void *userdata,
				 xb_crypt_write_callback *onwrite);

/* Takes buffer, original length, encrypted length iv and iv length, formats
output buffer and calls write callback.
Returns 0 on success, 1 on error */
int xb_crypt_write_chunk(xb_wcrypt_t *crypt, const void *buf, size_t olen,
			 size_t elen, const void *iv, size_t ivlen);

/* Returns 0 on success, 1 on error */
int xb_crypt_write_close(xb_wcrypt_t *crypt);

/******************************************************************************
Read interface */
typedef struct xb_rcrypt_struct xb_rcrypt_t;

/* Callback on read for i/o, must return # of bytes read or -1 on error */
typedef size_t xb_crypt_read_callback(void *userdata, void *buf, size_t len);

xb_rcrypt_t *xb_crypt_read_open(void *userdata,
				xb_crypt_read_callback *onread);

/* Outcome of reading one chunk from the stream. */
typedef enum {
	XB_CRYPT_READ_CHUNK,
	XB_CRYPT_READ_EOF,
	XB_CRYPT_READ_ERROR
} xb_rcrypt_result_t;

/* Reads the next chunk; on XB_CRYPT_READ_CHUNK the payload and iv buffers
are owned by the reader and valid until the next call or close. */
xb_rcrypt_result_t xb_crypt_read_chunk(xb_rcrypt_t *crypt, void **buf,
				       size_t *olen, size_t *elen, void **iv,
				       size_t *ivlen, my_bool *hash_appended);

int xb_crypt_read_close(xb_rcrypt_t *crypt);

/******************************************************************************
Utility interface */

/* Reads an encryption key from filename; see xbcrypt_common.c. */
my_bool xb_crypt_read_key_file(const char *filename,
			       void** key, uint *keylength);

/* Fills ivbuf with ivlen bytes of nonce material. */
void xb_crypt_create_iv(void* ivbuf, size_t ivlen);

#endif
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Encryption configuration file interface for XtraBackup.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <my_base.h>
#include "common.h"
#include "xbcrypt.h"
#if GCC_VERSION >= 4002
/* Workaround to avoid "gcry_ac_* is deprecated" warnings in gcrypt.h */
# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#include <gcrypt.h>
#if GCC_VERSION >= 4002
# pragma GCC diagnostic warning "-Wdeprecated-declarations"
#endif
/* Read the whole contents of filename into a newly allocated buffer and
return it as the encryption key. On success *key points at the buffer and
*keylength holds the number of bytes actually read.
@return TRUE on success, FALSE if the file cannot be opened or sized */
my_bool
xb_crypt_read_key_file(const char *filename, void** key, uint *keylength)
{
	FILE *fp;
	long file_size;

	if (!(fp = my_fopen(filename, O_RDONLY, MYF(0)))) {
		msg("%s:%s: unable to open config file \"%s\", errno(%d)\n",
		    my_progname, __FUNCTION__, filename, my_errno);
		return FALSE;
	}

	fseek(fp, 0 , SEEK_END);
	file_size = ftell(fp);
	/* BUG FIX: ftell() returns -1 on failure; the unchecked cast made
	*keylength a huge value and the subsequent allocation bogus. */
	if (file_size < 0) {
		msg("%s:%s: unable to determine size of \"%s\"\n",
		    my_progname, __FUNCTION__, filename);
		my_fclose(fp, MYF(0));
		return FALSE;
	}
	rewind(fp);

	*keylength = (uint) file_size;
	/* MY_FAE aborts the process on allocation failure */
	*key = my_malloc(*keylength, MYF(MY_FAE));
	/* the short-read case is reflected in *keylength */
	*keylength = fread(*key, 1, *keylength, fp);
	my_fclose(fp, MYF(0));

	return TRUE;
}
/* Fill ivbuf with ivlen bytes of nonce material from libgcrypt, used as
the per-chunk CTR-mode iv. */
void
xb_crypt_create_iv(void* ivbuf, size_t ivlen)
{
	gcry_create_nonce(ivbuf, ivlen);
}
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
The xbcrypt format reader implementation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include "xbcrypt.h"
/* State of an XBCRYPT format reader. */
struct xb_rcrypt_struct {
	void *userdata;			/* opaque argument for the callback */
	xb_crypt_read_callback *read;	/* i/o callback supplied by caller */
	void *buffer;			/* chunk payload buffer, grown on demand */
	size_t bufsize;			/* allocated size of buffer */
	void *ivbuffer;			/* iv buffer, grown on demand */
	size_t ivbufsize;		/* allocated size of ivbuffer */
	ulonglong offset;		/* stream offset, for error messages */
};
/* Create an XBCRYPT reader over the given read callback. userdata is
passed through to onread on every call. my_malloc(MY_FAE) aborts on
allocation failure, so the result is never NULL. */
xb_rcrypt_t *
xb_crypt_read_open(void *userdata, xb_crypt_read_callback *onread)
{
	xb_rcrypt_t *rc;

	xb_ad(onread);

	rc = (xb_rcrypt_t *) my_malloc(sizeof(xb_rcrypt_t), MYF(MY_FAE));
	rc->userdata = userdata;
	rc->read = onread;
	rc->offset = 0;
	/* payload and iv buffers are allocated lazily by read_chunk */
	rc->buffer = NULL;
	rc->bufsize = 0;
	rc->ivbuffer = NULL;
	rc->ivbufsize = 0;

	return rc;
}
/*
  Read and parse one chunk from an xbcrypt stream via crypt->read().

  Chunk layout: magic (selects format version 1/2/3), 8 reserved bytes,
  8-byte original size, 8-byte encrypted size, 4-byte CRC32 of the
  encrypted payload; for versions >= 2 an 8-byte IV size, the IV, then
  the payload.

  On success returns XB_CRYPT_READ_CHUNK and sets:
    *buf           - encrypted payload (points into crypt->buffer)
    *olen          - original (plaintext) size from the header
    *elen          - encrypted payload size actually stored in *buf
    *iv / *ivlen   - IV, or 0/0 for versions 1 and 2
    *hash_appended - TRUE for version > 2 (a hash trails the plaintext)

  Returns XB_CRYPT_READ_EOF on clean end-of-stream, XB_CRYPT_READ_ERROR
  on malformed input; on both, all output parameters are zeroed.
*/
xb_rcrypt_result_t
xb_crypt_read_chunk(xb_rcrypt_t *crypt, void **buf, size_t *olen, size_t *elen,
		    void **iv, size_t *ivlen, my_bool *hash_appended)
{
	/* Fixed-size header: magic + reserved + olen + elen + checksum */
	uchar tmpbuf[XB_CRYPT_CHUNK_MAGIC_SIZE + 8 + 8 + 8 + 4];
	uchar *ptr;
	ulonglong tmp;
	ulong checksum, checksum_exp, version;
	size_t bytesread;
	xb_rcrypt_result_t result = XB_CRYPT_READ_CHUNK;

	/* A zero-length read here is the only legitimate EOF point. */
	if ((bytesread = crypt->read(crypt->userdata, tmpbuf, sizeof(tmpbuf)))
	    != sizeof(tmpbuf)) {
		if (bytesread == 0) {
			result = XB_CRYPT_READ_EOF;
			goto err;
		} else {
			msg("%s:%s: unable to read chunk header data at "
			    "offset 0x%llx.\n",
			    my_progname, __FUNCTION__, crypt->offset);
			result = XB_CRYPT_READ_ERROR;
			goto err;
		}
	}

	ptr = tmpbuf;

	/* The magic value doubles as the format-version marker. */
	if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC3,
		   XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) {
		version = 3;
	} else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC2,
			  XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) {
		version = 2;
	} else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC1,
			  XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) {
		version = 1;
	} else {
		msg("%s:%s: wrong chunk magic at offset 0x%llx.\n",
		    my_progname, __FUNCTION__, crypt->offset);
		result = XB_CRYPT_READ_ERROR;
		goto err;
	}

	ptr += XB_CRYPT_CHUNK_MAGIC_SIZE;
	crypt->offset += XB_CRYPT_CHUNK_MAGIC_SIZE;

	tmp = uint8korr(ptr);	/* reserved */
	ptr += 8;
	crypt->offset += 8;

	tmp = uint8korr(ptr);	/* original size */
	ptr += 8;
	if (tmp > INT_MAX) {
		msg("%s:%s: invalid original size at offset 0x%llx.\n",
		    my_progname, __FUNCTION__, crypt->offset);
		result = XB_CRYPT_READ_ERROR;
		goto err;
	}
	crypt->offset += 8;
	*olen = (size_t)tmp;

	tmp = uint8korr(ptr);	/* encrypted size */
	ptr += 8;
	if (tmp > INT_MAX) {
		msg("%s:%s: invalid encrypted size at offset 0x%llx.\n",
		    my_progname, __FUNCTION__, crypt->offset);
		result = XB_CRYPT_READ_ERROR;
		goto err;
	}
	crypt->offset += 8;
	*elen = (size_t)tmp;

	checksum_exp = uint4korr(ptr);	/* checksum */
	ptr += 4;
	crypt->offset += 4;

	/* iv size: versions >= 2 carry an explicit IV after the header. */
	if (version == 1) {
		*ivlen = 0;
		*iv = 0;
	} else {
		if ((bytesread = crypt->read(crypt->userdata, tmpbuf, 8))
		    != 8) {
			if (bytesread == 0) {
				result = XB_CRYPT_READ_EOF;
				goto err;
			} else {
				msg("%s:%s: unable to read chunk iv size at "
				    "offset 0x%llx.\n",
				    my_progname, __FUNCTION__, crypt->offset);
				result = XB_CRYPT_READ_ERROR;
				goto err;
			}
		}

		tmp = uint8korr(tmpbuf);
		if (tmp > INT_MAX) {
			msg("%s:%s: invalid iv size at offset 0x%llx.\n",
			    my_progname, __FUNCTION__, crypt->offset);
			result = XB_CRYPT_READ_ERROR;
			goto err;
		}
		crypt->offset += 8;
		*ivlen = (size_t)tmp;
	}

	/* NOTE(review): if my_realloc() fails, the old ivbuffer pointer is
	   overwritten with NULL and the old allocation leaks — confirm
	   whether this is acceptable on the (fatal) error path. */
	if (*ivlen > crypt->ivbufsize) {
		crypt->ivbuffer = my_realloc(crypt->ivbuffer, *ivlen,
					     MYF(MY_WME | MY_ALLOW_ZERO_PTR));
		if (crypt->ivbuffer == NULL) {
			msg("%s:%s: failed to increase iv buffer to "
			    "%llu bytes.\n", my_progname, __FUNCTION__,
			    (ulonglong)*ivlen);
			result = XB_CRYPT_READ_ERROR;
			goto err;
		}
		crypt->ivbufsize = *ivlen;
	}

	if (*ivlen > 0) {
		if (crypt->read(crypt->userdata, crypt->ivbuffer, *ivlen)
		    != *ivlen) {
			msg("%s:%s: failed to read %lld bytes for chunk iv "
			    "at offset 0x%llx.\n", my_progname, __FUNCTION__,
			    (ulonglong)*ivlen, crypt->offset);
			result = XB_CRYPT_READ_ERROR;
			goto err;
		}
		*iv = crypt->ivbuffer;
	}

	/* for version euqals 2 we need to read in the iv data but do not init
	   CTR with it */
	/* (i.e. the v2 IV is consumed from the stream but deliberately not
	   reported to the caller) */
	if (version == 2) {
		*ivlen = 0;
		*iv = 0;
	}

	/* NOTE(review): the buffer is sized from *olen but *elen bytes are
	   read into it below — this relies on the encrypted size never
	   exceeding the original size; confirm against the writer. */
	if (*olen > crypt->bufsize) {
		crypt->buffer = my_realloc(crypt->buffer, *olen,
					   MYF(MY_WME | MY_ALLOW_ZERO_PTR));
		if (crypt->buffer == NULL) {
			msg("%s:%s: failed to increase buffer to "
			    "%llu bytes.\n", my_progname, __FUNCTION__,
			    (ulonglong)*olen);
			result = XB_CRYPT_READ_ERROR;
			goto err;
		}
		crypt->bufsize = *olen;
	}

	if (*elen > 0) {
		if (crypt->read(crypt->userdata, crypt->buffer, *elen)
		    != *elen) {
			msg("%s:%s: failed to read %lld bytes for chunk payload "
			    "at offset 0x%llx.\n", my_progname, __FUNCTION__,
			    (ulonglong)*elen, crypt->offset);
			result = XB_CRYPT_READ_ERROR;
			goto err;
		}
	}

	/* Verify the CRC32 of the encrypted payload before handing it out. */
	checksum = crc32(0, crypt->buffer, *elen);
	if (checksum != checksum_exp) {
		msg("%s:%s invalid checksum at offset 0x%llx, "
		    "expected 0x%lx, actual 0x%lx.\n", my_progname, __FUNCTION__,
		    crypt->offset, checksum_exp, checksum);
		result = XB_CRYPT_READ_ERROR;
		goto err;
	}

	crypt->offset += *elen;
	*buf = crypt->buffer;

	/* Only format version 3 appends a hash after the plaintext. */
	*hash_appended = version > 2;

	goto exit;

err:
	/* Zero everything so callers cannot use stale pointers/lengths. */
	*buf = NULL;
	*olen = 0;
	*elen = 0;
	*ivlen = 0;
	*iv = 0;

exit:
	return result;
}
/*
  Release a reader handle together with any payload/IV buffers it
  grew while reading.  Always succeeds; returns 0.
*/
int xb_crypt_read_close(xb_rcrypt_t *crypt)
{
	if (crypt->ivbuffer != NULL) {
		my_free(crypt->ivbuffer);
	}
	if (crypt->buffer != NULL) {
		my_free(crypt->buffer);
	}
	my_free(crypt);

	return 0;
}
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
The xbcrypt format writer implementation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include "xbcrypt.h"
/* State of an xbcrypt-format writer: just the output callback and its
   opaque argument; all buffers are supplied by the caller per chunk. */
struct xb_wcrypt_struct {
	void			*userdata;	/* opaque arg for write callback */
	xb_crypt_write_callback	*write;		/* output callback */
};
/*
  Allocate a writer handle for an xbcrypt stream.  `userdata` is passed
  back verbatim to `onwrite` on every chunk.  Never returns NULL
  (allocation uses MY_FAE).
*/
xb_wcrypt_t *
xb_crypt_write_open(void *userdata, xb_crypt_write_callback *onwrite)
{
	xb_wcrypt_t	*wcrypt;

	xb_ad(onwrite);

	wcrypt = (xb_wcrypt_t *) my_malloc(sizeof(xb_wcrypt_t), MYF(MY_FAE));
	wcrypt->write = onwrite;
	wcrypt->userdata = userdata;

	return wcrypt;
}
/*
  Serialize one encrypted chunk: current-format magic, 8 reserved
  bytes, original size, encrypted size, CRC32 of the payload, IV size;
  then the IV and the payload.  `olen` is the plaintext size, `elen`
  the size of `buf` actually written.  Returns 0 on success, 1 on a
  callback write failure (and 0 if a size exceeds INT_MAX, after the
  debug-build assertions fire).
*/
int xb_crypt_write_chunk(xb_wcrypt_t *crypt, const void *buf, size_t olen,
			 size_t elen, const void *iv, size_t ivlen)
{
	/* magic + reserved + olen + elen + checksum + ivlen */
	uchar	header[XB_CRYPT_CHUNK_MAGIC_SIZE + 8 + 8 + 8 + 4 + 8];
	uchar	*pos;
	ulong	crc;

	xb_ad(olen <= INT_MAX);
	xb_ad(elen <= INT_MAX);
	xb_ad(ivlen <= INT_MAX);
	if (olen > INT_MAX || elen > INT_MAX || ivlen > INT_MAX)
		return 0;

	pos = header;

	memcpy(pos, XB_CRYPT_CHUNK_MAGIC_CURRENT, XB_CRYPT_CHUNK_MAGIC_SIZE);
	pos += XB_CRYPT_CHUNK_MAGIC_SIZE;

	int8store(pos, (ulonglong)0);		/* reserved */
	pos += 8;

	int8store(pos, (ulonglong)olen);	/* original size */
	pos += 8;

	int8store(pos, (ulonglong)elen);	/* encrypted (actual) size */
	pos += 8;

	crc = crc32(0, buf, elen);
	int4store(pos, crc);			/* checksum */
	pos += 4;

	int8store(pos, (ulonglong)ivlen);	/* iv size */
	pos += 8;

	xb_ad(pos <= header + sizeof(header));

	/* Header, then IV, then payload — order matters for the reader. */
	if (crypt->write(crypt->userdata, header, pos - header) == -1)
		return 1;

	if (crypt->write(crypt->userdata, iv, ivlen) == -1)
		return 1;

	if (crypt->write(crypt->userdata, buf, elen) == -1)
		return 1;

	return 0;
}
/* Release a writer handle.  No flush is needed — every chunk is written
   eagerly by xb_crypt_write_chunk().  Always returns 0. */
int xb_crypt_write_close(xb_wcrypt_t *crypt)
{
	my_free(crypt);
	return 0;
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
The xbstream utility: serialize/deserialize files in the XBSTREAM format.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include <my_getopt.h>
#include <hash.h>
#include "common.h"
#include "xbstream.h"
#include "ds_local.h"
#include "ds_stdout.h"
#define XBSTREAM_VERSION "1.0"
#define XBSTREAM_BUFFER_SIZE (10 * 1024 * 1024UL)
#define START_FILE_HASH_SIZE 16
/* Top-level operating mode selected by -c / -x on the command line. */
typedef enum {
	RUN_MODE_NONE,		/* no mode chosen yet — rejected in main() */
	RUN_MODE_CREATE,	/* -c: serialize files to stdout */
	RUN_MODE_EXTRACT	/* -x: deserialize the stream on stdin */
} run_mode_t;
/* Need the following definitions to avoid linking with ds_*.o and their link
dependencies */
datasink_t datasink_archive;
datasink_t datasink_xbstream;
datasink_t datasink_compress;
datasink_t datasink_tmpfile;
datasink_t datasink_encrypt;
datasink_t datasink_buffer;
static run_mode_t opt_mode;
static char * opt_directory = NULL;
static my_bool opt_verbose = 0;
/* Command-line option table for handle_options(); terminated by the
   all-zero sentinel entry.  -c/-x have no storage — they are handled
   in get_one_option() via set_run_mode(). */
static struct my_option my_long_options[] =
{
	{"help", '?', "Display this help and exit.",
	 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
	{"create", 'c', "Stream the specified files to the standard output.",
	 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
	{"extract", 'x', "Extract to disk files from the stream on the "
	 "standard input.",
	 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
	{"directory", 'C', "Change the current directory to the specified one "
	 "before streaming or extracting.", &opt_directory, &opt_directory, 0,
	 GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
	{"verbose", 'v', "Print verbose output.", &opt_verbose, &opt_verbose,
	 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
/* An output file being reassembled during extraction; lives in the
   file hash (keyed by path) until its EOF chunk arrives. */
typedef struct {
	char		*path;		/* hash key: file path */
	uint		pathlen;	/* hash key length */
	my_off_t	offset;		/* next expected chunk offset */
	ds_ctxt_t	*ds_ctxt;	/* datasink the file is written through */
	ds_file_t	*file;		/* open datasink file */
} file_entry_t;
static int get_options(int *argc, char ***argv);
static int mode_create(int argc, char **argv);
static int mode_extract(int argc, char **argv);
static my_bool get_one_option(int optid, const struct my_option *opt,
char *argument);
/*
  Entry point.  Parses options into the file-level globals, optionally
  chdirs to --directory, then dispatches to mode_create() (-c) or
  mode_extract() (-x).  Returns EXIT_SUCCESS on success; every failure
  path frees option storage and terminates via exit(EXIT_FAILURE).
*/
int
main(int argc, char **argv)
{
	MY_INIT(argv[0]);

	if (get_options(&argc, &argv)) {
		goto err;
	}

	/* Exactly one of -c / -x must have been given. */
	if (opt_mode == RUN_MODE_NONE) {
		msg("%s: either -c or -x must be specified.\n", my_progname);
		goto err;
	}

	/* Change the current directory if -C is specified */
	if (opt_directory && my_setwd(opt_directory, MYF(MY_WME))) {
		goto err;
	}

	if (opt_mode == RUN_MODE_CREATE && mode_create(argc, argv)) {
		goto err;
	} else if (opt_mode == RUN_MODE_EXTRACT && mode_extract(argc, argv)) {
		goto err;
	}

	my_cleanup_options(my_long_options);

	my_end(0);

	return EXIT_SUCCESS;
err:
	my_cleanup_options(my_long_options);

	my_end(0);

	exit(EXIT_FAILURE);
}
/*
  Parse the command line into the option globals declared above.
  handle_options() prints its own diagnostics; on any parse error the
  process is terminated immediately.  Returns 0 on success.
*/
static
int
get_options(int *argc, char ***argv)
{
	int	err;

	err = handle_options(argc, argv, my_long_options, get_one_option);
	if (err != 0) {
		exit(EXIT_FAILURE);
	}

	return 0;
}
/* Print the tool name, xbstream version, and build platform to stdout. */
static
void
print_version(void)
{
	printf("%s Ver %s for %s (%s)\n", my_progname, XBSTREAM_VERSION,
	       SYSTEM_TYPE, MACHINE_TYPE);
}
/*
  Print the version banner, license blurb, usage synopsis, and the
  option help generated from my_long_options.
  Fix: the "-x" synopsis was missing a space between "stream" and "on",
  printing "...from the streamon the standard input."
*/
static
void
usage(void)
{
	print_version();
	puts("Copyright (C) 2011-2013 Percona LLC and/or its affiliates.");
	puts("This software comes with ABSOLUTELY NO WARRANTY. "
	     "This is free software,\nand you are welcome to modify and "
	     "redistribute it under the GPL license.\n");

	puts("Serialize/deserialize files in the XBSTREAM format.\n");

	puts("Usage: ");
	printf("  %s -c [OPTIONS...] FILES...   # stream specified files to "
	       "standard output.\n", my_progname);
	printf("  %s -x [OPTIONS...]            # extract files from the stream "
	       "on the standard input.\n", my_progname);

	puts("\nOptions:");
	my_print_help(my_long_options);
}
/*
  Record the requested run mode (-c or -x) in the opt_mode global.
  Returns 1 (with an error message) if a mode has already been chosen,
  so that -c and -x are mutually exclusive; 0 otherwise.
  Fix: the error message read "can't set specify both" — garbled
  wording corrected to "can't specify both".
*/
static
int
set_run_mode(run_mode_t mode)
{
	if (opt_mode != RUN_MODE_NONE) {
		msg("%s: can't specify both -c and -x.\n", my_progname);
		return 1;
	}

	opt_mode = mode;

	return 0;
}
/*
  handle_options() callback: react to a single parsed option.
  -c / -x set the run mode (returning TRUE aborts option parsing on a
  mode conflict); -? prints usage and exits successfully.
*/
static
my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
	       char *argument __attribute__((unused)))
{
	if (optid == 'c') {
		return set_run_mode(RUN_MODE_CREATE) ? TRUE : FALSE;
	}

	if (optid == 'x') {
		return set_run_mode(RUN_MODE_EXTRACT) ? TRUE : FALSE;
	}

	if (optid == '?') {
		usage();
		exit(0);
	}

	return FALSE;
}
/*
  Copy the whole of `file` (from its current position) into the
  xbstream file `xbfile`, in XBSTREAM_BUFFER_SIZE slices, advising the
  kernel that the data will not be re-read.  Returns 0 on success,
  1 on a read or stream-write failure.

  Fix: my_read(..., MYF(MY_WME)) reports failure by returning
  MY_FILE_ERROR, i.e. (size_t)-1, which satisfied the old `bytes > 0`
  loop condition — a read error would stream a bogus ~SIZE_MAX-byte
  chunk, and the post-loop error check was unreachable.  The error is
  now detected inside the loop before any data is written.
*/
static
int
stream_one_file(File file, xb_wstream_file_t *xbfile)
{
	uchar	*buf;
	size_t	bytes;
	size_t	offset;

	posix_fadvise(file, 0, 0, POSIX_FADV_SEQUENTIAL);

	offset = my_tell(file, MYF(MY_WME));

	buf = (uchar*)(my_malloc(XBSTREAM_BUFFER_SIZE, MYF(MY_FAE)));

	while ((bytes = my_read(file, buf, XBSTREAM_BUFFER_SIZE,
				MYF(MY_WME))) > 0) {
		if (bytes == (size_t) -1) {
			/* read error; my_read() already printed it (MY_WME) */
			my_free(buf);
			return 1;
		}
		if (xb_stream_write_data(xbfile, buf, bytes)) {
			msg("%s: xb_stream_write_data() failed.\n",
			    my_progname);
			my_free(buf);
			return 1;
		}
		/* Already-streamed pages will not be needed again. */
		posix_fadvise(file, offset, XBSTREAM_BUFFER_SIZE,
			      POSIX_FADV_DONTNEED);
		offset += XBSTREAM_BUFFER_SIZE;
	}

	my_free(buf);

	return 0;
}
/*
  -c mode: stream every regular file named in argv to stdout in
  XBSTREAM format.  Returns 0 on success, 1 on the first failure.

  Fix: on the per-file error paths the source descriptor (and, after
  a copy failure, the xbstream file handle) used to leak — each error
  path now closes whatever was opened for the current file before
  bailing out.
*/
static
int
mode_create(int argc, char **argv)
{
	int		i;
	MY_STAT		mystat;
	xb_wstream_t	*stream;

	if (argc < 1) {
		msg("%s: no files are specified.\n", my_progname);
		return 1;
	}

	stream = xb_stream_write_new();
	if (stream == NULL) {
		msg("%s: xb_stream_write_new() failed.\n", my_progname);
		return 1;
	}

	for (i = 0; i < argc; i++) {
		char			*filepath = argv[i];
		File			src_file;
		xb_wstream_file_t	*file;

		if (my_stat(filepath, &mystat, MYF(MY_WME)) == NULL) {
			goto err;
		}
		if (!MY_S_ISREG(mystat.st_mode)) {
			msg("%s: %s is not a regular file, exiting.\n",
			    my_progname, filepath);
			goto err;
		}

		if ((src_file = my_open(filepath, O_RDONLY, MYF(MY_WME))) < 0) {
			msg("%s: failed to open %s.\n", my_progname, filepath);
			goto err;
		}

		file = xb_stream_write_open(stream, filepath, &mystat, NULL, NULL);
		if (file == NULL) {
			/* don't leak the source descriptor */
			my_close(src_file, MYF(MY_WME));
			goto err;
		}

		if (opt_verbose) {
			msg("%s\n", filepath);
		}

		if (stream_one_file(src_file, file)) {
			/* best-effort cleanup of both handles */
			xb_stream_write_close(file);
			my_close(src_file, MYF(MY_WME));
			goto err;
		}
		if (xb_stream_write_close(file)) {
			my_close(src_file, MYF(MY_WME));
			goto err;
		}
		if (my_close(src_file, MYF(MY_WME))) {
			goto err;
		}
	}

	xb_stream_write_done(stream);

	return 0;
err:
	xb_stream_write_done(stream);

	return 1;
}
/*
  Create a file_entry_t for `path` and open the corresponding output
  file through `ds_ctxt`.  The entry is zero-filled, so partially
  initialised entries can be torn down safely on failure.
  Returns the new entry, or NULL on allocation/open failure.
*/
static
file_entry_t *
file_entry_new(ds_ctxt_t *ds_ctxt, const char *path, uint pathlen)
{
	file_entry_t	*new_entry;
	ds_file_t	*ds_file;

	new_entry = (file_entry_t *) my_malloc(sizeof(file_entry_t),
					       MYF(MY_WME | MY_ZEROFILL));
	if (new_entry == NULL) {
		return NULL;
	}

	new_entry->path = my_strndup(path, pathlen, MYF(MY_WME));
	if (new_entry->path == NULL) {
		goto err;
	}
	new_entry->pathlen = pathlen;

	ds_file = ds_open(ds_ctxt, path, NULL);
	if (ds_file == NULL) {
		msg("%s: failed to create file.\n", my_progname);
		goto err;
	}

	if (opt_verbose) {
		msg("%s\n", new_entry->path);
	}

	new_entry->file = ds_file;
	new_entry->ds_ctxt = ds_ctxt;

	return new_entry;

err:
	if (new_entry->path != NULL) {
		my_free(new_entry->path);
	}
	my_free(new_entry);

	return NULL;
}
/* Hash callback: the key of a file_entry_t is its path bytes; the key
   length is stored into *length. */
static
uchar *
get_file_entry_key(file_entry_t *entry, size_t *length,
		   my_bool not_used __attribute__((unused)))
{
	*length = entry->pathlen;
	return (uchar *) entry->path;
}
/*
  Hash free callback: close the datasink file and release the entry.
  NOTE(review): ds_close() errors are ignored here — confirm extraction
  does not need to surface close/flush failures.
*/
static
void
file_entry_free(file_entry_t *entry)
{
	ds_close(entry->file);
	my_free(entry->path);
	my_free(entry);
}
/*
  -x mode: read an xbstream from stdin and materialize its files under
  the current directory through a local datasink.  Open files are
  tracked in a hash keyed by path; an EOF chunk closes and removes its
  entry (via file_entry_free()).  Out-of-order chunks are rejected.
  Returns 0 on success, 1 on any error.
*/
static
int
mode_extract(int argc __attribute__((unused)),
	     char **argv __attribute__((unused)))
{
	xb_rstream_t		*stream;
	xb_rstream_result_t	res;
	xb_rstream_chunk_t	chunk;
	HASH			filehash;
	file_entry_t		*entry;
	ds_ctxt_t		*ds_ctxt;

	stream = xb_stream_read_new();
	if (stream == NULL) {
		msg("%s: xb_stream_read_new() failed.\n", my_progname);
		return 1;
	}

	/* If --directory is specified, it is already set as CWD by now. */
	ds_ctxt = ds_create(".", DS_TYPE_LOCAL);

	if (my_hash_init(&filehash, &my_charset_bin, START_FILE_HASH_SIZE,
			 0, 0, (my_hash_get_key) get_file_entry_key,
			 (my_hash_free_key) file_entry_free, MYF(0))) {
		msg("%s: failed to initialize file hash.\n", my_progname);
		goto err;
	}

	while ((res = xb_stream_read_chunk(stream, &chunk)) ==
	       XB_STREAM_READ_CHUNK) {
		/* If unknown type and ignorable flag is set, skip this chunk */
		/* NOTE(review): the condition actually skips unknown chunks
		   whose ignorable flag is NOT set, the opposite of the
		   comment above — confirm the intended semantics against
		   xb_stream_read_chunk(), which already rejects unknown
		   non-ignorable chunks. */
		if (chunk.type == XB_CHUNK_TYPE_UNKNOWN && \
		    !(chunk.flags & XB_STREAM_FLAG_IGNORABLE)) {
			continue;
		}

		/* See if we already have this file open */
		entry = (file_entry_t *) my_hash_search(&filehash,
							(uchar *) chunk.path,
							chunk.pathlen);

		if (entry == NULL) {
			entry = file_entry_new(ds_ctxt, chunk.path,
					       chunk.pathlen);
			if (entry == NULL) {
				goto err;
			}
			if (my_hash_insert(&filehash, (uchar *) entry)) {
				msg("%s: my_hash_insert() failed.\n",
				    my_progname);
				goto err;
			}
		}

		/* EOF chunk: deleting the entry runs file_entry_free(),
		   which closes the output file. */
		if (chunk.type == XB_CHUNK_TYPE_EOF) {
			my_hash_delete(&filehash, (uchar *) entry);

			continue;
		}

		if (entry->offset != chunk.offset) {
			msg("%s: out-of-order chunk: real offset = 0x%llx, "
			    "expected offset = 0x%llx\n", my_progname,
			    chunk.offset, entry->offset);

			goto err;
		}

		if (ds_write(entry->file, chunk.data, chunk.length)) {
			msg("%s: my_write() failed.\n", my_progname);

			goto err;
		}

		entry->offset += chunk.length;
	};

	if (res == XB_STREAM_READ_ERROR) {
		goto err;
	}

	my_hash_free(&filehash);
	ds_destroy(ds_ctxt);
	xb_stream_read_done(stream);

	return 0;
err:
	my_hash_free(&filehash);
	ds_destroy(ds_ctxt);
	xb_stream_read_done(stream);

	return 1;
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
The xbstream format interface.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XBSTREAM_H
#define XBSTREAM_H
#include <my_base.h>
/* Magic value in a chunk header */
#define XB_STREAM_CHUNK_MAGIC "XBSTCK01"
/* Chunk flags */
/* Chunk can be ignored if unknown version/format */
#define XB_STREAM_FLAG_IGNORABLE 0x01
/* Magic + flags + type + path len */
#define CHUNK_HEADER_CONSTANT_LEN ((sizeof(XB_STREAM_CHUNK_MAGIC) - 1) + \
1 + 1 + 4)
#define CHUNK_TYPE_OFFSET (sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1)
#define PATH_LENGTH_OFFSET (sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1)
typedef struct xb_wstream_struct xb_wstream_t;
typedef struct xb_wstream_file_struct xb_wstream_file_t;
typedef enum {
XB_STREAM_FMT_NONE,
XB_STREAM_FMT_TAR,
XB_STREAM_FMT_XBSTREAM
} xb_stream_fmt_t;
/************************************************************************
Write interface. */
typedef ssize_t xb_stream_write_callback(xb_wstream_file_t *file,
void *userdata,
const void *buf, size_t len);
xb_wstream_t *xb_stream_write_new(void);
xb_wstream_file_t *xb_stream_write_open(xb_wstream_t *stream, const char *path,
MY_STAT *mystat, void *userdata,
xb_stream_write_callback *onwrite);
int xb_stream_write_data(xb_wstream_file_t *file, const void *buf, size_t len);
int xb_stream_write_close(xb_wstream_file_t *file);
int xb_stream_write_done(xb_wstream_t *stream);
/************************************************************************
Read interface. */
typedef enum {
XB_STREAM_READ_CHUNK,
XB_STREAM_READ_EOF,
XB_STREAM_READ_ERROR
} xb_rstream_result_t;
typedef enum {
XB_CHUNK_TYPE_UNKNOWN = '\0',
XB_CHUNK_TYPE_PAYLOAD = 'P',
XB_CHUNK_TYPE_EOF = 'E'
} xb_chunk_type_t;
typedef struct xb_rstream_struct xb_rstream_t;
/* One chunk as filled in by xb_stream_read_chunk(). */
typedef struct {
	uchar		flags;		/* chunk flags (XB_STREAM_FLAG_*) */
	xb_chunk_type_t	type;		/* payload / EOF / unknown */
	uint		pathlen;	/* length of path, excluding NUL */
	char		path[FN_REFLEN];/* file the chunk belongs to */
	size_t		length;		/* payload length in bytes */
	my_off_t	offset;		/* payload offset within the file */
	void		*data;		/* payload; points into reader's buffer */
	ulong		checksum;	/* CRC32 of the payload */
} xb_rstream_chunk_t;
xb_rstream_t *xb_stream_read_new(void);
xb_rstream_result_t xb_stream_read_chunk(xb_rstream_t *stream,
xb_rstream_chunk_t *chunk);
int xb_stream_read_done(xb_rstream_t *stream);
#endif
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
The xbstream format reader implementation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include <zlib.h>
#include "common.h"
#include "xbstream.h"
/* Allocate 1 MB for the payload buffer initially */
#define INIT_BUFFER_LEN (1024 * 1024)
#ifndef MY_OFF_T_MAX
#define MY_OFF_T_MAX (~(my_off_t)0UL)
#endif
/* State of a running xbstream reader; input is always stdin. */
struct xb_rstream_struct {
	my_off_t	offset;		/* bytes of the stream consumed so far */
	File		fd;		/* input descriptor (stdin) */
	void		*buffer;	/* payload buffer, grown on demand */
	size_t		buflen;		/* current size of buffer */
};
/*
  Allocate a stream reader bound to stdin with an INIT_BUFFER_LEN
  payload buffer; on Windows, switch stdin to binary mode.
  Never returns NULL (allocations use MY_FAE).
*/
xb_rstream_t *
xb_stream_read_new(void)
{
	xb_rstream_t	*rstream;

	rstream = (xb_rstream_t *) my_malloc(sizeof(xb_rstream_t),
					     MYF(MY_FAE));

	rstream->offset = 0;
	rstream->fd = fileno(stdin);
	rstream->buffer = my_malloc(INIT_BUFFER_LEN, MYF(MY_FAE));
	rstream->buflen = INIT_BUFFER_LEN;

#ifdef __WIN__
	setmode(rstream->fd, _O_BINARY);
#endif

	return rstream;
}
/*
  Map a raw chunk-type byte to the enum: payload and EOF codes pass
  through unchanged, anything else collapses to XB_CHUNK_TYPE_UNKNOWN.
*/
static inline
xb_chunk_type_t
validate_chunk_type(uchar code)
{
	xb_chunk_type_t	type = (xb_chunk_type_t) code;

	if (type == XB_CHUNK_TYPE_PAYLOAD || type == XB_CHUNK_TYPE_EOF) {
		return type;
	}

	return XB_CHUNK_TYPE_UNKNOWN;
}
/*
  Read exactly `len` bytes into `buf` from the local variable `fd`;
  on a short read print an error and `goto err`.  Only usable inside a
  function that declares `fd` and an err: label (xb_stream_read_chunk).
*/
#define F_READ(buf,len)                                                 \
	do {                                                            \
		if (xb_read_full(fd, buf, len) < len) {                 \
			msg("xb_stream_read_chunk(): my_read() failed.\n"); \
			goto err;                                       \
		}                                                       \
	} while (0)
/*
  Read and validate one chunk from the stream into *chunk.

  Wire layout: 8-byte magic, 1 flags byte, 1 type byte, 4-byte path
  length, path bytes, then (for payload chunks) 8-byte payload length,
  8-byte payload offset, 4-byte CRC32, payload bytes.

  Returns XB_STREAM_READ_CHUNK on success (chunk->data points into the
  stream's internal buffer, valid until the next call),
  XB_STREAM_READ_EOF on a clean end of input, XB_STREAM_READ_ERROR on
  malformed/short input.
*/
xb_rstream_result_t
xb_stream_read_chunk(xb_rstream_t *stream, xb_rstream_chunk_t *chunk)
{
	uchar		tmpbuf[16];
	uchar		*ptr = tmpbuf;
	uint		pathlen;
	size_t		tbytes;
	ulonglong	ullval;
	ulong		checksum_exp;
	ulong		checksum;
	File		fd = stream->fd;	/* needed by F_READ() */

	xb_ad(sizeof(tmpbuf) >= CHUNK_HEADER_CONSTANT_LEN);

	/* This is the only place where we expect EOF, so read with
	xb_read_full() rather than F_READ() */
	tbytes = xb_read_full(fd, ptr, CHUNK_HEADER_CONSTANT_LEN);
	if (tbytes == 0) {
		return XB_STREAM_READ_EOF;
	} else if (tbytes < CHUNK_HEADER_CONSTANT_LEN) {
		msg("xb_stream_read_chunk(): unexpected end of stream at "
		    "offset 0x%llx.\n", stream->offset);
		goto err;
	}

	ptr = tmpbuf;

	/* Chunk magic value */
	if (memcmp(tmpbuf, XB_STREAM_CHUNK_MAGIC, 8)) {
		msg("xb_stream_read_chunk(): wrong chunk magic at offset "
		    "0x%llx.\n", (ulonglong) stream->offset);
		goto err;
	}
	ptr += 8;
	stream->offset += 8;

	/* Chunk flags */
	chunk->flags = *ptr++;
	stream->offset++;

	/* Chunk type, ignore unknown ones if ignorable flag is set */
	chunk->type = validate_chunk_type(*ptr);
	if (chunk->type == XB_CHUNK_TYPE_UNKNOWN &&
	    !(chunk->flags & XB_STREAM_FLAG_IGNORABLE)) {
		msg("xb_stream_read_chunk(): unknown chunk type 0x%lu at "
		    "offset 0x%llx.\n", (ulong) *ptr,
		    (ulonglong) stream->offset);
		goto err;
	}
	ptr++;
	stream->offset++;

	/* Path length: must leave room for the NUL stored below,
	   hence the >= FN_REFLEN rejection. */
	pathlen = uint4korr(ptr);
	if (pathlen >= FN_REFLEN) {
		msg("xb_stream_read_chunk(): path length (%lu) is too large at "
		    "offset 0x%llx.\n", (ulong) pathlen, stream->offset);
		goto err;
	}
	chunk->pathlen = pathlen;
	stream->offset +=4;

	xb_ad((ptr + 4 - tmpbuf) == CHUNK_HEADER_CONSTANT_LEN);

	/* Path */
	if (chunk->pathlen > 0) {
		F_READ((uchar *) chunk->path, pathlen);
		stream->offset += pathlen;
	}
	chunk->path[pathlen] = '\0';

	/* EOF chunks carry no payload fields at all. */
	if (chunk->type == XB_CHUNK_TYPE_EOF) {
		return XB_STREAM_READ_CHUNK;
	}

	/* Payload length */
	F_READ(tmpbuf, 16);
	ullval = uint8korr(tmpbuf);
	if (ullval > (ulonglong) SIZE_T_MAX) {
		msg("xb_stream_read_chunk(): chunk length is too large at "
		    "offset 0x%llx: 0x%llx.\n", (ulonglong) stream->offset,
		    ullval);
		goto err;
	}
	chunk->length = (size_t) ullval;
	stream->offset += 8;

	/* Payload offset */
	ullval = uint8korr(tmpbuf + 8);
	if (ullval > (ulonglong) MY_OFF_T_MAX) {
		msg("xb_stream_read_chunk(): chunk offset is too large at "
		    "offset 0x%llx: 0x%llx.\n", (ulonglong) stream->offset,
		    ullval);
		goto err;
	}
	chunk->offset = (my_off_t) ullval;
	stream->offset += 8;

	/* Reallocate the buffer if needed */
	/* NOTE(review): on my_realloc() failure the old buffer pointer is
	   overwritten with NULL and leaks — acceptable only because the
	   caller treats XB_STREAM_READ_ERROR as fatal; confirm. */
	if (chunk->length > stream->buflen) {
		stream->buffer = my_realloc(stream->buffer, chunk->length,
					    MYF(MY_WME));
		if (stream->buffer == NULL) {
			msg("xb_stream_read_chunk(): failed to increase buffer "
			    "to %lu bytes.\n", (ulong) chunk->length);
			goto err;
		}
		stream->buflen = chunk->length;
	}

	/* Checksum — read before the payload it covers. */
	F_READ(tmpbuf, 4);
	checksum_exp = uint4korr(tmpbuf);

	/* Payload */
	if (chunk->length > 0) {
		F_READ(stream->buffer, chunk->length);
		stream->offset += chunk->length;
	}

	checksum = crc32(0, stream->buffer, chunk->length);
	if (checksum != checksum_exp) {
		msg("xb_stream_read_chunk(): invalid checksum at offset "
		    "0x%llx: expected 0x%lx, read 0x%lx.\n",
		    (ulonglong) stream->offset, checksum_exp, checksum);
		goto err;
	}
	stream->offset += 4;

	chunk->data = stream->buffer;
	chunk->checksum = checksum;

	return XB_STREAM_READ_CHUNK;

err:
	return XB_STREAM_READ_ERROR;
}
/* Release a stream reader and its payload buffer.  Always returns 0. */
int
xb_stream_read_done(xb_rstream_t *stream)
{
	my_free(stream->buffer);
	my_free(stream);

	return 0;
}
/******************************************************
Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
The xbstream format writer implementation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#include <mysql_version.h>
#include <my_base.h>
#include <zlib.h>
#include "common.h"
#include "xbstream.h"
/* Group writes smaller than this into a single chunk */
#define XB_STREAM_MIN_CHUNK_SIZE (10 * 1024 * 1024)
/* Shared writer state: the mutex serializes chunk emission so that a
   chunk header and its payload reach the output as one unit even when
   several files/threads share the stream. */
struct xb_wstream_struct {
	pthread_mutex_t	mutex;
};
/* Per-file writer state; small writes are coalesced in `chunk` until
   XB_STREAM_MIN_CHUNK_SIZE bytes accumulate (see xb_stream_write_data). */
struct xb_wstream_file_struct {
	xb_wstream_t	*stream;	/* owning stream (for the mutex) */
	char		*path;		/* path stored in every chunk header */
	ulong		path_len;	/* strlen(path) */
	char		chunk[XB_STREAM_MIN_CHUNK_SIZE]; /* coalescing buffer */
	char		*chunk_ptr;	/* next free byte in chunk */
	size_t		chunk_free;	/* bytes left in chunk */
	my_off_t	offset;		/* file offset of the next payload */
	void		*userdata;	/* opaque arg for the write callback */
	xb_stream_write_callback *write; /* output callback */
};
static int xb_stream_flush(xb_wstream_file_t *file);
static int xb_stream_write_chunk(xb_wstream_file_t *file,
const void *buf, size_t len);
static int xb_stream_write_eof(xb_wstream_file_t *file);
/*
  Default output callback: write the buffer to stdout in one piece
  (MY_NABP = fail unless all bytes are written).  Returns the number
  of bytes written, or -1 on failure.
*/
static
ssize_t
xb_stream_default_write_callback(xb_wstream_file_t *file __attribute__((unused)),
				 void *userdata __attribute__((unused)),
				 const void *buf, size_t len)
{
	ssize_t	ret = -1;

	if (!my_write(fileno(stdout), buf, len, MYF(MY_WME | MY_NABP))) {
		ret = (ssize_t) len;
	}

	return ret;
}
/*
  Allocate a stream writer and initialise its chunk-emission mutex.
  Never returns NULL (allocation uses MY_FAE).
*/
xb_wstream_t *
xb_stream_write_new(void)
{
	xb_wstream_t	*new_stream;

	new_stream = (xb_wstream_t *) my_malloc(sizeof(xb_wstream_t),
						MYF(MY_FAE));
	pthread_mutex_init(&new_stream->mutex, NULL);

	return new_stream;
}
xb_wstream_file_t *
xb_stream_write_open(xb_wstream_t *stream, const char *path,
MY_STAT *mystat __attribute__((unused)),
void *userdata,
xb_stream_write_callback *onwrite)
{
xb_wstream_file_t *file;
ulong path_len;
path_len = strlen(path);
if (path_len > FN_REFLEN) {
msg("xb_stream_write_open(): file path is too long.\n");
return NULL;
}
file = (xb_wstream_file_t *) my_malloc(sizeof(xb_wstream_file_t) +
path_len + 1, MYF(MY_FAE));
file->path = (char *) (file + 1);
memcpy(file->path, path, path_len + 1);
file->path_len = path_len;
file->stream = stream;
file->offset = 0;
file->chunk_ptr = file->chunk;
file->chunk_free = XB_STREAM_MIN_CHUNK_SIZE;
if (onwrite) {
#ifdef __WIN__
setmode(fileno(stdout), _O_BINARY);
#endif
file->userdata = userdata;
file->write = onwrite;
} else {
file->userdata = NULL;
file->write = xb_stream_default_write_callback;
}
return file;
}
/*
  Buffer `len` bytes for `file`.  Writes smaller than the remaining
  buffer space are coalesced; anything else flushes the buffer and is
  emitted as its own chunk.  Returns 0 on success, 1 on write failure.
*/
int
xb_stream_write_data(xb_wstream_file_t *file, const void *buf, size_t len)
{
	if (len >= file->chunk_free) {
		/* Doesn't fit: flush the buffered bytes, then send this
		write as a standalone chunk. */
		if (xb_stream_flush(file)) {
			return 1;
		}
		return xb_stream_write_chunk(file, buf, len);
	}

	memcpy(file->chunk_ptr, buf, len);
	file->chunk_ptr += len;
	file->chunk_free -= len;

	return 0;
}
/*
  Finish a file: flush buffered data, emit its EOF chunk, and free the
  handle (the handle is freed even on failure).
  Returns 0 on success, 1 if the flush or EOF write failed.
*/
int
xb_stream_write_close(xb_wstream_file_t *file)
{
	int	rc = 0;

	if (xb_stream_flush(file) ||
	    xb_stream_write_eof(file)) {
		rc = 1;
	}

	my_free(file);

	return rc;
}
/* Destroy a stream writer.  All files must have been closed already.
   Always returns 0. */
int
xb_stream_write_done(xb_wstream_t *stream)
{
	pthread_mutex_destroy(&stream->mutex);

	my_free(stream);

	return 0;
}
/*
  Emit the coalescing buffer as one chunk and reset it.  A no-op when
  nothing is buffered.  Returns 0 on success, 1 on write failure.
*/
static
int
xb_stream_flush(xb_wstream_file_t *file)
{
	size_t	buffered = file->chunk_ptr - file->chunk;

	if (buffered == 0) {
		return 0;
	}

	if (xb_stream_write_chunk(file, file->chunk, buffered)) {
		return 1;
	}

	file->chunk_ptr = file->chunk;
	file->chunk_free = XB_STREAM_MIN_CHUNK_SIZE;

	return 0;
}
/*
  Emit one payload chunk for `file`: header (magic, flags, type, path
  length, path, payload length, payload offset, CRC32) followed by the
  payload.  The stream mutex is held from just before the offset field
  is stamped until both writes complete, so concurrent writers cannot
  interleave chunks or observe a stale file->offset.
  Returns 0 on success, 1 on write failure.
*/
static
int
xb_stream_write_chunk(xb_wstream_file_t *file, const void *buf, size_t len)
{
	/* Chunk magic + flags + chunk type + path_len + path + len + offset +
	checksum */
	uchar		tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 +
			       FN_REFLEN + 8 + 8 + 4];
	uchar		*ptr;
	xb_wstream_t	*stream = file->stream;
	ulong		checksum;

	/* Write xbstream header */
	ptr = tmpbuf;

	/* Chunk magic */
	memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
	ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;

	*ptr++ = 0;                             /* Chunk flags */

	*ptr++ = (uchar) XB_CHUNK_TYPE_PAYLOAD; /* Chunk type */

	int4store(ptr, file->path_len);         /* Path length */
	ptr += 4;

	memcpy(ptr, file->path, file->path_len); /* Path */
	ptr += file->path_len;

	int8store(ptr, len);                    /* Payload length */
	ptr += 8;

	/* file->offset is both read (below) and advanced under the mutex. */
	pthread_mutex_lock(&stream->mutex);

	int8store(ptr, file->offset);           /* Payload offset */
	ptr += 8;

	checksum = crc32(0, buf, len);          /* checksum */

	int4store(ptr, checksum);
	ptr += 4;

	xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));

	if (file->write(file, file->userdata, tmpbuf, ptr-tmpbuf) == -1)
		goto err;

	if (file->write(file, file->userdata, buf, len) == -1) /* Payload */
		goto err;

	file->offset+= len;

	pthread_mutex_unlock(&stream->mutex);

	return 0;

err:
	pthread_mutex_unlock(&stream->mutex);

	return 1;
}
/*
  Emit the EOF chunk for `file` (header only — magic, flags, EOF type,
  path length, path; no payload fields).  The whole header write is
  performed under the stream mutex so it cannot interleave with other
  files' chunks.  Returns 0 on success, 1 on write failure.
*/
static
int
xb_stream_write_eof(xb_wstream_file_t *file)
{
	/* Chunk magic + flags + chunk type + path_len + path */
	uchar		tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 +
			       FN_REFLEN];
	uchar		*ptr;
	xb_wstream_t	*stream = file->stream;

	pthread_mutex_lock(&stream->mutex);

	/* Write xbstream header */
	ptr = tmpbuf;

	/* Chunk magic */
	memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
	ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;

	*ptr++ = 0;                         /* Chunk flags */

	*ptr++ = (uchar) XB_CHUNK_TYPE_EOF; /* Chunk type */

	int4store(ptr, file->path_len);     /* Path length */
	ptr += 4;

	memcpy(ptr, file->path, file->path_len); /* Path */
	ptr += file->path_len;

	xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));

	if (file->write(file, file->userdata, tmpbuf,
			(ulonglong) (ptr - tmpbuf)) == -1)
		goto err;

	pthread_mutex_unlock(&stream->mutex);

	return 0;
err:
	pthread_mutex_unlock(&stream->mutex);

	return 1;
}
/* (elided in this extract: the original commit contained a further file
   here that was too large for the diff viewer — "This source diff could
   not be displayed because it is too large. You can view the blob
   instead.") */
/******************************************************
Copyright (c) 2011-2015 Percona LLC and/or its affiliates.
Declarations for xtrabackup.cc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XB_XTRABACKUP_H
#define XB_XTRABACKUP_H
#include <my_getopt.h>
#include "datasink.h"
#include "xbstream.h"
#include "changed_page_bitmap.h"
/*
  Sentinel for an unset/invalid datafile handle.
  On Windows a file handle is presumably a HANDLE pointer (NULL when
  undefined); on POSIX it is an integer file descriptor (-1 when
  undefined). NOTE(review): handle type inferred from the conditional;
  confirm against the os_file_t definition.
*/
#ifdef __WIN__
#define XB_FILE_UNDEFINED NULL
#else
#define XB_FILE_UNDEFINED (-1)
#endif
/*
  Metadata describing one incremental-backup delta file
  (written via xb_write_delta_metadata() below).
*/
typedef struct {
	ulint	page_size;	/* page size of the source tablespace */
	ulint	zip_size;	/* compressed page size; presumably 0 when the
				   tablespace is not compressed -- see
				   xb_get_zip_size() below */
	ulint	space_id;	/* InnoDB tablespace (space) id */
} xb_delta_info_t;
/* ======== Datafiles iterator ======== */
/*
  Iterator state for walking all datafile nodes in the InnoDB
  tablespace cache; used with datafiles_iter_new()/next()/free()
  declared below.
*/
typedef struct {
	fil_system_t	*system;	/* tablespace cache being iterated */
	fil_space_t	*space;		/* current tablespace */
	fil_node_t	*node;		/* current datafile node of 'space' */
	ibool		started;	/* TRUE once iteration has started */
	os_ib_mutex_t	mutex;		/* NOTE(review): presumably serializes
					   concurrent datafiles_iter_next()
					   calls from copy threads -- confirm
					   against the definition */
} datafiles_iter_t;
/* value of the --incremental option */
extern lsn_t incremental_lsn;

/* Source/target directory options.
   NOTE: the duplicate declarations of xtrabackup_incremental_dir and
   xtrabackup_incremental_basedir that used to appear further below have
   been removed; these are the single authoritative declarations. */
extern char *xtrabackup_target_dir;
extern char *xtrabackup_incremental_dir;
extern char *xtrabackup_incremental_basedir;
extern char *innobase_data_home_dir;
extern char *innobase_buffer_pool_filename;

/* Datasinks used for backup metadata and data files */
extern ds_ctxt_t *ds_meta;
extern ds_ctxt_t *ds_data;

/* The last checkpoint LSN at the backup startup time */
extern lsn_t checkpoint_lsn_start;

extern xb_page_bitmap *changed_page_bitmap;

extern ulint xtrabackup_rebuild_threads;

extern char *xtrabackup_incremental;
extern my_bool xtrabackup_incremental_force_scan;

/* LSN range recorded in the backup metadata */
extern lsn_t metadata_from_lsn;
extern lsn_t metadata_to_lsn;
extern lsn_t metadata_last_lsn;

/* Streaming output format and whether streaming is enabled */
extern xb_stream_fmt_t xtrabackup_stream_fmt;
extern ibool xtrabackup_stream;

/* Table/database filtering options (--tables, --databases, ...) */
extern char *xtrabackup_tables;
extern char *xtrabackup_tables_file;
extern char *xtrabackup_databases;
extern char *xtrabackup_databases_file;

extern my_bool xtrabackup_compact;
extern ibool xtrabackup_compress;
extern ibool xtrabackup_encrypt;

/* Main operation-mode flags */
extern my_bool xtrabackup_backup;
extern my_bool xtrabackup_prepare;
extern my_bool xtrabackup_apply_log_only;
extern my_bool xtrabackup_copy_back;
extern my_bool xtrabackup_move_back;
extern my_bool xtrabackup_decrypt_decompress;

/* InnoDB configuration passthrough */
extern char *innobase_data_file_path;
extern char *innobase_doublewrite_file;
extern char *xtrabackup_encrypt_key;
extern char *xtrabackup_encrypt_key_file;
extern longlong innobase_log_file_size;
extern long innobase_log_files_in_group;
extern longlong innobase_page_size;

extern const char *xtrabackup_encrypt_algo_names[];
extern TYPELIB xtrabackup_encrypt_algo_typelib;

extern int xtrabackup_parallel;
extern my_bool xb_close_files;

/* Compression/encryption worker settings */
extern const char *xtrabackup_compress_alg;
extern uint xtrabackup_compress_threads;
extern ulonglong xtrabackup_compress_chunk_size;
extern ulong xtrabackup_encrypt_algo;
extern uint xtrabackup_encrypt_threads;
extern ulonglong xtrabackup_encrypt_chunk_size;
extern my_bool xtrabackup_export;
extern char *xtrabackup_extra_lsndir;
extern ulint xtrabackup_log_copy_interval;
extern my_bool xtrabackup_rebuild_indexes;

extern char *xtrabackup_stream_str;
extern long xtrabackup_throttle;
extern longlong xtrabackup_use_memory;

/* Behavior options (opt_*) */
extern my_bool opt_galera_info;
extern my_bool opt_slave_info;
extern my_bool opt_no_lock;
extern my_bool opt_safe_slave_backup;
extern my_bool opt_rsync;
extern my_bool opt_force_non_empty_dirs;
extern my_bool opt_noversioncheck;
extern my_bool opt_no_backup_locks;
extern my_bool opt_decompress;
extern my_bool opt_remove_original;

extern char *opt_incremental_history_name;
extern char *opt_incremental_history_uuid;

/* MySQL server connection options */
extern char *opt_user;
extern char *opt_password;
extern char *opt_host;
extern char *opt_defaults_group;
extern char *opt_socket;
extern uint opt_port;
extern char *opt_login_path;
extern char *opt_log_bin;
/* Human-readable names backing query_type_typelib below */
extern const char *query_type_names[];

/* Classes of running server queries that the lock-wait/kill options
   can match */
enum query_type_t {QUERY_TYPE_ALL, QUERY_TYPE_UPDATE,
QUERY_TYPE_SELECT};

extern TYPELIB query_type_typelib;

extern ulong opt_lock_wait_query_type;
extern ulong opt_kill_long_query_type;

extern ulong opt_decrypt_algo;

/* Timeouts/thresholds -- presumably in seconds; TODO confirm against
   the option definitions in xtrabackup.cc */
extern uint opt_kill_long_queries_timeout;
extern uint opt_lock_wait_timeout;
extern uint opt_lock_wait_threshold;
extern uint opt_debug_sleep_before_unlock;
extern uint opt_safe_slave_backup_timeout;

extern const char *opt_history;
extern my_bool opt_decrypt;

/* SSL-related options are only compiled in when OpenSSL is available;
   opt_server_public_key additionally requires real OpenSSL (not YaSSL) */
#if defined(HAVE_OPENSSL)
extern my_bool opt_use_ssl;
extern my_bool opt_ssl_verify_server_cert;
#if !defined(HAVE_YASSL)
extern char *opt_server_public_key;
#endif
#endif

/* How binlog position info is obtained (value of --binlog-info) */
enum binlog_info_enum { BINLOG_INFO_OFF, BINLOG_INFO_LOCKLESS, BINLOG_INFO_ON,
BINLOG_INFO_AUTO};
extern ulong opt_binlog_info;
/* Apply I/O throttling during backup -- NOTE(review): presumably driven
   by xtrabackup_throttle above; confirm in xtrabackup.cc */
void xtrabackup_io_throttling(void);

/* Write incremental delta metadata 'info' for the datafile 'filename'.
   @return nonzero on success -- TODO confirm exact convention against
   the definition */
my_bool xb_write_delta_metadata(const char *filename,
const xb_delta_info_t *info);

/* Create an iterator over all datafile nodes of 'f_system' */
datafiles_iter_t *datafiles_iter_new(fil_system_t *f_system);
/* Advance the iterator; presumably returns NULL when exhausted --
   TODO confirm against the definition */
fil_node_t *datafiles_iter_next(datafiles_iter_t *it);
/* Release all resources owned by the iterator */
void datafiles_iter_free(datafiles_iter_t *it);
/************************************************************************
Initialize the tablespace memory cache and populate it by scanning for and
opening data files */
ulint xb_data_files_init(void);
/************************************************************************
Destroy the tablespace memory cache. */
void xb_data_files_close(void);
/***********************************************************************
Reads the space flags from a given data file and returns the compressed
page size, or 0 if the space is not compressed. */
ulint xb_get_zip_size(os_file_t file);
/************************************************************************
Checks if a table specified as a name in the form "database/name" (InnoDB 5.6)
or "./database/name.ibd" (InnoDB 5.5-) should be skipped from backup based on
the --tables or --tables-file options.
@return TRUE if the table should be skipped. */
my_bool
check_if_skip_table(
/******************/
const char* name); /*!< in: path to the table */
/************************************************************************
Check if parameter is set in defaults file or via command line argument
@return true if parameter is set. */
bool
check_if_param_set(const char *param);

/* Entry point of the --backup operation mode */
void
xtrabackup_backup_func(void);

/* my_getopt callback processing a single parsed command-line option.
   @return TRUE on error (standard my_getopt convention -- TODO confirm) */
my_bool
xb_get_one_option(int optid,
const struct my_option *opt __attribute__((unused)),
char *argument);

/* Verb used in progress messages; defaults to "Copying" --
   NOTE(review): usage inferred from the default; confirm callers */
const char*
xb_get_copy_action(const char *dflt = "Copying");
#endif /* XB_XTRABACKUP_H */
/******************************************************
Copyright (c) 2013 Percona LLC and/or its affiliates.
Version numbers definitions.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*******************************************************/
#ifndef XB_VERSION_H
#define XB_VERSION_H
/* This is a template header: the @XB_VERSION@ / @XB_REVISION@
   placeholders are substituted at configure time by CMake's
   CONFIGURE_FILE (see xtrabackup's src/CMakeLists.txt, which generates
   xtrabackup_version.h from this file). Do not include the .in file
   directly. */
#define XTRABACKUP_VERSION "@XB_VERSION@"
#define XTRABACKUP_REVISION "@XB_REVISION@"
#endif /* XB_VERSION_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment