Commit 7ec0820b authored by unknown

Merge gshchepa@bk-internal.mysql.com:/home/bk/mysql-5.0-opt

into  gleb.loc:/home/uchum/work/bk/5.0-opt

parents 3756819e 79435f37
......@@ -1341,3 +1341,6 @@ win/vs71cache.txt
win/vs8cache.txt
zlib/*.ds?
zlib/*.vcproj
debian/control
debian/defs.mk
include/abi_check
......@@ -13,8 +13,9 @@ export LDFLAGS="-fprofile-arcs -ftest-coverage"
# The -fprofile-arcs and -ftest-coverage options cause GCC to instrument the
# code with profiling information used by gcov.
# the -DDISABLE_TAO_ASM is needed to avoid build failures in Yassl.
extra_flags="$pentium_cflags -fprofile-arcs -ftest-coverage -DDISABLE_TAO_ASM $debug_cflags $max_cflags -DMYSQL_SERVER_SUFFIX=-gcov"
# The -DDISABLE_TAO_ASM is needed to avoid build failures in Yassl.
# The -DHAVE_gcov enables code to write out coverage info even when crashing.
extra_flags="$pentium_cflags -fprofile-arcs -ftest-coverage -DDISABLE_TAO_ASM $debug_cflags $max_cflags -DMYSQL_SERVER_SUFFIX=-gcov -DHAVE_gcov"
c_warnings="$c_warnings $debug_extra_warnings"
cxx_warnings="$cxx_warnings $debug_extra_warnings"
extra_configs="$pentium_configs $debug_configs --disable-shared $static_link"
......
......@@ -168,7 +168,12 @@ enum ha_extra_function {
These flags are reset by the handler::extra(HA_EXTRA_RESET) call.
*/
HA_EXTRA_DELETE_CANNOT_BATCH,
HA_EXTRA_UPDATE_CANNOT_BATCH
HA_EXTRA_UPDATE_CANNOT_BATCH,
/*
Inform handler that an "INSERT...ON DUPLICATE KEY UPDATE" will be
executed. This condition is unset by HA_EXTRA_NO_IGNORE_DUP_KEY.
*/
HA_EXTRA_INSERT_WITH_UPDATE
};
/* The following is parameter to ha_panic() */
......
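The new flag is only a hint: the SQL layer sets it before an INSERT ... ON DUPLICATE KEY UPDATE (see the sql_insert.cc hunks further down), and the storage engine decides how to react. Below is a minimal, self-contained sketch, not code from this commit, of how an engine could latch such a hint; the toy_* names are invented for illustration, and only the two flag names mirror my_base.h.

#include <stdio.h>

enum toy_extra_function {
  TOY_EXTRA_NO_IGNORE_DUP_KEY,     /* mirrors HA_EXTRA_NO_IGNORE_DUP_KEY */
  TOY_EXTRA_INSERT_WITH_UPDATE     /* mirrors HA_EXTRA_INSERT_WITH_UPDATE */
};

struct toy_handler {
  int insert_dup_update;           /* set while an upsert statement runs */
};

static void toy_extra(struct toy_handler *h, enum toy_extra_function op)
{
  if (op == TOY_EXTRA_INSERT_WITH_UPDATE)
    h->insert_dup_update= 1;       /* INSERT ... ON DUPLICATE KEY UPDATE follows */
  else if (op == TOY_EXTRA_NO_IGNORE_DUP_KEY)
    h->insert_dup_update= 0;       /* condition is unset again, as documented */
}

int main(void)
{
  struct toy_handler h= {0};
  toy_extra(&h, TOY_EXTRA_INSERT_WITH_UPDATE);
  printf("insert_dup_update=%d\n", h.insert_dup_update);
  toy_extra(&h, TOY_EXTRA_NO_IGNORE_DUP_KEY);
  printf("insert_dup_update=%d\n", h.insert_dup_update);
  return 0;
}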
......@@ -168,8 +168,23 @@ int STDCALL mysql_server_init(int argc __attribute__((unused)),
}
/*
Free all memory and resources used by the client library
NOTES
When calling this there should not be any other threads using
the library.
To make things simpler when used with Windows DLLs (which call this
function automatically), it's safe to call this function multiple times.
*/
void STDCALL mysql_server_end()
{
if (!mysql_client_init)
return;
#ifdef EMBEDDED_LIBRARY
end_embedded_server();
#endif
......
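The notes above translate into a simple shutdown rule for client programs: let every thread that uses the library finish first, then call mysql_server_end(); an extra call is harmless. A minimal usage sketch, assuming the program links against libmysqlclient:

#include <mysql.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  if (mysql_server_init(argc, argv, NULL))   /* returns 0 on success */
  {
    fprintf(stderr, "could not initialize the client library\n");
    return 1;
  }

  /* ... mysql_init(), mysql_real_connect(), queries, mysql_close() ... */

  mysql_server_end();   /* free all memory and resources used by the library */
  mysql_server_end();   /* safe: repeated calls are allowed by design */
  return 0;
}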
......@@ -111,7 +111,7 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
while (doc<end)
{
for (;doc<end;doc++)
for (; doc < end; doc+= mbl)
{
if (true_word_char(cs,*doc)) break;
if (*doc == FTB_RQUOT && param->quot)
......@@ -120,6 +120,7 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
*start=doc+1;
return 3; /* FTB_RBR */
}
mbl= my_mbcharlen(cs, *(uchar *)doc);
if (!param->quot)
{
if (*doc == FTB_LBR || *doc == FTB_RBR || *doc == FTB_LQUOT)
......@@ -187,10 +188,11 @@ byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end,
do
{
for (;; doc++)
for (;; doc+= mbl)
{
if (doc >= end) DBUG_RETURN(0);
if (true_word_char(cs, *doc)) break;
mbl= my_mbcharlen(cs, *(uchar *)doc);
}
mwc= length= 0;
......
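The fix replaces the per-byte scan (doc++) with a per-character scan (doc+= mbl), where mbl comes from my_mbcharlen() for the column's character set, so that a trailing byte of a multi-byte character (gbk in the test below) is not inspected as if it started a new character. Here is a self-contained sketch of the same stepping pattern; utf8_charlen() is an illustrative stand-in for my_mbcharlen(cs, c), not MySQL code:

#include <stdio.h>

static unsigned utf8_charlen(unsigned char c)
{
  if (c < 0x80)           return 1;  /* ASCII */
  if ((c & 0xE0) == 0xC0) return 2;
  if ((c & 0xF0) == 0xE0) return 3;
  if ((c & 0xF8) == 0xF0) return 4;
  return 1;                          /* invalid lead byte: resync by one byte */
}

int main(void)
{
  const unsigned char doc[]= "a\xC3\xA4 b";   /* 'a', U+00E4, ' ', 'b' */
  const unsigned char *p= doc, *end= doc + sizeof(doc) - 1;
  unsigned mbl;

  for (; p < end; p+= mbl)           /* was p++ before the fix */
  {
    mbl= utf8_charlen(*p);           /* skip the whole character next time */
    printf("character at offset %ld, %u byte(s)\n", (long) (p - doc), mbl);
  }
  return 0;
}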
CREATE TABLE t1(a BLOB) ENGINE=ARCHIVE;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
DROP TABLE t1;
......@@ -12364,3 +12364,10 @@ select * from t1;
i
1
drop table t1;
create table t1(a longblob) engine=archive;
insert into t1 set a='';
insert into t1 set a='a';
check table t1 extended;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
......@@ -1843,6 +1843,45 @@ C3A4C3B6C3BCC39F
D18DD184D184D0B5D0BAD182D0B8D0B2D0BDD183D18E
drop table federated.t1;
drop table federated.t1;
create table federated.t1 (a int primary key, b varchar(64))
DEFAULT CHARSET=utf8;
create table federated.t1 (a int primary key, b varchar(64))
ENGINE=FEDERATED
connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'
DEFAULT CHARSET=utf8;
insert ignore into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
select * from federated.t1;
a b
1 Larry
2 Curly
truncate federated.t1;
replace into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
select * from federated.t1;
a b
1 Moe
2 Curly
update ignore federated.t1 set a=a+1;
select * from federated.t1;
a b
1 Moe
3 Curly
drop table federated.t1;
drop table federated.t1;
create table federated.t1 (a int primary key, b varchar(64))
DEFAULT CHARSET=utf8;
create table federated.t1 (a int primary key, b varchar(64))
ENGINE=FEDERATED
connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'
DEFAULT CHARSET=utf8;
insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe")
on duplicate key update a=a+100;
ERROR 23000: Can't write; duplicate key in table 't1'
select * from federated.t1;
a b
1 Larry
2 Curly
drop table federated.t1;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
......
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
stop slave;
DROP DATABASE IF EXISTS federated;
CREATE DATABASE federated;
DROP DATABASE IF EXISTS federated;
CREATE DATABASE federated;
create table federated.t1 (a int primary key, b varchar(64))
engine=myisam;
create table federated.t1 (a int primary key, b varchar(64))
engine=federated
connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
ERROR 23000: Can't write; duplicate key in table 't1'
select * from federated.t1;
a b
1 Larry
2 Curly
truncate federated.t1;
alter table federated.t1 engine=innodb;
insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
ERROR 23000: Can't write; duplicate key in table 't1'
select * from federated.t1;
a b
drop table federated.t1;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a VARCHAR(255) CHARACTER SET gbk, FULLTEXT(a));
SET NAMES utf8;
INSERT INTO t1 VALUES(0xF043616161),(0xBEF361616197C22061616161);
SELECT HEX(a) FROM t1 WHERE MATCH(a) AGAINST(0x97C22061616161 IN BOOLEAN MODE);
HEX(a)
BEF361616197C22061616161
DELETE FROM t1 LIMIT 1;
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
SET NAMES latin1;
DROP TABLE t1;
......@@ -661,6 +661,14 @@ UPDATE t3 SET a = 'us' WHERE a = 'uk';
SELECT * FROM t3 WHERE a = 'uk';
a
DROP TABLE t1,t2,t3;
create table t1 (a int) engine=innodb;
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
drop table t1;
drop table t2;
ERROR 42S02: Unknown table 't2'
create table t2 (a int);
drop table t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
switch to connection c1
......
......@@ -1374,3 +1374,12 @@ insert into t1 values (1);
repair table t1 use_frm;
select * from t1;
drop table t1;
#
# BUG#29207 - archive table reported as corrupt by check table
#
create table t1(a longblob) engine=archive;
insert into t1 set a='';
insert into t1 set a='a';
check table t1 extended;
drop table t1;
......@@ -1576,4 +1576,57 @@ connection slave;
drop table federated.t1;
#
# BUG#21019 Federated Engine does not support REPLACE/INSERT IGNORE/UPDATE IGNORE
#
connection slave;
create table federated.t1 (a int primary key, b varchar(64))
DEFAULT CHARSET=utf8;
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table federated.t1 (a int primary key, b varchar(64))
ENGINE=FEDERATED
connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'
DEFAULT CHARSET=utf8;
insert ignore into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
select * from federated.t1;
truncate federated.t1;
replace into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
select * from federated.t1;
update ignore federated.t1 set a=a+1;
select * from federated.t1;
drop table federated.t1;
connection slave;
drop table federated.t1;
#
# BUG#25511 Federated Insert failures.
#
# When the user performs an INSERT...ON DUPLICATE KEY UPDATE, we want
# it to fail if a duplicate key exists instead of ignoring it.
#
connection slave;
create table federated.t1 (a int primary key, b varchar(64))
DEFAULT CHARSET=utf8;
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table federated.t1 (a int primary key, b varchar(64))
ENGINE=FEDERATED
connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'
DEFAULT CHARSET=utf8;
--error ER_DUP_KEY
insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe")
on duplicate key update a=a+100;
select * from federated.t1;
drop table federated.t1;
connection slave;
drop table federated.t1;
source include/federated_cleanup.inc;
source include/federated.inc;
source include/have_innodb.inc;
#
# Bug#25513 Federated transaction failures
#
connection slave;
create table federated.t1 (a int primary key, b varchar(64))
engine=myisam;
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table federated.t1 (a int primary key, b varchar(64))
engine=federated
connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
--error ER_DUP_KEY
insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
select * from federated.t1;
connection slave;
truncate federated.t1;
alter table federated.t1 engine=innodb;
connection master;
--error ER_DUP_KEY
insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe");
select * from federated.t1;
drop table federated.t1;
connection slave;
drop table federated.t1;
source include/federated_cleanup.inc;
--source include/have_gbk.inc
#
# test of new fulltext search features
#
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
#
# BUG#29299 - repeatable myisam fulltext index corruption
#
CREATE TABLE t1(a VARCHAR(255) CHARACTER SET gbk, FULLTEXT(a));
SET NAMES utf8;
INSERT INTO t1 VALUES(0xF043616161),(0xBEF361616197C22061616161);
SELECT HEX(a) FROM t1 WHERE MATCH(a) AGAINST(0x97C22061616161 IN BOOLEAN MODE);
DELETE FROM t1 LIMIT 1;
CHECK TABLE t1;
SET NAMES latin1;
DROP TABLE t1;
# End of 5.0 tests
......@@ -636,6 +636,20 @@ SELECT * FROM t3 WHERE a = 'uk';
DROP TABLE t1,t2,t3;
#
# Test bug when trying to drop a data file which has no InnoDB directory entry
#
create table t1 (a int) engine=innodb;
copy_file $MYSQLTEST_VARDIR/master-data/test/t1.frm $MYSQLTEST_VARDIR/master-data/test/t2.frm;
--error 1146
select * from t2;
drop table t1;
--error 1051
drop table t2;
create table t2 (a int);
drop table t2;
#
# Bug #29154: LOCK TABLES is not atomic when >1 InnoDB tables are locked
......
......@@ -570,6 +570,25 @@ my_bool hash_update(HASH *hash,byte *record,byte *old_key,uint old_key_length)
previous->next=pos->next; /* unlink pos */
/* Move data to correct position */
if (new_index == empty)
{
/*
At this point the record is unlinked from the old chain, so it holds
an arbitrary position. By chance this position is equal to the
position of the first element in the new chain, which means the
updated record is the only record in the new chain.
*/
if (empty != idx)
{
/*
Record was moved while unlinking it from the old chain.
Copy data to a new position.
*/
data[empty]= org_link;
}
data[empty].next= NO_RECORD;
DBUG_RETURN(0);
}
pos=data+new_index;
new_pos_index=hash_rec_mask(hash,pos,blength,records);
if (new_index != new_pos_index)
......
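The special case is easier to see on a toy version of the array-backed chained hash. The sketch below is illustrative only (invented names, not mysys code): when the record's new head bucket turns out to be the very slot freed by the unlink, the record is placed there and its chain is terminated, since it is the only element of that chain.

#include <stdio.h>

#define NO_RECORD 0xFFFFFFFFu

/* Toy element: payload plus the array index of the next chain member. */
typedef struct { int key; unsigned next; } elem;

/*
  The new_index == empty case: the record, already unlinked from its old
  chain, becomes the sole element of the new chain rooted at 'empty'.
*/
static void place_in_empty_slot(elem *data, unsigned empty, unsigned idx,
                                elem org_link)
{
  if (empty != idx)
    data[empty]= org_link;      /* record was displaced while unlinking */
  data[empty].next= NO_RECORD;  /* terminate the one-element chain */
}

int main(void)
{
  elem data[4]= { {10, NO_RECORD}, {20, NO_RECORD},
                  {30, NO_RECORD}, {40, NO_RECORD} };
  elem org_link= data[1];       /* copy taken before unlinking, as in hash.c */

  place_in_empty_slot(data, 3 /* empty */, 1 /* idx */, org_link);
  printf("slot 3 holds key %d, next=%#x\n", data[3].key, data[3].next);
  return 0;
}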
......@@ -205,7 +205,7 @@ bool archive_db_init()
else
{
zoffset_size= 2 << ((zlibCompileFlags() >> 6) & 3);
switch (sizeof(z_off_t)) {
switch (zoffset_size) {
case 2:
max_zfile_size= INT_MAX16;
break;
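The corrected switch asks zlib itself, rather than the MySQL build, how large its z_off_t is: zlibCompileFlags() encodes the size in bits 6-7 as 2 << value bytes (0 -> 16-bit, 1 -> 32-bit, 2 -> 64-bit, 3 -> other). A small standalone sketch, assuming the zlib headers and library are available:

#include <stdio.h>
#include <zlib.h>

int main(void)
{
  unsigned long flags= zlibCompileFlags();
  unsigned zoffset_size= 2u << ((flags >> 6) & 3);   /* same expression as above */

  printf("zlib was built with a %u-byte z_off_t\n", zoffset_size);
  printf("this binary sees sizeof(z_off_t) == %u\n",
         (unsigned) sizeof(z_off_t));
  return 0;
}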
......@@ -676,6 +676,7 @@ int ha_archive::real_write_row(byte *buf, gzFile writer)
total_row_length+= ((Field_blob*) table->field[*ptr])->get_length();
if (share->approx_file_size > max_zfile_size - total_row_length)
{
gzflush(writer, Z_SYNC_FLUSH);
info(HA_STATUS_TIME);
share->approx_file_size= (ulong) data_file_length;
if (share->approx_file_size > max_zfile_size - total_row_length)
......@@ -1204,7 +1205,6 @@ bool ha_archive::is_crashed() const
int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
{
int rc= 0;
byte *buf;
const char *old_proc_info=thd->proc_info;
ha_rows count= share->rows_recorded;
DBUG_ENTER("ha_archive::check");
......@@ -1213,25 +1213,13 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
/* Flush any waiting data */
gzflush(share->archive_write, Z_SYNC_FLUSH);
/*
First we create a buffer that we can use for reading rows, and can pass
to get_row().
*/
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
rc= HA_ERR_OUT_OF_MEM;
/*
Now we will rewind the archive file so that we are positioned at the
start of the file.
*/
if (!rc)
read_data_header(archive);
if (!rc)
while (!(rc= get_row(archive, buf)))
count--;
my_free((char*)buf, MYF(0));
read_data_header(archive);
while (!(rc= get_row(archive, table->record[0])))
count--;
thd->proc_info= old_proc_info;
......
......@@ -157,6 +157,9 @@ class ha_federated: public handler
MYSQL_ROW_OFFSET current_position; // Current position used by ::position()
int remote_error_number;
char remote_error_buf[FEDERATED_QUERY_BUFFER_SIZE];
bool ignore_duplicates, replace_duplicates;
bool insert_dup_update;
DYNAMIC_STRING bulk_insert;
private:
/*
......@@ -171,6 +174,14 @@ class ha_federated: public handler
bool records_in_range);
int stash_remote_error();
bool append_stmt_insert(String *query);
int read_next(byte *buf, MYSQL_RES *result);
int index_read_idx_with_result_set(byte *buf, uint index,
const byte *key,
uint key_len,
ha_rkey_function find_flag,
MYSQL_RES **result);
public:
ha_federated(TABLE *table_arg);
~ha_federated()
......@@ -256,6 +267,8 @@ class ha_federated: public handler
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
void start_bulk_insert(ha_rows rows);
int end_bulk_insert();
int write_row(byte *buf);
int update_row(const byte *old_data, byte *new_data);
int delete_row(const byte *buf);
......@@ -284,6 +297,7 @@ class ha_federated: public handler
int rnd_pos(byte *buf, byte *pos); //required
void position(const byte *record); //required
int info(uint); //required
int extra(ha_extra_function operation);
void update_auto_increment(void);
int repair(THD* thd, HA_CHECK_OPT* check_opt);
......@@ -298,14 +312,7 @@ class ha_federated: public handler
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type); //required
virtual bool get_error_message(int error, String *buf);
int read_next(byte *buf, MYSQL_RES *result);
int index_read_idx_with_result_set(byte *buf, uint index,
const byte *key,
uint key_len,
ha_rkey_function find_flag,
MYSQL_RES **result);
bool get_error_message(int error, String *buf);
};
bool federated_db_init(void);
......
......@@ -504,7 +504,7 @@ convert_error_code_to_mysql(
} else if (error == (int) DB_TABLE_NOT_FOUND) {
return(HA_ERR_KEY_NOT_FOUND);
return(HA_ERR_NO_SUCH_TABLE);
} else if (error == (int) DB_TOO_BIG_RECORD) {
......
......@@ -715,6 +715,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
*/
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (duplic == DUP_UPDATE)
table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
/*
let's *try* to start bulk inserts. It won't necessarily
start them, as values_list.elements should be greater than
......@@ -2434,6 +2436,8 @@ bool Delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
using_opt_replace= 1;
}
if (info.handle_duplicates == DUP_UPDATE)
table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
......@@ -2761,6 +2765,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (info.handle_duplicates == DUP_UPDATE)
table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd->no_trans_update.stmt= FALSE;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
......@@ -3226,6 +3232,8 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (info.handle_duplicates == DUP_UPDATE)
table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
if (!thd->prelocked_mode)
table->file->start_bulk_insert((ha_rows) 0);
thd->no_trans_update.stmt= FALSE;
......
......@@ -241,6 +241,15 @@ void write_core(int sig)
void write_core(int sig)
{
signal(sig, SIG_DFL);
#ifdef HAVE_gcov
/*
For a GCOV build, crashing would prevent this process from writing out
its code coverage information, leaving the gcov output incomplete.
So we force the writing of coverage information here before terminating.
*/
extern void __gcov_flush(void);
__gcov_flush();
#endif
pthread_kill(pthread_self(), sig);
#if defined(P_MYID) && !defined(SCO)
/* On Solaris, the above kill is not enough */
......
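The same pattern can be reused in any program built with GCC's --coverage instrumentation: flush the coverage counters from the fatal-signal handler before letting the default action kill the process. A stripped-down sketch, with the caveat that __gcov_flush() is a GCC-internal symbol (newer GCC versions expose __gcov_dump() instead) and only links when the binary is actually instrumented:

#include <signal.h>

#ifdef HAVE_gcov
extern void __gcov_flush(void);      /* provided by GCC's coverage runtime */
#endif

static void fatal_signal_handler(int sig)
{
  signal(sig, SIG_DFL);              /* default action will produce the core */
#ifdef HAVE_gcov
  __gcov_flush();                    /* write .gcda data before dying */
#endif
  raise(sig);                        /* re-deliver the signal to this process */
}

int main(void)
{
  signal(SIGSEGV, fatal_signal_handler);
  /* ... real work; a crash now still leaves usable coverage data ... */
  return 0;
}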