Commit 80cfee65 authored by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-5.1-new

into  mysql.com:/home/my/mysql-5.1


sql/field.cc:
  Auto merged
parents e29c1d01 1944d425
......@@ -1320,16 +1320,16 @@ start_master()
if [ x$DO_DDD = x1 ]
then
$ECHO "set args $master_args" > $GDB_MASTER_INIT
$ECHO "set args $master_args" > $GDB_MASTER_INIT$1
manager_launch master ddd -display $DISPLAY --debugger \
"gdb -x $GDB_MASTER_INIT" $MASTER_MYSQLD
"gdb -x $GDB_MASTER_INIT$1" $MASTER_MYSQLD
elif [ x$DO_GDB = x1 ]
then
if [ x$MANUAL_GDB = x1 ]
then
$ECHO "set args $master_args" > $GDB_MASTER_INIT
$ECHO "set args $master_args" > $GDB_MASTER_INIT$1
$ECHO "To start gdb for the master , type in another window:"
$ECHO "cd $CWD ; gdb -x $GDB_MASTER_INIT $MASTER_MYSQLD"
$ECHO "cd $CWD ; gdb -x $GDB_MASTER_INIT$1 $MASTER_MYSQLD"
wait_for_master=1500
else
( $ECHO set args $master_args;
......@@ -1341,9 +1341,9 @@ disa 1
end
r
EOF
fi ) > $GDB_MASTER_INIT
fi ) > $GDB_MASTER_INIT$1
manager_launch master $XTERM -display $DISPLAY \
-title "Master" -e gdb -x $GDB_MASTER_INIT $MASTER_MYSQLD
-title "Master" -e gdb -x $GDB_MASTER_INIT$1 $MASTER_MYSQLD
fi
else
manager_launch master $MASTER_MYSQLD $master_args
......@@ -1965,10 +1965,10 @@ then
$MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK1 -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --socket=$SLAVE_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=$MASTER_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=`expr $MASTER_MYPORT+1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=$SLAVE_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=`expr $SLAVE_MYPORT + 1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=$MASTER_MYPORT --protocol=tcp -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --protocol=tcp --port=`expr $MASTER_MYPORT+1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --protocol=tcp --port=$SLAVE_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --protocol=tcp --port=`expr $SLAVE_MYPORT + 1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
sleep_until_file_deleted 0 $MASTER_MYPID
sleep_until_file_deleted 0 $MASTER_MYPID"1"
sleep_until_file_deleted 0 $SLAVE_MYPID
......
......@@ -6,6 +6,37 @@ flush table t1;
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
unlock tables;
lock table t1 read;
lock table t1 read;
flush table t1;
select * from t1;
a
1
unlock tables;
select * from t1;
a
1
unlock tables;
lock table t1 write;
lock table t1 read;
flush table t1;
select * from t1;
a
1
unlock tables;
unlock tables;
lock table t1 read;
lock table t1 write;
flush table t1;
select * from t1;
a
1
unlock tables;
unlock tables;
select * from t1;
a
1
drop table t1;
create table t1(table_id char(20) primary key);
create table t2(table_id char(20) primary key);
......
......@@ -486,7 +486,6 @@ select s1 from t1 where s1 in (select version from
information_schema.tables) union select version from
information_schema.tables;
s1
0
10
drop table t1;
SHOW CREATE TABLE INFORMATION_SCHEMA.character_sets;
......
......@@ -1615,7 +1615,7 @@ t2 CREATE TABLE `t2` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t2;
create table t2 (id int(11) not null, id2 int(11) not null, constraint t1_id_fk foreign key (id2,id) references t1 (id)) engine = innodb;
ERROR HY000: Can't create table './test/t2' (errno: 150)
ERROR HY000: Can't create table 'test.t2' (errno: 150)
create table t2 (a int auto_increment primary key, b int, index(b), foreign key (b) references t1(id), unique(b)) engine=innodb;
show create table t2;
Table Create Table
......@@ -2437,7 +2437,7 @@ a b
20 NULL
drop table t1;
create table t1 (v varchar(65530), key(v));
ERROR HY000: Can't create table './test/t1' (errno: 139)
ERROR HY000: Can't create table 'test.t1' (errno: 139)
create table t1 (v varchar(65536));
Warnings:
Note 1246 Converting column 'v' from VARCHAR to TEXT
......@@ -2580,19 +2580,19 @@ character set = latin1 engine = innodb;
drop table t1, t2, t3, t4, t5, t6, t7, t8, t9;
create table t1 (col1 varchar(768), index (col1))
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t1.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t1' (errno: 139)
create table t2 (col1 varchar(768) primary key)
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t2.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t2' (errno: 139)
create table t3 (col1 varbinary(768) primary key)
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t3.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t3' (errno: 139)
create table t4 (col1 text, index(col1(768)))
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t4.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t4' (errno: 139)
create table t5 (col1 blob, index(col1(768)))
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t5.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t5' (errno: 139)
CREATE TABLE t1
(
id INT PRIMARY KEY
......
......@@ -181,6 +181,9 @@ select * from t4;
ERROR HY000: All tables in the MERGE table are not identically defined
alter table t4 add column c int;
ERROR HY000: All tables in the MERGE table are not identically defined
flush tables;
select * from t4;
ERROR HY000: All tables in the MERGE table are not identically defined
create database mysqltest;
create table mysqltest.t6 (a int not null primary key auto_increment, message char(20));
create table t5 (a int not null, b char(20), key(a)) engine=MERGE UNION=(test.t1,mysqltest.t6);
......
......@@ -201,18 +201,18 @@ create table t1 (
pk1 bit(9) not null primary key,
b int
) engine=ndbcluster;
ERROR HY000: Can't create table './test/t1.frm' (errno: 140)
ERROR HY000: Can't create table 'test.t1' (errno: 140)
show warnings;
Level Code Message
Error 1296 Got error 739 'Unsupported primary key length' from NDB
Error 1005 Can't create table './test/t1.frm' (errno: 140)
Error 1005 Can't create table 'test.t1' (errno: 140)
create table t1 (
pk1 int not null primary key,
b bit(9),
key(b)
) engine=ndbcluster;
ERROR HY000: Can't create table './test/t1.frm' (errno: 140)
ERROR HY000: Can't create table 'test.t1' (errno: 140)
show warnings;
Level Code Message
Error 1296 Got error 743 'Unsupported character set in table or index' from NDB
Error 1005 Can't create table './test/t1.frm' (errno: 140)
Error 1005 Can't create table 'test.t1' (errno: 140)
......@@ -11,11 +11,11 @@ partitions 3
(partition x1 values less than (5) nodegroup 12,
partition x2 values less than (10) nodegroup 13,
partition x3 values less than (20) nodegroup 14);
ERROR HY000: Can't create table './test/t1.frm' (errno: 140)
ERROR HY000: Can't create table 'test.t1' (errno: 140)
show warnings;
Level Code Message
Error 1296 Got error 771 'Given NODEGROUP doesn't exist in this cluster' from NDB
Error 1005 Can't create table './test/t1.frm' (errno: 140)
Error 1005 Can't create table 'test.t1' (errno: 140)
CREATE TABLE t1 (
a int not null,
b int not null,
......
......@@ -5,6 +5,8 @@ reset query cache;
flush status;
drop table if exists t1,t2,t3,t4,t11,t21;
drop database if exists mysqltest;
drop table if exists ```a`;
drop view if exists v1;
create table t1 (a int not null);
insert into t1 values (1),(2),(3);
select * from t1;
......
......@@ -135,3 +135,14 @@ d c
bar 2
foo 1
drop table t1, t2;
create temporary table t1 (a int);
insert into t1 values (4711);
select * from t1;
a
4711
truncate t1;
insert into t1 values (42);
select * from t1;
a
42
drop table t1;
......@@ -384,7 +384,7 @@ set sql_quote_show_create=1;
set sql_safe_updates=1;
set sql_select_limit=1;
set sql_warnings=1;
set global table_cache=100;
set global table_open_cache=100;
set storage_engine=myisam;
set global thread_cache_size=100;
set timestamp=1, timestamp=default;
......@@ -516,11 +516,11 @@ SET GLOBAL MYISAM_DATA_POINTER_SIZE= 7;
SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE';
Variable_name Value
myisam_data_pointer_size 7
SET GLOBAL table_cache=-1;
SHOW VARIABLES LIKE 'table_cache';
SET GLOBAL table_open_cache=-1;
SHOW VARIABLES LIKE 'table_open_cache';
Variable_name Value
table_cache 1
SET GLOBAL table_cache=DEFAULT;
table_open_cache 1
SET GLOBAL table_open_cache=DEFAULT;
set character_set_results=NULL;
select ifnull(@@character_set_results,"really null");
ifnull(@@character_set_results,"really null")
......
drop database if exists mysqltest;
drop view if exists v1;
grant create view on test.* to test@localhost;
show grants for test@localhost;
Grants for test@localhost
......
......@@ -9,10 +9,63 @@ drop table if exists t1,t2;
--enable_warnings
create table t1 (a int not null auto_increment primary key);
insert into t1 values(0);
# Test flush with a read lock
lock table t1 read;
flush table t1;
check table t1;
unlock tables;
# Test flush with two read locks in different threads
lock table t1 read;
connect (locker,localhost,root,,test);
connection locker;
lock table t1 read;
connection default;
send flush table t1;
connection locker;
--sleep 2
select * from t1;
unlock tables;
connection default;
reap;
select * from t1;
unlock tables;
# Test flush with a write lock and a waiting read lock
lock table t1 write;
connection locker;
send lock table t1 read;
connection default;
sleep 2;
flush table t1;
select * from t1;
unlock tables;
connection locker;
reap;
unlock tables;
connection default;
# Test flush with a read lock and a waiting write lock
lock table t1 read;
connection locker;
send lock table t1 write;
connection default;
sleep 2;
flush table t1;
select * from t1;
unlock tables;
connection locker;
reap;
unlock tables;
select * from t1;
connection default;
drop table t1;
disconnect locker;
#
# In the following test FLUSH TABLES produces a deadlock
......
......@@ -51,6 +51,9 @@ create table t4 (a int not null, b char(10), key(a)) engine=MERGE UNION=(t1,t2);
select * from t4;
--error 1168
alter table t4 add column c int;
flush tables;
--error 1168
select * from t4;
#
# Test tables in different databases
......
......@@ -495,6 +495,11 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
#
# Test alter table and a concurrent multi update
# (This will force update to reopen tables)
#
create table t1 (a int, b int);
insert into t1 values (1, 2), (2, 3), (3, 4);
create table t2 (a int);
......@@ -511,6 +516,7 @@ send alter table t1 add column c int default 100 after a;
connect (updater,localhost,root,,test);
connection updater;
sleep 2;
send update t1, v1 set t1.b=t1.a+t1.b+v1.b where t1.a=v1.a;
connection locker;
......
......@@ -14,6 +14,10 @@ flush status;
--disable_warnings
drop table if exists t1,t2,t3,t4,t11,t21;
drop database if exists mysqltest;
# Fix possible leftovers from other tests
drop table if exists ```a`;
drop view if exists v1;
--enable_warnings
#
......
......@@ -115,3 +115,15 @@ select d, c from t1 left join t2 on b = c where a = 3 order by d;
drop table t1, t2;
# End of 4.1 tests
#
# Test truncate with temporary tables
#
create temporary table t1 (a int);
insert into t1 values (4711);
select * from t1;
truncate t1;
insert into t1 values (42);
select * from t1;
drop table t1;
......@@ -258,7 +258,7 @@ set sql_quote_show_create=1;
set sql_safe_updates=1;
set sql_select_limit=1;
set sql_warnings=1;
set global table_cache=100;
set global table_open_cache=100;
set storage_engine=myisam;
set global thread_cache_size=100;
set timestamp=1, timestamp=default;
......@@ -390,9 +390,9 @@ SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE';
# Bug #6958: negative arguments to integer options wrap around
#
SET GLOBAL table_cache=-1;
SHOW VARIABLES LIKE 'table_cache';
SET GLOBAL table_cache=DEFAULT;
SET GLOBAL table_open_cache=-1;
SHOW VARIABLES LIKE 'table_open_cache';
SET GLOBAL table_open_cache=DEFAULT;
#
# Bug#12363: character_set_results is nullable,
......
# Can't test with embedded server
-- source include/not_embedded.inc
--disable_warnings
drop database if exists mysqltest;
drop view if exists v1;
--enable_warnings
# simple test of grants
grant create view on test.* to test@localhost;
show grants for test@localhost;
......
......@@ -152,3 +152,15 @@
obj:*/libz.so.*
fun:gzflush
}
#
# Warning from my_thread_init because mysqld dies before the kill thread exits
#
{
my_thread_init kill thread memory loss second
Memcheck:Leak
fun:calloc
fun:my_thread_init
fun:kill_server_thread
}
......@@ -109,7 +109,7 @@ static inline void hash_free_elements(HASH *hash)
void hash_free(HASH *hash)
{
DBUG_ENTER("hash_free");
DBUG_PRINT("enter",("hash: 0x%lxd",hash));
DBUG_PRINT("enter",("hash: 0x%lx", hash));
hash_free_elements(hash);
hash->free= 0;
......
......@@ -73,7 +73,7 @@ uint dirname_part(my_string to, const char *name)
SYNOPSIS
convert_dirname()
to Store result here
from Original filename
from Original filename. May be == to
from_end Pointer at end of filename (normally end \0)
IMPLEMENTATION
......@@ -101,6 +101,7 @@ char *convert_dirname(char *to, const char *from, const char *from_end)
#ifdef BACKSLASH_MBTAIL
CHARSET_INFO *fs= fs_character_set();
#endif
DBUG_ENTER("convert_dirname");
/* We use -2 here, because we need place for the last FN_LIBCHAR */
if (!from_end || (from_end - from) > FN_REFLEN-2)
......@@ -149,5 +150,5 @@ char *convert_dirname(char *to, const char *from, const char *from_end)
*to++=FN_LIBCHAR;
*to=0;
}
return to; /* Pointer to end of dir */
DBUG_RETURN(to); /* Pointer to end of dir */
} /* convert_dirname */
......@@ -107,16 +107,27 @@ void pack_dirname(my_string to, const char *from)
} /* pack_dirname */
/* remove unwanted chars from dirname */
/* if "/../" removes prev dir; "/~/" removes all before ~ */
/* "//" is same as "/", except on Win32 at start of a file */
/* "/./" is removed */
/* Unpacks home_dir if "~/.." used */
/* Unpacks current dir if if "./.." used */
/*
remove unwanted chars from dirname
uint cleanup_dirname(register my_string to, const char *from)
/* to may be == from */
SYNOPSIS
cleanup_dirname()
to Store result here
from Dirname to fix. May be same as to
IMPLEMENTATION
"/../" removes prev dir
"/~/" removes all before ~
//" is same as "/", except on Win32 at start of a file
"/./" is removed
Unpacks home_dir if "~/.." used
Unpacks current dir if "./.." used
RETURN
# length of new name
*/
uint cleanup_dirname(register my_string to, const char *from)
{
reg5 uint length;
reg2 my_string pos;
......
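The new SYNOPSIS comment above documents the cleanup_dirname() rules without a worked example. Below is a minimal, self-contained C++ sketch of just the "/./" and "/../" rules, written as a standalone illustration of the documented semantics; it is not the mysys implementation and it skips the "~", Win32 "//" and home/current-dir unpacking cases.

// Minimal illustration of two of the cleanup_dirname() rules documented above:
// "/./" is removed and "/../" drops the previous directory component.
// NOT the mysys implementation; "~", leading "//" on Win32 and home-dir
// unpacking are deliberately ignored.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::string cleanup_dirname_sketch(const std::string &from)
{
  std::vector<std::string> parts;
  std::stringstream ss(from);
  std::string item;
  while (std::getline(ss, item, '/'))
  {
    if (item.empty() || item == ".")
      continue;                        /* "//" and "/./" collapse to "/" */
    if (item == "..")
    {
      if (!parts.empty())
        parts.pop_back();              /* "/../" removes the previous dir */
      continue;
    }
    parts.push_back(item);
  }
  std::string to= "/";
  for (const std::string &part : parts)
    to+= part + "/";
  return to;                           /* like a dirname, always ends in '/' */
}

int main()
{
  /* "/usr/local/../bin/./" cleans up to "/usr/bin/" */
  std::cout << cleanup_dirname_sketch("/usr/local/../bin/./") << "\n";
  return 0;
}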
......@@ -28,9 +28,12 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
char *end, *copy;
char buff[FN_REFLEN];
DYNAMIC_ARRAY t_arr;
DBUG_ENTER("init_tmpdir");
DBUG_PRINT("enter", ("pathlist: %s", pathlist ? pathlist : "NULL"));
pthread_mutex_init(&tmpdir->mutex, MY_MUTEX_INIT_FAST);
if (my_init_dynamic_array(&t_arr, sizeof(char*), 1, 5))
return TRUE;
goto err;
if (!pathlist || !pathlist[0])
{
/* Get default temporary directory */
......@@ -46,12 +49,13 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
}
do
{
uint length;
end=strcend(pathlist, DELIM);
convert_dirname(buff, pathlist, end);
if (!(copy=my_strdup(buff, MYF(MY_WME))))
return TRUE;
if (insert_dynamic(&t_arr, (gptr)&copy))
return TRUE;
strmake(buff, pathlist, (uint) (end-pathlist));
length= cleanup_dirname(buff, buff);
if (!(copy= my_strdup_with_length(buff, length, MYF(MY_WME))) ||
insert_dynamic(&t_arr, (gptr) &copy))
DBUG_RETURN(TRUE);
pathlist=end+1;
}
while (*end);
......@@ -59,12 +63,20 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
tmpdir->list=(char **)t_arr.buffer;
tmpdir->max=t_arr.elements-1;
tmpdir->cur=0;
return FALSE;
DBUG_RETURN(FALSE);
err:
delete_dynamic(&t_arr); /* Safe to free */
pthread_mutex_destroy(&tmpdir->mutex);
DBUG_RETURN(TRUE);
}
char *my_tmpdir(MY_TMPDIR *tmpdir)
{
char *dir;
if (!tmpdir->max)
return tmpdir->list[0];
pthread_mutex_lock(&tmpdir->mutex);
dir=tmpdir->list[tmpdir->cur];
tmpdir->cur= (tmpdir->cur == tmpdir->max) ? 0 : tmpdir->cur+1;
......
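For context, my_tmpdir() in the hunk above hands out the configured temporary directories round-robin, locking only when more than one directory is configured. A stripped-down, self-contained C++ sketch of that selection policy follows; the class and names are invented for illustration, and std::mutex stands in for the pthread mutex.

// Round-robin picker mirroring the my_tmpdir() logic above: a single
// configured directory needs no locking; otherwise 'cur' is advanced under a
// mutex and wraps after the last element. All names here are invented.
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

class TmpDirList
{
  std::vector<std::string> list_;
  size_t cur_;
  std::mutex mutex_;
public:
  explicit TmpDirList(const std::vector<std::string> &dirs)
    : list_(dirs), cur_(0) {}

  std::string next()
  {
    if (list_.size() == 1)
      return list_[0];                           /* single dir: no locking */
    std::lock_guard<std::mutex> guard(mutex_);
    std::string dir= list_[cur_];
    cur_= (cur_ + 1 == list_.size()) ? 0 : cur_ + 1;  /* wrap around */
    return dir;
  }
};

int main()
{
  std::vector<std::string> dirs;
  dirs.push_back("/tmp/a/");
  dirs.push_back("/tmp/b/");
  dirs.push_back("/tmp/c/");
  TmpDirList tmpdirs(dirs);
  for (int i= 0; i < 5; i++)
    std::cout << tmpdirs.next() << "\n";         /* a, b, c, a, b */
  return 0;
}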
......@@ -396,6 +396,7 @@ char *strdup_root(MEM_ROOT *root,const char *str)
return strmake_root(root, str, (uint) strlen(str));
}
char *strmake_root(MEM_ROOT *root,const char *str, uint len)
{
char *pos;
......
......@@ -219,7 +219,7 @@ static handler* example_create_handler(TABLE *table)
}
ha_example::ha_example(TABLE *table_arg)
ha_example::ha_example(TABLE_SHARE *table_arg)
:handler(&example_hton, table_arg)
{}
......
......@@ -45,7 +45,7 @@ class ha_example: public handler
EXAMPLE_SHARE *share; /* Shared lock info */
public:
ha_example(TABLE *table_arg);
ha_example(TABLE_SHARE *table_arg);
~ha_example()
{
}
......
......@@ -55,7 +55,7 @@
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;
static handler* tina_create_handler(TABLE *table);
static handler *tina_create_handler(TABLE_SHARE *table);
handlerton tina_hton= {
"CSV",
......@@ -285,17 +285,17 @@ byte * find_eoln(byte *data, off_t begin, off_t end)
}
static handler* tina_create_handler(TABLE *table)
static handler *tina_create_handler(TABLE_SHARE *table)
{
return new ha_tina(table);
}
ha_tina::ha_tina(TABLE *table_arg)
ha_tina::ha_tina(TABLE_SHARE *table_arg)
:handler(&tina_hton, table_arg),
/*
These definitions are found in hanler.h
These are not probably completely right.
These definitions are found in handler.h
They are probably not completely right.
*/
current_position(0), next_position(0), chain_alloced(0),
chain_size(DEFAULT_CHAIN_LENGTH), records_is_known(0)
......@@ -308,6 +308,7 @@ ha_tina::ha_tina(TABLE *table_arg)
/*
Encode a buffer into the quoted format.
*/
int ha_tina::encode_quote(byte *buf)
{
char attribute_buffer[1024];
......
......@@ -56,7 +56,7 @@ class ha_tina: public handler
bool records_is_known;
public:
ha_tina(TABLE *table_arg);
ha_tina(TABLE_SHARE *table_arg);
~ha_tina()
{
if (chain_alloced)
......
......@@ -135,7 +135,7 @@ static HASH archive_open_tables;
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
/* Static declarations for handlerton */
static handler *archive_create_handler(TABLE *table);
static handler *archive_create_handler(TABLE_SHARE *table);
/* dummy handlerton - only to have something to return from archive_db_init */
......@@ -172,7 +172,7 @@ handlerton archive_hton = {
HTON_NO_FLAGS
};
static handler *archive_create_handler(TABLE *table)
static handler *archive_create_handler(TABLE_SHARE *table)
{
return new ha_archive(table);
}
......@@ -242,7 +242,7 @@ int archive_db_end(ha_panic_function type)
return 0;
}
ha_archive::ha_archive(TABLE *table_arg)
ha_archive::ha_archive(TABLE_SHARE *table_arg)
:handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
{
/* Set our original buffer from pre-allocated memory */
......
......@@ -58,7 +58,7 @@ class ha_archive: public handler
bool bulk_insert; /* If we are performing a bulk insert */
public:
ha_archive(TABLE *table_arg);
ha_archive(TABLE_SHARE *table_arg);
~ha_archive()
{
}
......
......@@ -84,7 +84,7 @@ class ha_berkeley: public handler
DBT *get_pos(DBT *to, byte *pos);
public:
ha_berkeley(TABLE *table_arg);
ha_berkeley(TABLE_SHARE *table_arg);
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
ulong index_flags(uint idx, uint part, bool all_parts) const;
......
......@@ -24,7 +24,7 @@
/* Static declarations for handlerton */
static handler *blackhole_create_handler(TABLE *table);
static handler *blackhole_create_handler(TABLE_SHARE *table);
/* Blackhole storage engine handlerton */
......@@ -63,7 +63,7 @@ handlerton blackhole_hton= {
};
static handler *blackhole_create_handler(TABLE *table)
static handler *blackhole_create_handler(TABLE_SHARE *table)
{
return new ha_blackhole(table);
}
......@@ -73,7 +73,7 @@ static handler *blackhole_create_handler(TABLE *table)
** BLACKHOLE tables
*****************************************************************************/
ha_blackhole::ha_blackhole(TABLE *table_arg)
ha_blackhole::ha_blackhole(TABLE_SHARE *table_arg)
:handler(&blackhole_hton, table_arg)
{}
......@@ -112,13 +112,12 @@ int ha_blackhole::create(const char *name, TABLE *table_arg,
const char *ha_blackhole::index_type(uint key_number)
{
DBUG_ENTER("ha_blackhole::index_type");
DBUG_RETURN((table->key_info[key_number].flags & HA_FULLTEXT) ?
DBUG_RETURN((table_share->key_info[key_number].flags & HA_FULLTEXT) ?
"FULLTEXT" :
(table->key_info[key_number].flags & HA_SPATIAL) ?
(table_share->key_info[key_number].flags & HA_SPATIAL) ?
"SPATIAL" :
(table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
"RTREE" :
"BTREE");
(table_share->key_info[key_number].algorithm ==
HA_KEY_ALG_RTREE) ? "RTREE" : "BTREE");
}
int ha_blackhole::write_row(byte * buf)
......
......@@ -28,7 +28,7 @@ class ha_blackhole: public handler
THR_LOCK thr_lock;
public:
ha_blackhole(TABLE *table_arg);
ha_blackhole(TABLE_SHARE *table_arg);
~ha_blackhole()
{
}
......@@ -49,7 +49,7 @@ class ha_blackhole: public handler
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}
......
......@@ -173,7 +173,7 @@ class ha_federated: public handler
int stash_remote_error();
public:
ha_federated(TABLE *table_arg);
ha_federated(TABLE_SHARE *table_arg);
~ha_federated() {}
/* The name that will be used for display purposes */
const char *table_type() const { return "FEDERATED"; }
......@@ -232,8 +232,7 @@ class ha_federated: public handler
*/
double scan_time()
{
DBUG_PRINT("info",
("records %d", records));
DBUG_PRINT("info", ("records %lu", (ulong) records));
return (double)(records*1000);
}
/*
......
......@@ -24,7 +24,7 @@
#include "ha_heap.h"
static handler *heap_create_handler(TABLE *table);
static handler *heap_create_handler(TABLE_SHARE *table);
handlerton heap_hton= {
"MEMORY",
......@@ -59,7 +59,7 @@ handlerton heap_hton= {
HTON_CAN_RECREATE
};
static handler *heap_create_handler(TABLE *table)
static handler *heap_create_handler(TABLE_SHARE *table)
{
return new ha_heap(table);
}
......@@ -69,7 +69,7 @@ static handler *heap_create_handler(TABLE *table)
** HEAP tables
*****************************************************************************/
ha_heap::ha_heap(TABLE *table_arg)
ha_heap::ha_heap(TABLE_SHARE *table_arg)
:handler(&heap_hton, table_arg), file(0), records_changed(0),
key_stats_ok(0)
{}
......@@ -490,8 +490,7 @@ THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
int ha_heap::delete_table(const char *name)
{
char buff[FN_REFLEN];
int error= heap_delete_table(fn_format(buff,name,"","",
MY_REPLACE_EXT|MY_UNPACK_FILENAME));
int error= heap_delete_table(name);
return error == ENOENT ? 0 : error;
}
......@@ -537,7 +536,6 @@ int ha_heap::create(const char *name, TABLE *table_arg,
ha_rows max_rows;
HP_KEYDEF *keydef;
HA_KEYSEG *seg;
char buff[FN_REFLEN];
int error;
TABLE_SHARE *share= table_arg->s;
bool found_real_auto_increment= 0;
......@@ -618,7 +616,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
}
}
mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size /
max_rows = (ha_rows) (table_arg->in_use->variables.max_heap_table_size /
mem_per_row);
if (table_arg->found_next_number_field)
{
......@@ -633,8 +631,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
hp_create_info.with_auto_increment= found_real_auto_increment;
max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row);
error= heap_create(fn_format(buff,name,"","",
MY_REPLACE_EXT|MY_UNPACK_FILENAME),
error= heap_create(name,
keys, keydef, share->reclength,
(ulong) ((share->max_rows < max_rows &&
share->max_rows) ?
......
......@@ -31,7 +31,7 @@ class ha_heap: public handler
uint records_changed;
bool key_stats_ok;
public:
ha_heap(TABLE *table);
ha_heap(TABLE_SHARE *table);
~ha_heap() {}
const char *table_type() const
{
......@@ -40,7 +40,7 @@ class ha_heap: public handler
}
const char *index_type(uint inx)
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
"BTREE" : "HASH");
}
/* Rows also use a fixed-size format */
......@@ -54,7 +54,7 @@ class ha_heap: public handler
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
HA_ONLY_WHOLE_INDEX);
}
......
......@@ -205,7 +205,7 @@ static int innobase_rollback(THD* thd, bool all);
static int innobase_rollback_to_savepoint(THD* thd, void *savepoint);
static int innobase_savepoint(THD* thd, void *savepoint);
static int innobase_release_savepoint(THD* thd, void *savepoint);
static handler *innobase_create_handler(TABLE *table);
static handler *innobase_create_handler(TABLE_SHARE *table);
handlerton innobase_hton = {
"InnoDB",
......@@ -245,7 +245,7 @@ handlerton innobase_hton = {
};
static handler *innobase_create_handler(TABLE *table)
static handler *innobase_create_handler(TABLE_SHARE *table)
{
return new ha_innobase(table);
}
......@@ -826,7 +826,7 @@ check_trx_exists(
/*************************************************************************
Construct ha_innobase handler. */
ha_innobase::ha_innobase(TABLE *table_arg)
ha_innobase::ha_innobase(TABLE_SHARE *table_arg)
:handler(&innobase_hton, table_arg),
int_table_flags(HA_REC_NOT_IN_SEQ |
HA_NULL_IN_KEY |
......@@ -4820,8 +4820,8 @@ ha_innobase::create(
/* Look for a primary key */
primary_key_no= (table->s->primary_key != MAX_KEY ?
(int) table->s->primary_key :
primary_key_no= (form->s->primary_key != MAX_KEY ?
(int) form->s->primary_key :
-1);
/* Our function row_get_mysql_key_number_for_index assumes
......
......@@ -81,7 +81,7 @@ class ha_innobase: public handler
/* Init values for the class: */
public:
ha_innobase(TABLE *table_arg);
ha_innobase(TABLE_SHARE *table_arg);
~ha_innobase() {}
/*
Get the row type from the storage engine. If this method returns
......
......@@ -50,7 +50,7 @@ TYPELIB myisam_stats_method_typelib= {
** MyISAM tables
*****************************************************************************/
static handler *myisam_create_handler(TABLE *table);
static handler *myisam_create_handler(TABLE_SHARE *table);
/* MyISAM handlerton */
......@@ -92,7 +92,7 @@ handlerton myisam_hton= {
};
static handler *myisam_create_handler(TABLE *table)
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
......@@ -178,7 +178,7 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
}
ha_myisam::ha_myisam(TABLE *table_arg)
ha_myisam::ha_myisam(TABLE_SHARE *table_arg)
:handler(&myisam_hton, table_arg), file(0),
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
......@@ -358,7 +358,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name = "check";
param.db_name= table->s->db;
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag = check_opt->flags | T_CHECK | T_SILENT;
param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method;
......@@ -446,7 +446,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name= "analyze";
param.db_name= table->s->db;
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
T_DONT_CHECK_CHECKSUM);
......@@ -474,7 +474,7 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
HA_CHECK_OPT tmp_check_opt;
char *backup_dir= thd->lex->backup_dir;
char src_path[FN_REFLEN], dst_path[FN_REFLEN];
const char *table_name= table->s->table_name;
const char *table_name= table->s->table_name.str;
int error;
const char* errmsg;
DBUG_ENTER("restore");
......@@ -483,8 +483,8 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
MI_NAME_DEXT))
DBUG_RETURN(HA_ADMIN_INVALID);
if (my_copy(src_path, fn_format(dst_path, table->s->path, "",
MI_NAME_DEXT, 4), MYF(MY_WME)))
strxmov(dst_path, table->s->normalized_path.str, MI_NAME_DEXT, NullS);
if (my_copy(src_path, dst_path, MYF(MY_WME)))
{
error= HA_ADMIN_FAILED;
errmsg= "Failed in my_copy (Error %d)";
......@@ -501,8 +501,8 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "restore";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg, my_errno);
DBUG_RETURN(error);
......@@ -514,7 +514,7 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
{
char *backup_dir= thd->lex->backup_dir;
char src_path[FN_REFLEN], dst_path[FN_REFLEN];
const char *table_name= table->s->table_name;
const char *table_name= table->s->table_name.str;
int error;
const char *errmsg;
DBUG_ENTER("ha_myisam::backup");
......@@ -527,9 +527,8 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
goto err;
}
if (my_copy(fn_format(src_path, table->s->path, "", reg_ext,
MY_UNPACK_FILENAME),
dst_path,
strxmov(src_path, table->s->normalized_path.str, reg_ext, NullS);
if (my_copy(src_path, dst_path,
MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE)))
{
error = HA_ADMIN_FAILED;
......@@ -546,9 +545,8 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
goto err;
}
if (my_copy(fn_format(src_path, table->s->path, "", MI_NAME_DEXT,
MY_UNPACK_FILENAME),
dst_path,
strxmov(src_path, table->s->normalized_path.str, MI_NAME_DEXT, NullS);
if (my_copy(src_path, dst_path,
MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE)))
{
errmsg = "Failed copying .MYD file (errno: %d)";
......@@ -563,8 +561,8 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "backup";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag = 0;
mi_check_print_error(&param,errmsg, my_errno);
DBUG_RETURN(error);
......@@ -655,7 +653,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
ha_rows rows= file->state->records;
DBUG_ENTER("ha_myisam::repair");
param.db_name= table->s->db;
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.tmpfile_createflag = O_RDWR | O_TRUNC;
param.using_global_keycache = 1;
......@@ -826,8 +824,8 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "assign_to_keycache";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg);
}
......@@ -894,8 +892,8 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "preload_keys";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg);
DBUG_RETURN(error);
......@@ -1149,8 +1147,8 @@ bool ha_myisam::check_and_repair(THD *thd)
old_query= thd->query;
old_query_length= thd->query_length;
pthread_mutex_lock(&LOCK_thread_count);
thd->query= (char*) table->s->table_name;
thd->query_length= (uint32) strlen(table->s->table_name);
thd->query= table->s->table_name.str;
thd->query_length= table->s->table_name.length;
pthread_mutex_unlock(&LOCK_thread_count);
if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt))
......@@ -1337,6 +1335,10 @@ void ha_myisam::info(uint flag)
ref_length= info.reflength;
share->db_options_in_use= info.options;
block_size= myisam_block_size;
/* Update share */
if (share->tmp_table == NO_TMP_TABLE)
pthread_mutex_lock(&share->mutex);
share->keys_in_use.set_prefix(share->keys);
share->keys_in_use.intersect_extended(info.key_map);
share->keys_for_keyread.intersect(share->keys_in_use);
......@@ -1345,6 +1347,9 @@ void ha_myisam::info(uint flag)
memcpy((char*) table->key_info[0].rec_per_key,
(char*) info.rec_per_key,
sizeof(table->key_info[0].rec_per_key)*share->key_parts);
if (share->tmp_table == NO_TMP_TABLE)
pthread_mutex_unlock(&share->mutex);
raid_type= info.raid_type;
raid_chunks= info.raid_chunks;
raid_chunksize= info.raid_chunksize;
......@@ -1353,7 +1358,7 @@ void ha_myisam::info(uint flag)
Set data_file_name and index_file_name to point at the symlink value
if table is symlinked (Ie; Real name is not same as generated name)
*/
data_file_name=index_file_name=0;
data_file_name= index_file_name= 0;
fn_format(name_buff, file->filename, "", MI_NAME_DEXT, 2);
if (strcmp(name_buff, info.data_file_name))
data_file_name=info.data_file_name;
......@@ -1448,7 +1453,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
MI_KEYDEF *keydef;
MI_COLUMNDEF *recinfo,*recinfo_pos;
HA_KEYSEG *keyseg;
TABLE_SHARE *share= table->s;
TABLE_SHARE *share= table_arg->s;
uint options= share->db_options_in_use;
DBUG_ENTER("ha_myisam::create");
......
......@@ -43,7 +43,7 @@ class ha_myisam: public handler
int repair(THD *thd, MI_CHECK &param, bool optimize);
public:
ha_myisam(TABLE *table_arg);
ha_myisam(TABLE_SHARE *table_arg);
~ha_myisam() {}
const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
......@@ -51,7 +51,7 @@ class ha_myisam: public handler
ulong table_flags() const { return int_table_flags; }
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}
......
......@@ -32,7 +32,7 @@
** MyISAM MERGE tables
*****************************************************************************/
static handler *myisammrg_create_handler(TABLE *table);
static handler *myisammrg_create_handler(TABLE_SHARE *table);
/* MyISAM MERGE handlerton */
......@@ -69,13 +69,13 @@ handlerton myisammrg_hton= {
HTON_CAN_RECREATE
};
static handler *myisammrg_create_handler(TABLE *table)
static handler *myisammrg_create_handler(TABLE_SHARE *table)
{
return new ha_myisammrg(table);
}
ha_myisammrg::ha_myisammrg(TABLE *table_arg)
ha_myisammrg::ha_myisammrg(TABLE_SHARE *table_arg)
:handler(&myisammrg_hton, table_arg), file(0)
{}
......@@ -302,7 +302,6 @@ void ha_myisammrg::info(uint flag)
errkey = info.errkey;
table->s->keys_in_use.set_prefix(table->s->keys);
table->s->db_options_in_use= info.options;
table->s->is_view= 1;
mean_rec_length= info.reclength;
block_size=0;
update_time=0;
......@@ -456,9 +455,9 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
for (pos= table_names; tables; tables= tables->next_local)
{
const char *table_name;
TABLE **tbl= 0;
TABLE *tbl= 0;
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
tbl= find_temporary_table(thd, tables->db, tables->table_name);
tbl= find_temporary_table(thd, tables);
if (!tbl)
{
/*
......@@ -487,7 +486,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
else
table_name= (*tbl)->s->path;
table_name= tbl->s->path.str;
*pos++= table_name;
}
*pos=0;
......@@ -503,6 +502,7 @@ void ha_myisammrg::append_create_info(String *packet)
const char *current_db;
uint db_length;
THD *thd= current_thd;
MYRG_TABLE *open_table, *first;
if (file->merge_insert_method != MERGE_INSERT_DISABLED)
{
......@@ -510,10 +510,9 @@ void ha_myisammrg::append_create_info(String *packet)
packet->append(get_type(&merge_insert_method,file->merge_insert_method-1));
}
packet->append(STRING_WITH_LEN(" UNION=("));
MYRG_TABLE *open_table,*first;
current_db= table->s->db;
db_length= (uint) strlen(current_db);
current_db= table->s->db.str;
db_length= table->s->db.length;
for (first=open_table=file->open_tables ;
open_table != file->end_table ;
......
......@@ -28,7 +28,7 @@ class ha_myisammrg: public handler
MYRG_INFO *file;
public:
ha_myisammrg(TABLE *table_arg);
ha_myisammrg(TABLE_SHARE *table_arg);
~ha_myisammrg() {}
const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
......@@ -37,11 +37,12 @@ class ha_myisammrg: public handler
{
return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE);
HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE |
HA_NO_COPY_ON_ALTER);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}
......
......@@ -478,7 +478,7 @@ class Thd_ndb
class ha_ndbcluster: public handler
{
public:
ha_ndbcluster(TABLE *table);
ha_ndbcluster(TABLE_SHARE *table);
~ha_ndbcluster();
int open(const char *name, int mode, uint test_if_locked);
......@@ -620,7 +620,7 @@ static void set_tabname(const char *pathname, char *tabname);
const char *path,
const char *db,
const char *table_name);
int drop_table();
int intern_drop_table();
int create_index(const char *name, KEY *key_info, bool unique);
int create_ordered_index(const char *name, KEY *key_info);
int create_unique_index(const char *name, KEY *key_info);
......
......@@ -66,7 +66,7 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
MODULE create/delete handler object
****************************************************************************/
static handler* partition_create_handler(TABLE *table);
static handler *partition_create_handler(TABLE_SHARE *share);
handlerton partition_hton = {
"partition",
......@@ -101,31 +101,25 @@ handlerton partition_hton = {
HTON_NOT_USER_SELECTABLE
};
static handler* partition_create_handler(TABLE *table)
static handler *partition_create_handler(TABLE_SHARE *share)
{
return new ha_partition(table);
return new ha_partition(share);
}
ha_partition::ha_partition(TABLE *table)
:handler(&partition_hton, table), m_part_info(NULL), m_create_handler(FALSE),
ha_partition::ha_partition(TABLE_SHARE *share)
:handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE),
m_is_sub_partitioned(0)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
init_handler_variables();
if (table)
{
if (table->s->part_info)
{
m_part_info= table->s->part_info;
m_is_sub_partitioned= is_sub_partitioned(m_part_info);
}
}
DBUG_VOID_RETURN;
}
ha_partition::ha_partition(partition_info *part_info)
:handler(&partition_hton, NULL), m_part_info(part_info), m_create_handler(TRUE),
:handler(&partition_hton, NULL), m_part_info(part_info),
m_create_handler(TRUE),
m_is_sub_partitioned(is_sub_partitioned(m_part_info))
{
......@@ -230,64 +224,64 @@ ha_partition::~ha_partition()
int ha_partition::ha_initialise()
{
handler **file_array, *file;
DBUG_ENTER("ha_partition::set_up_constants");
DBUG_ENTER("ha_partition::ha_initialise");
if (m_part_info)
if (m_create_handler)
{
m_tot_parts= get_tot_partitions(m_part_info);
DBUG_ASSERT(m_tot_parts > 0);
if (m_create_handler)
{
if (new_handlers_from_part_info())
DBUG_RETURN(1);
}
else if (get_from_handler_file(table->s->path))
{
my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
if (new_handlers_from_part_info())
DBUG_RETURN(1);
}
/*
We create all underlying table handlers here. We only do it if we have
access to the partition info. We do it in this special method to be
able to report allocation errors.
*/
/*
Set up table_flags, low_byte_first, primary_key_is_clustered and
has_transactions since they are called often in all kinds of places,
other parameters are calculated on demand.
HA_FILE_BASED is always set for partition handler since we use a
special file for handling names of partitions, engine types.
HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER,
HA_CAN_INSERT_DELAYED is disabled until further investigated.
*/
m_table_flags= m_file[0]->table_flags();
m_low_byte_first= m_file[0]->low_byte_first();
m_has_transactions= TRUE;
m_pkey_is_clustered= TRUE;
file_array= m_file;
do
{
file= *file_array;
if (m_low_byte_first != file->low_byte_first())
{
// Cannot have handlers with different endian
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
DBUG_RETURN(1);
}
if (!file->has_transactions())
m_has_transactions= FALSE;
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
m_table_flags&= file->table_flags();
} while (*(++file_array));
m_table_flags&= ~(HA_CAN_GEOMETRY & HA_CAN_FULLTEXT &
HA_CAN_SQL_HANDLER & HA_CAN_INSERT_DELAYED);
}
else if (!table_share || !table_share->normalized_path.str)
{
/*
TODO RONM:
Make sure that the tree works without partition defined, compiles
and goes through mysql-test-run.
Called with dummy table share (delete, rename and alter table)
Don't need to set-up table flags other than
HA_FILE_BASED here
*/
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
DBUG_RETURN(0);
}
else if (get_from_handler_file(table_share->normalized_path.str))
{
my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
DBUG_RETURN(1);
}
/*
We create all underlying table handlers here. We do it in this special
method to be able to report allocation errors.
Set up table_flags, low_byte_first, primary_key_is_clustered and
has_transactions since they are called often in all kinds of places,
other parameters are calculated on demand.
HA_FILE_BASED is always set for partition handler since we use a
special file for handling names of partitions, engine types.
HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER,
HA_CAN_INSERT_DELAYED is disabled until further investigated.
*/
m_table_flags= m_file[0]->table_flags();
m_low_byte_first= m_file[0]->low_byte_first();
m_has_transactions= TRUE;
m_pkey_is_clustered= TRUE;
file_array= m_file;
do
{
file= *file_array;
if (m_low_byte_first != file->low_byte_first())
{
// Cannot have handlers with different endian
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
DBUG_RETURN(1);
}
if (!file->has_transactions())
m_has_transactions= FALSE;
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
m_table_flags&= file->table_flags();
} while (*(++file_array));
m_table_flags&= ~(HA_CAN_GEOMETRY & HA_CAN_FULLTEXT &
HA_CAN_SQL_HANDLER & HA_CAN_INSERT_DELAYED);
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
DBUG_RETURN(0);
}
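The comment added to ha_partition::ha_initialise() above explains that the partition handler derives its table flags by intersecting the flags of every underlying handler, then masking the capabilities the partition layer cannot support and forcing HA_FILE_BASED. A small self-contained C++ sketch of that folding is shown below; the flag values are invented stand-ins, not the real HA_* constants from handler.h.

// Illustration of the capability folding described in the comment above:
// start from the first sub-handler's flags, AND in every other handler's
// flags, clear what the partition layer does not support and force
// HA_FILE_BASED / HA_REC_NOT_IN_SEQ. Flag values are invented for the example.
#include <cstdio>
#include <vector>

typedef unsigned long table_flags_t;

static const table_flags_t HA_CAN_GEOMETRY=       1UL << 0;
static const table_flags_t HA_CAN_FULLTEXT=       1UL << 1;
static const table_flags_t HA_CAN_INSERT_DELAYED= 1UL << 2;
static const table_flags_t HA_FILE_BASED=         1UL << 3;
static const table_flags_t HA_REC_NOT_IN_SEQ=     1UL << 4;

static table_flags_t partition_table_flags(const std::vector<table_flags_t> &parts)
{
  table_flags_t flags= parts[0];
  for (size_t i= 1; i < parts.size(); i++)
    flags&= parts[i];                  /* only capabilities common to all parts */
  flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_CAN_INSERT_DELAYED);
  flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;  /* always set by the partition layer */
  return flags;
}

int main()
{
  std::vector<table_flags_t> parts;
  parts.push_back(HA_CAN_FULLTEXT | HA_CAN_INSERT_DELAYED);  /* e.g. one engine */
  parts.push_back(HA_CAN_INSERT_DELAYED);                    /* e.g. another engine */
  std::printf("combined flags: 0x%lx\n", partition_table_flags(parts));
  return 0;
}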
......@@ -720,7 +714,7 @@ bool ha_partition::create_handlers()
bzero(m_file, alloc_len);
for (i= 0; i < m_tot_parts; i++)
{
if (!(m_file[i]= get_new_handler(table, current_thd->mem_root,
if (!(m_file[i]= get_new_handler(table_share, current_thd->mem_root,
(enum db_type) m_engine_array[i])))
DBUG_RETURN(TRUE);
DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
......@@ -764,7 +758,7 @@ bool ha_partition::new_handlers_from_part_info()
do
{
part_elem= part_it++;
if (!(m_file[i]= get_new_handler(table, thd->mem_root,
if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
part_elem->engine_type)))
goto error;
DBUG_PRINT("info", ("engine_type: %u", (uint) part_elem->engine_type));
......@@ -772,7 +766,7 @@ bool ha_partition::new_handlers_from_part_info()
{
for (j= 0; j < m_part_info->no_subparts; j++)
{
if (!(m_file[i]= get_new_handler(table, thd->mem_root,
if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
part_elem->engine_type)))
goto error;
DBUG_PRINT("info", ("engine_type: %u", (uint) part_elem->engine_type));
......@@ -913,7 +907,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
do
{
create_partition_name(name_buff, name, name_buffer_ptr);
if ((error= (*file)->ha_open((const char*) name_buff, mode,
if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
test_if_locked)))
goto err_handler;
m_no_locks+= (*file)->lock_count();
......
......@@ -122,6 +122,11 @@ class ha_partition :public handler
PARTITION_SHARE *share; /* Shared lock info */
public:
void set_part_info(partition_info *part_info)
{
m_part_info= part_info;
m_is_sub_partitioned= is_sub_partitioned(part_info);
}
/*
-------------------------------------------------------------------------
MODULE create/delete handler object
......@@ -133,7 +138,7 @@ class ha_partition :public handler
partition handler.
-------------------------------------------------------------------------
*/
ha_partition(TABLE * table);
ha_partition(TABLE_SHARE * table);
ha_partition(partition_info * part_info);
~ha_partition();
/*
......
......@@ -86,6 +86,7 @@
#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
#define HA_NO_COPY_ON_ALTER (1 << 31)
/* Flags for partition handlers */
#define HA_CAN_PARTITION (1 << 0) /* Partition support */
......@@ -311,6 +312,7 @@ typedef struct xid_t XID;
struct st_table;
typedef struct st_table TABLE;
typedef struct st_table_share TABLE_SHARE;
struct st_foreign_key_info;
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
typedef bool (stat_print_fn)(THD *thd, const char *type, const char *file,
......@@ -411,7 +413,7 @@ typedef struct
void *(*create_cursor_read_view)();
void (*set_cursor_read_view)(void *);
void (*close_cursor_read_view)(void *);
handler *(*create)(TABLE *table);
handler *(*create)(TABLE_SHARE *table);
void (*drop_database)(char* path);
int (*panic)(enum ha_panic_function flag);
int (*release_temporary_latches)(THD *thd);
......@@ -739,8 +741,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
KEY *key_info,
const key_range *key_spec,
part_id_range *part_spec);
bool mysql_unpack_partition(THD *thd, uchar *part_buf, uint part_info_len,
TABLE* table, enum db_type default_db_type);
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
uint part_info_len, TABLE *table,
enum db_type default_db_type);
#endif
......@@ -765,7 +768,8 @@ class handler :public Sql_alloc
friend class ha_partition;
#endif
protected:
struct st_table *table; /* The table definition */
struct st_table_share *table_share; /* The table definition */
struct st_table *table; /* The current open table */
virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
......@@ -826,8 +830,8 @@ class handler :public Sql_alloc
MY_BITMAP *read_set;
MY_BITMAP *write_set;
handler(const handlerton *ht_arg, TABLE *table_arg) :table(table_arg),
ht(ht_arg),
handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), ht(ht_arg),
ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
delete_length(0), auto_increment_value(0),
records(0), deleted(0), mean_rec_length(0),
......@@ -839,16 +843,19 @@ class handler :public Sql_alloc
{}
virtual ~handler(void)
{
ha_deallocate_read_write_set();
/* TODO: DBUG_ASSERT(inited == NONE); */
}
virtual int ha_initialise();
int ha_open(const char *name, int mode, int test_if_locked);
int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
bool update_auto_increment();
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
void change_table_ptr(TABLE *table_arg) { table=table_arg; }
void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
table= table_arg;
table_share= share;
}
virtual double scan_time()
{ return ulonglong2double(data_file_length) / IO_SIZE + 2; }
virtual double read_time(uint index, uint ranges, ha_rows rows)
......@@ -1034,7 +1041,6 @@ class handler :public Sql_alloc
}
void ha_set_primary_key_in_read_set();
int ha_allocate_read_write_set(ulong no_fields);
void ha_deallocate_read_write_set();
void ha_clear_all_set();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
......@@ -1408,7 +1414,8 @@ extern ulong total_ha, total_ha_2pc;
/* lookups */
enum db_type ha_resolve_by_name(const char *name, uint namelen);
const char *ha_get_storage_engine(enum db_type db_type);
handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type);
handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
enum db_type db_type);
enum db_type ha_checktype(THD *thd, enum db_type database_type,
bool no_substitute, bool report_error);
bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag);
......@@ -1422,10 +1429,12 @@ void ha_close_connection(THD* thd);
my_bool ha_storage_engine_is_enabled(enum db_type database_type);
bool ha_flush_logs(enum db_type db_type=DB_TYPE_DEFAULT);
void ha_drop_database(char* path);
int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
int ha_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
bool update_create_info);
int ha_delete_table(THD *thd, enum db_type db_type, const char *path,
const char *alias, bool generate_warning);
const char *db, const char *alias, bool generate_warning);
/* statistics and info */
bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat);
......
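The handler.h hunk above is the core interface change of this merge: a handler now stores the TABLE_SHARE (the shared table definition) from construction and binds a concrete TABLE only at ha_open() time, with change_table_ptr() allowing a later rebind. The self-contained mock below sketches that two-phase binding; the Mock* types are stand-ins for illustration, not the real server classes.

// Mock of the two-phase binding introduced by this merge: a handler is
// constructed from a TABLE_SHARE and only later attached to a concrete TABLE
// instance when the table is opened; FLUSH/ALTER can rebind it through
// change_table_ptr(). Stand-in types, not the real server classes.
#include <iostream>
#include <string>

struct MockTableShare                  /* stands in for TABLE_SHARE */
{
  std::string db;
  std::string table_name;
};

struct MockTable                       /* stands in for TABLE (one open instance) */
{
  const MockTableShare *s;
};

class MockHandler                      /* stands in for class handler */
{
protected:
  const MockTableShare *table_share;   /* the table definition, fixed early */
  MockTable *table;                    /* the current open table, bound later */
public:
  explicit MockHandler(const MockTableShare *share_arg)
    : table_share(share_arg), table(0) {}

  int ha_open(MockTable *table_arg, const char *name)
  {
    table= table_arg;                  /* bind the open instance */
    std::cout << "open " << name << " (" << table_share->db << "."
              << table_share->table_name << ")\n";
    return 0;
  }

  void change_table_ptr(MockTable *table_arg, const MockTableShare *share)
  {
    table= table_arg;                  /* rebind, e.g. after FLUSH TABLES */
    table_share= share;
  }
};

int main()
{
  MockTableShare share;
  share.db= "test";
  share.table_name= "t1";
  MockTable instance= { &share };
  MockHandler h(&share);               /* engines now construct from the share */
  h.ha_open(&instance, "./test/t1");
  return 0;
}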
......@@ -689,7 +689,7 @@ class Item {
// used in row subselects to get value of elements
virtual void bring_value() {}
Field *tmp_table_field_from_field_type(TABLE *table);
Field *tmp_table_field_from_field_type(TABLE *table, bool fixed_length);
virtual Item_field *filed_for_view_update() { return 0; }
virtual Item *neg_transformer(THD *thd) { return NULL; }
......
......@@ -1228,7 +1228,7 @@ enum_field_types Item_func_ifnull::field_type() const
Field *Item_func_ifnull::tmp_table_field(TABLE *table)
{
return tmp_table_field_from_field_type(table);
return tmp_table_field_from_field_type(table, 0);
}
double
......
......@@ -462,7 +462,6 @@ Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec,
CHARSET_INFO *cs)
{
Item *res;
LINT_INIT(res);
switch (cast_type) {
case ITEM_CAST_BINARY: res= new Item_func_binary(a); break;
......@@ -478,6 +477,10 @@ Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec,
res= new Item_char_typecast(a, len, cs ? cs :
current_thd->variables.collation_connection);
break;
default:
DBUG_ASSERT(0);
res= 0;
break;
}
return res;
}
......
......@@ -362,41 +362,43 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const
}
Field *Item_func::tmp_table_field(TABLE *t_arg)
Field *Item_func::tmp_table_field(TABLE *table)
{
Field *res;
LINT_INIT(res);
Field *field;
LINT_INIT(field);
switch (result_type()) {
case INT_RESULT:
if (max_length > 11)
res= new Field_longlong(max_length, maybe_null, name, t_arg,
unsigned_flag);
field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
else
res= new Field_long(max_length, maybe_null, name, t_arg,
unsigned_flag);
field= new Field_long(max_length, maybe_null, name, unsigned_flag);
break;
case REAL_RESULT:
res= new Field_double(max_length, maybe_null, name, t_arg, decimals);
field= new Field_double(max_length, maybe_null, name, decimals);
break;
case STRING_RESULT:
res= make_string_field(t_arg);
return make_string_field(table);
break;
case DECIMAL_RESULT:
res= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(),
decimals,
unsigned_flag),
maybe_null, name, t_arg, decimals, unsigned_flag);
field= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(),
decimals,
unsigned_flag),
maybe_null, name, decimals, unsigned_flag);
break;
case ROW_RESULT:
default:
// This case should never be chosen
DBUG_ASSERT(0);
field= 0;
break;
}
return res;
if (field)
field->init(table);
return field;
}
my_decimal *Item_func::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed);
......@@ -4637,7 +4639,8 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name)
{
maybe_null= 1;
m_name->init_qname(current_thd);
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE));
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
}
......@@ -4648,9 +4651,11 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg,
{
maybe_null= 1;
m_name->init_qname(current_thd);
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE));
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
}
void
Item_func_sp::cleanup()
{
......@@ -4705,16 +4710,15 @@ Item_func_sp::sp_result_field(void) const
DBUG_RETURN(0);
}
}
if (!dummy_table->s)
if (!dummy_table->alias)
{
char *empty_name= (char *) "";
TABLE_SHARE *share;
dummy_table->s= share= &dummy_table->share_not_to_be_used;
dummy_table->alias = empty_name;
dummy_table->maybe_null = maybe_null;
dummy_table->alias= empty_name;
dummy_table->maybe_null= maybe_null;
dummy_table->in_use= current_thd;
share->table_cache_key = empty_name;
share->table_name = empty_name;
dummy_table->s->table_cache_key.str = empty_name;
dummy_table->s->table_name.str= empty_name;
dummy_table->s->db.str= empty_name;
}
field= m_sp->make_field(max_length, name, dummy_table);
DBUG_RETURN(field);
......
......@@ -133,6 +133,7 @@ Item_subselect::select_transformer(JOIN *join)
bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
{
char const *save_where= thd_param->where;
uint8 uncacheable;
bool res;
DBUG_ASSERT(fixed == 0);
......@@ -178,15 +179,17 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
fix_length_and_dec();
}
else
return 1;
uint8 uncacheable= engine->uncacheable();
if (uncacheable)
goto err;
if ((uncacheable= engine->uncacheable()))
{
const_item_cache= 0;
if (uncacheable & UNCACHEABLE_RAND)
used_tables_cache|= RAND_TABLE_BIT;
}
fixed= 1;
err:
thd->where= save_where;
return res;
}
......@@ -1797,7 +1800,7 @@ void subselect_uniquesubquery_engine::print(String *str)
str->append(STRING_WITH_LEN("<primary_index_lookup>("));
tab->ref.items[0]->print(str);
str->append(STRING_WITH_LEN(" in "));
str->append(tab->table->s->table_name);
str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
KEY *key_info= tab->table->key_info+ tab->ref.key;
str->append(STRING_WITH_LEN(" on "));
str->append(key_info->name);
......@@ -1815,7 +1818,7 @@ void subselect_indexsubquery_engine::print(String *str)
str->append(STRING_WITH_LEN("<index_lookup>("));
tab->ref.items[0]->print(str);
str->append(STRING_WITH_LEN(" in "));
str->append(tab->table->s->table_name);
str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
KEY *key_info= tab->table->key_info+ tab->ref.key;
str->append(STRING_WITH_LEN(" on "));
str->append(key_info->name);
......
......@@ -143,26 +143,33 @@ bool Item_sum::walk (Item_processor processor, byte *argument)
Field *Item_sum::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
Field *field;
switch (result_type()) {
case REAL_RESULT:
return new Field_double(max_length,maybe_null,name,table,decimals);
field= new Field_double(max_length, maybe_null, name, decimals);
break;
case INT_RESULT:
return new Field_longlong(max_length,maybe_null,name,table,unsigned_flag);
field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
break;
case STRING_RESULT:
if (max_length > 255 && convert_blob_length)
return new Field_varstring(convert_blob_length, maybe_null,
name, table,
collation.collation);
return make_string_field(table);
if (max_length <= 255 || !convert_blob_length)
return make_string_field(table);
field= new Field_varstring(convert_blob_length, maybe_null,
name, table->s, collation.collation);
break;
case DECIMAL_RESULT:
return new Field_new_decimal(max_length, maybe_null, name, table,
field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
break;
case ROW_RESULT:
default:
// This case should never be chosen
DBUG_ASSERT(0);
return 0;
}
if (field)
field->init(table);
return field;
}
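The rewritten Item_sum::create_tmp_field() above shows the new two-step field creation: the Field constructors no longer take the TABLE, so the caller attaches the field afterwards with field->init(table). A minimal sketch of the same pattern in isolation, assuming only the constructor and init() signatures visible in this hunk (the helper name is illustrative):

  #include "mysql_priv.h"

  static Field *make_double_tmp_field(const char *name, uint max_length,
                                      uint decimals, bool maybe_null,
                                      TABLE *table)
  {
    /* step 1: construct the field without binding it to any table */
    Field *field= new Field_double(max_length, maybe_null, name, decimals);
    /* step 2: bind it to the table it will live in */
    if (field)
      field->init(table);
    return field;
  }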
......@@ -312,9 +319,10 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
Field *field;
if (args[0]->type() == Item::FIELD_ITEM)
{
Field *field= ((Item_field*) args[0])->field;
field= ((Item_field*) args[0])->field;
if ((field= create_tmp_field_from_field(current_thd, field, name, table,
NULL, convert_blob_length)))
......@@ -328,16 +336,21 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
*/
switch (args[0]->field_type()) {
case MYSQL_TYPE_DATE:
return new Field_date(maybe_null, name, table, collation.collation);
field= new Field_date(maybe_null, name, collation.collation);
break;
case MYSQL_TYPE_TIME:
return new Field_time(maybe_null, name, table, collation.collation);
field= new Field_time(maybe_null, name, collation.collation);
break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
return new Field_datetime(maybe_null, name, table, collation.collation);
default:
field= new Field_datetime(maybe_null, name, collation.collation);
break;
default:
return Item_sum::create_tmp_field(group, table, convert_blob_length);
}
return Item_sum::create_tmp_field(group, table, convert_blob_length);
if (field)
field->init(table);
return field;
}
......@@ -839,6 +852,7 @@ Item *Item_sum_avg::copy_or_same(THD* thd)
Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
uint convert_blob_len)
{
Field *field;
if (group)
{
/*
......@@ -846,14 +860,18 @@ Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
The easiest way to do this is to store both values in a string
and unpack on access.
*/
return new Field_string(((hybrid_type == DECIMAL_RESULT) ?
field= new Field_string(((hybrid_type == DECIMAL_RESULT) ?
dec_bin_size : sizeof(double)) + sizeof(longlong),
0, name, table, &my_charset_bin);
0, name, &my_charset_bin);
}
if (hybrid_type == DECIMAL_RESULT)
return new Field_new_decimal(max_length, maybe_null, name, table,
else if (hybrid_type == DECIMAL_RESULT)
field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
return new Field_double(max_length, maybe_null, name, table, decimals);
else
field= new Field_double(max_length, maybe_null, name, decimals);
if (field)
field->init(table);
return field;
}
......@@ -1018,6 +1036,7 @@ Item *Item_sum_variance::copy_or_same(THD* thd)
Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
uint convert_blob_len)
{
Field *field;
if (group)
{
/*
......@@ -1025,15 +1044,19 @@ Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
The easiest way to do this is to store both values in a string
and unpack on access.
*/
return new Field_string(((hybrid_type == DECIMAL_RESULT) ?
field= new Field_string(((hybrid_type == DECIMAL_RESULT) ?
dec_bin_size0 + dec_bin_size1 :
sizeof(double)*2) + sizeof(longlong),
0, name, table, &my_charset_bin);
0, name, &my_charset_bin);
}
if (hybrid_type == DECIMAL_RESULT)
return new Field_new_decimal(max_length, maybe_null, name, table,
else if (hybrid_type == DECIMAL_RESULT)
field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
return new Field_double(max_length, maybe_null,name,table,decimals);
else
field= new Field_double(max_length, maybe_null, name, decimals);
if (field)
field->init(table);
return field;
}
......
......@@ -3002,18 +3002,6 @@ get_date_time_result_type(const char *format, uint length)
}
Field *Item_func_str_to_date::tmp_table_field(TABLE *t_arg)
{
if (cached_field_type == MYSQL_TYPE_TIME)
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
if (cached_field_type == MYSQL_TYPE_DATE)
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
if (cached_field_type == MYSQL_TYPE_DATETIME)
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
}
void Item_func_str_to_date::fix_length_and_dec()
{
char format_buff[64];
......
......@@ -340,10 +340,10 @@ class Item_date :public Item_func
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
int save_in_field(Field *to, bool no_conversions);
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
}
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -355,9 +355,9 @@ class Item_date_func :public Item_str_func
Item_date_func(Item *a,Item *b) :Item_str_func(a,b) {}
Item_date_func(Item *a,Item *b, Item *c) :Item_str_func(a,b,c) {}
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -378,9 +378,9 @@ class Item_func_curtime :public Item_func
longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
String *val_str(String *str);
void fix_length_and_dec();
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
/*
Abstract method that defines which time zone is used for conversion.
......@@ -618,9 +618,9 @@ class Item_func_sec_to_time :public Item_str_func
}
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
const char *func_name() const { return "sec_to_time"; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -741,9 +741,9 @@ class Item_date_typecast :public Item_typecast_maybe_null
bool get_date(TIME *ltime, uint fuzzy_date);
const char *cast_type() const { return "date"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
void fix_length_and_dec()
{
......@@ -763,9 +763,9 @@ class Item_time_typecast :public Item_typecast_maybe_null
bool get_time(TIME *ltime);
const char *cast_type() const { return "time"; }
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -778,9 +778,9 @@ class Item_datetime_typecast :public Item_typecast_maybe_null
String *val_str(String *str);
const char *cast_type() const { return "datetime"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -796,9 +796,9 @@ class Item_func_makedate :public Item_str_func
decimals=0;
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -816,18 +816,9 @@ class Item_func_add_time :public Item_str_func
enum_field_types field_type() const { return cached_field_type; }
void fix_length_and_dec();
/*
TODO:
Change this when we support
microseconds in TIME/DATETIME
*/
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
if (cached_field_type == MYSQL_TYPE_TIME)
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
else if (cached_field_type == MYSQL_TYPE_DATETIME)
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
void print(String *str);
const char *func_name() const { return "add_time"; }
......@@ -847,9 +838,9 @@ class Item_func_timediff :public Item_str_func
max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null= 1;
}
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -866,9 +857,9 @@ class Item_func_maketime :public Item_str_func
decimals=0;
max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
......@@ -942,7 +933,10 @@ class Item_func_str_to_date :public Item_str_func
const char *func_name() const { return "str_to_date"; }
enum_field_types field_type() const { return cached_field_type; }
void fix_length_and_dec();
Field *tmp_table_field(TABLE *t_arg);
Field *tmp_table_field(TABLE *table)
{
return tmp_table_field_from_field_type(table, 1);
}
};
......
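Every time/date Item class above now delegates tmp_table_field() to tmp_table_field_from_field_type(table, ...) instead of newing a concrete Field itself. Purely as a hypothetical illustration (this is not the real Item::tmp_table_field_from_field_type(), and the meaning of its second argument is not shown here), a field_type()-driven factory reconstructed from the per-type creators these hunks remove and the new table-less constructors could look like this:

  #include "mysql_priv.h"

  static Field *tmp_field_for(Item *item, TABLE *table)
  {
    Field *field;
    switch (item->field_type()) {
    case MYSQL_TYPE_TIME:
      field= new Field_time(item->maybe_null, item->name, &my_charset_bin);
      break;
    case MYSQL_TYPE_DATE:
      field= new Field_date(item->maybe_null, item->name, &my_charset_bin);
      break;
    case MYSQL_TYPE_DATETIME:
      field= new Field_datetime(item->maybe_null, item->name, &my_charset_bin);
      break;
    default:                              /* string-like types fall back here */
      field= new Field_string(item->max_length, item->maybe_null, item->name,
                              &my_charset_bin);
      break;
    }
    if (field)
      field->init(table);                 /* same two-step binding as elsewhere */
    return field;
  }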
......@@ -25,5 +25,8 @@
Field *Item_sum_unique_users::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
return new Field_long(9,maybe_null,name,table,1);
Field *field= new Field_long(9, maybe_null, name, 1);
if (field)
field->init(table);
return field;
}
......@@ -28,7 +28,7 @@
** Used when calculating key for NEXT_NUMBER
*/
int find_ref_key(TABLE *table,Field *field, uint *key_length)
int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length)
{
reg2 int i;
reg3 KEY *key_info;
......@@ -38,8 +38,8 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length)
/* Test if some key starts as fieldpos */
for (i= 0, key_info= table->key_info ;
i < (int) table->s->keys ;
for (i= 0, key_info= key ;
i < (int) key_count ;
i++, key_info++)
{
if (key_info->key_part[0].offset == fieldpos)
......@@ -50,8 +50,8 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length)
}
/* Test if some key contains fieldpos */
for (i= 0, key_info= table->key_info ;
i < (int) table->s->keys ;
for (i= 0, key_info= key;
i < (int) key_count ;
i++, key_info++)
{
uint j;
......
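find_ref_key() above now takes the key array and the key count instead of the whole TABLE. A minimal sketch of how a caller adapts (the wrapper name is illustrative; the two extra arguments are exactly the values the old body fetched from the table itself):

  #include "mysql_priv.h"

  static int lookup_ref_key(TABLE *table, Field *field, uint *key_length)
  {
    /* old call: find_ref_key(table, field, key_length) */
    return find_ref_key(table->key_info, table->s->keys, field, key_length);
  }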
......@@ -357,12 +357,15 @@ void mysql_lock_abort(THD *thd, TABLE *table)
{
MYSQL_LOCK *locked;
TABLE *write_lock_used;
DBUG_ENTER("mysql_lock_abort");
if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
thr_abort_locks(locked->locks[i]->lock);
my_free((gptr) locked,MYF(0));
}
DBUG_VOID_RETURN;
}
......@@ -482,8 +485,8 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
table_ptr[i]->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE &&
count != 1)
{
my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db,
table_ptr[i]->s->table_name);
my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db.str,
table_ptr[i]->s->table_name.str);
DBUG_RETURN(0);
}
}
......@@ -610,32 +613,35 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list)
DBUG_ENTER("lock_table_name");
DBUG_PRINT("enter",("db: %s name: %s", db, table_list->table_name));
safe_mutex_assert_owner(&LOCK_open);
key_length=(uint) (strmov(strmov(key,db)+1,table_list->table_name)
-key)+ 1;
key_length= create_table_def_key(thd, key, table_list, 0);
/* Only insert the table if we haven't inserted it already */
for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ;
table ;
table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length))
{
if (table->in_use == thd)
{
DBUG_PRINT("info", ("Table is in use"));
table->s->version= 0; // Ensure no one can use this
table->locked_by_name= 1;
DBUG_RETURN(0);
}
}
/*
Create a table entry with the right key and with an old refresh version
Note that we must use my_malloc() here as this is freed by the table
cache
*/
if (!(table= (TABLE*) my_malloc(sizeof(*table)+key_length,
MYF(MY_WME | MY_ZEROFILL))))
if (!(table= (TABLE*) my_malloc(sizeof(*table)+ sizeof(TABLE_SHARE)+
key_length, MYF(MY_WME | MY_ZEROFILL))))
DBUG_RETURN(-1);
table->s= &table->share_not_to_be_used;
memcpy((table->s->table_cache_key= (char*) (table+1)), key, key_length);
table->s->db= table->s->table_cache_key;
table->s->key_length=key_length;
table->in_use=thd;
table->s= (TABLE_SHARE*) (table+1);
memcpy((table->s->table_cache_key.str= (char*) (table->s+1)), key,
key_length);
table->s->table_cache_key.length= key_length;
table->s->tmp_table= INTERNAL_TMP_TABLE; // for intern_close_table
table->in_use= thd;
table->locked_by_name=1;
table_list->table=table;
......@@ -665,8 +671,17 @@ static bool locked_named_table(THD *thd, TABLE_LIST *table_list)
{
for (; table_list ; table_list=table_list->next_local)
{
if (table_list->table && table_is_used(table_list->table,0))
return 1;
TABLE *table= table_list->table;
if (table)
{
TABLE *save_next= table->next;
bool result;
table->next= 0;
result= table_is_used(table_list->table, 0);
table->next= save_next;
if (result)
return 1;
}
}
return 0; // All tables are locked
}
......@@ -676,6 +691,7 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
{
bool result=0;
DBUG_ENTER("wait_for_locked_table_names");
safe_mutex_assert_owner(&LOCK_open);
while (locked_named_table(thd,table_list))
......@@ -685,7 +701,7 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
result=1;
break;
}
wait_for_refresh(thd);
wait_for_condition(thd, &LOCK_open, &COND_refresh);
pthread_mutex_lock(&LOCK_open);
}
DBUG_RETURN(result);
......@@ -1037,5 +1053,3 @@ bool make_global_read_lock_block_commit(THD *thd)
thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock
DBUG_RETURN(error);
}
......@@ -602,8 +602,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables,
bool if_exists, bool drop_temporary,
bool log_query);
int quick_rm_table(enum db_type base,const char *db,
const char *table_name);
bool quick_rm_table(enum db_type base,const char *db,
const char *table_name);
void close_cached_table(THD *thd, TABLE *table);
bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list);
bool mysql_change_db(THD *thd,const char *name,bool no_access_check);
......@@ -634,7 +634,10 @@ bool check_dup(const char *db, const char *name, TABLE_LIST *tables);
bool table_cache_init(void);
void table_cache_free(void);
uint cached_tables(void);
bool table_def_init(void);
void table_def_free(void);
uint cached_open_tables(void);
uint cached_table_definitions(void);
void kill_mysql(void);
void close_connection(THD *thd, uint errcode, bool lock);
bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
......@@ -781,15 +784,18 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
bool reset_auto_increment);
bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok);
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create);
uint create_table_def_key(THD *thd, byte *key, TABLE_LIST *table_list,
bool tmp_table);
TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, byte *key,
uint key_length, uint db_flags, int *error);
void release_table_share(TABLE_SHARE *share, enum release_type type);
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name);
TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update);
TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem,
bool *refresh, uint flags);
bool reopen_name_locked_table(THD* thd, TABLE_LIST* table);
TABLE *find_locked_table(THD *thd, const char *db,const char *table_name);
bool reopen_table(TABLE *table,bool locked);
bool reopen_tables(THD *thd,bool get_locks,bool in_refresh);
void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
bool send_refresh);
bool close_data_tables(THD *thd,const char *db, const char *table_name);
bool wait_for_tables(THD *thd);
bool table_is_used(TABLE *table, bool wait_for_name_lock);
......@@ -986,7 +992,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
COND **conds);
int setup_ftfuncs(SELECT_LEX* select);
int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order);
void wait_for_refresh(THD *thd);
void wait_for_condition(THD *thd, pthread_mutex_t *mutex,
pthread_cond_t *cond);
int open_tables(THD *thd, TABLE_LIST **tables, uint *counter, uint flags);
int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables);
bool open_and_lock_tables(THD *thd,TABLE_LIST *tables);
......@@ -1005,9 +1012,12 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table,
const char *db_name,
const char *table_name);
TABLE_LIST *unique_table(TABLE_LIST *table, TABLE_LIST *table_list);
TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name);
bool close_temporary_table(THD *thd, const char *db, const char *table_name);
void close_temporary(TABLE *table, bool delete_table);
TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name);
TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list);
bool close_temporary_table(THD *thd, TABLE_LIST *table_list);
void close_temporary_table(THD *thd, TABLE *table, bool free_share,
bool delete_table);
void close_temporary(TABLE *table, bool free_share, bool delete_table);
bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
const char *table_name);
void remove_db_from_cache(const char *db);
......@@ -1086,7 +1096,7 @@ void print_plan(JOIN* join, double read_time, double record_count,
#endif
void mysql_print_status();
/* key.cc */
int find_ref_key(TABLE *form,Field *field, uint *offset);
int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length);
void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length);
void key_restore(byte *to_record, byte *from_key, KEY *key_info,
uint key_length);
......@@ -1176,7 +1186,7 @@ extern ulong delayed_rows_in_use,delayed_insert_errors;
extern ulong slave_open_temp_tables;
extern ulong query_cache_size, query_cache_min_res_unit;
extern ulong slow_launch_threads, slow_launch_time;
extern ulong table_cache_size;
extern ulong table_cache_size, table_def_size;
extern ulong max_connections,max_connect_errors, connect_timeout;
extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
......@@ -1371,23 +1381,36 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
void unireg_init(ulong options);
void unireg_end(void);
bool mysql_create_frm(THD *thd, my_string file_name,
bool mysql_create_frm(THD *thd, const char *file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info,handler *db_type);
int rea_create_table(THD *thd, my_string file_name,
const char *db, const char *table,
int rea_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info, handler *file);
List<create_field> &create_field,
uint key_count,KEY *key_info,
handler *file);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
my_string *errpos);
/* table.cc */
TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, byte *key,
uint key_length);
void init_tmp_table_share(TABLE_SHARE *share, const char *key, uint key_length,
const char *table_name, const char *path);
void free_table_share(TABLE_SHARE *share);
int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags);
void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg);
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag, uint ha_open_flags,
TABLE *outparam);
int openfrm(THD *thd, const char *name,const char *alias,uint filestat,
uint prgflag, uint ha_open_flags, TABLE *outparam);
int readfrm(const char *name, const void** data, uint* length);
int writefrm(const char* name, const void* data, uint len);
int closefrm(TABLE *table);
int closefrm(TABLE *table, bool free_share);
int read_string(File file, gptr *to, uint length);
void free_blobs(TABLE *table);
int set_zone(int nr,int min_zone,int max_zone);
......@@ -1446,8 +1469,8 @@ ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames,
const char *newname);
ulong next_io_size(ulong pos);
void append_unescaped(String *res, const char *pos, uint length);
int create_frm(THD *thd, char *name, const char *db, const char *table,
uint reclength,uchar *fileinfo,
int create_frm(THD *thd, const char *name, const char *db, const char *table,
uint reclength, uchar *fileinfo,
HA_CREATE_INFO *create_info, uint keys);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
int rename_file_ext(const char * from,const char * to,const char * ext);
......
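The mysql_priv.h changes above add the table-definition-cache API: create_table_def_key(), get_table_share(), release_table_share(), open_table_def(), open_table_from_share() and friends. A hypothetical sketch of how these prototypes are meant to fit together (locking, error handling and the real open_table() logic are omitted; the zero flag values and the RELEASE_NORMAL release type are assumptions, not taken from this diff):

  #include "mysql_priv.h"

  static int open_one_table(THD *thd, TABLE_LIST *table_list, TABLE *outparam)
  {
    byte key[MAX_DBKEY_LENGTH];
    int error;
    uint key_length= create_table_def_key(thd, key, table_list, 0);

    /* look the definition up in (or load it into) the definition cache */
    TABLE_SHARE *share= get_table_share(thd, table_list, key, key_length,
                                        0, &error);
    if (!share)
      return error;

    /* instantiate a TABLE object from the shared definition */
    error= open_table_from_share(thd, share, table_list->alias,
                                 0, 0, 0, outparam);
    if (error)
      release_table_share(share, RELEASE_NORMAL);
    return error;
  }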
......@@ -447,7 +447,8 @@ uint tc_heuristic_recover= 0;
uint volatile thread_count, thread_running;
ulonglong thd_startup_options;
ulong back_log, connect_timeout, concurrency, server_id;
ulong table_cache_size, thread_stack, what_to_log;
ulong table_cache_size, table_def_size;
ulong thread_stack, what_to_log;
ulong query_buff_size, slow_launch_time, slave_open_temp_tables;
ulong open_files_limit, max_binlog_size, max_relay_log_size;
ulong slave_net_timeout, slave_trans_retries;
......@@ -1113,6 +1114,7 @@ void clean_up(bool print_message)
#endif
query_cache_destroy();
table_cache_free();
table_def_free();
hostname_cache_free();
item_user_lock_free();
lex_free(); /* Free some memory */
......@@ -1411,7 +1413,7 @@ static void network_init(void)
struct sockaddr_un UNIXaddr;
#endif
int arg=1;
DBUG_ENTER("server_init");
DBUG_ENTER("network_init");
set_ports();
......@@ -2775,7 +2777,7 @@ static int init_thread_environment()
{
(void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_open,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_open, NULL);
(void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_status,MY_MUTEX_INIT_FAST);
......@@ -2937,7 +2939,11 @@ static void init_ssl()
static int init_server_components()
{
DBUG_ENTER("init_server_components");
if (table_cache_init() || hostname_cache_init())
/*
We need to call each of the following functions to ensure that
everything is initialized so that unireg_abort() doesn't fail
*/
if (table_cache_init() | table_def_init() | hostname_cache_init())
unireg_abort(1);
query_cache_result_size_limit(query_cache_limit);
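The comment and the switch from || to | above make sure every initializer actually runs: with the short-circuiting ||, a failure in table_cache_init() would have skipped table_def_init() and hostname_cache_init(), and unireg_abort() could then touch uninitialized structures. An equivalent, more explicit spelling (illustrative only; bool return types as in the prototypes declared earlier in this commit):

  #include "mysql_priv.h"

  static bool init_all_caches()
  {
    bool error= table_cache_init();   /* always runs */
    error|= table_def_init();         /* still runs even if the first call failed */
    error|= hostname_cache_init();    /* ditto */
    return error;                     /* caller does unireg_abort(1) on failure */
  }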
......@@ -3379,9 +3385,7 @@ int main(int argc, char **argv)
*/
check_data_home(mysql_real_data_home);
if (my_setwd(mysql_real_data_home,MYF(MY_WME)))
{
unireg_abort(1); /* purecov: inspected */
}
mysql_data_home= mysql_data_home_buff;
mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
mysql_data_home[1]=0;
......@@ -3396,7 +3400,6 @@ int main(int argc, char **argv)
set_user(mysqld_user, user_info);
}
if (opt_bin_log && !server_id)
{
server_id= !master_host ? 1 : 2;
......@@ -3418,7 +3421,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
}
if (init_server_components())
exit(1);
unireg_abort(1);
network_init();
......@@ -3594,8 +3597,8 @@ static char *add_quoted_string(char *to, const char *from, char *to_end)
uint length= (uint) (to_end-to);
if (!strchr(from, ' '))
return strnmov(to, from, length);
return strxnmov(to, length, "\"", from, "\"", NullS);
return strmake(to, from, length-1);
return strxnmov(to, length-1, "\"", from, "\"", NullS);
}
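The length-1 here, like the FN_REFLEN-1 arguments in several later hunks, accounts for the terminating NUL: the assumption (about the mysys helpers, not stated in this diff) is that strmake() and strxnmov() copy at most len characters and then always append a NUL, so they may write len+1 bytes. Illustrative sizing under that assumption:

  #include "mysql_priv.h"

  static void quote_into(char *to, const char *from)
  {
    /* 'to' must hold FN_REFLEN bytes */
    /* wrong: could write FN_REFLEN characters plus the NUL, one past the end */
    /* strxnmov(to, FN_REFLEN, "\"", from, "\"", NullS); */

    /* right: the closing NUL stays inside the buffer */
    strxnmov(to, FN_REFLEN - 1, "\"", from, "\"", NullS);
  }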
......@@ -4563,7 +4566,7 @@ enum options_mysqld
OPT_RELAY_LOG_PURGE,
OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME,
OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING,
OPT_SORT_BUFFER, OPT_TABLE_CACHE,
OPT_SORT_BUFFER, OPT_TABLE_OPEN_CACHE, OPT_TABLE_DEF_CACHE,
OPT_THREAD_CONCURRENCY, OPT_THREAD_CACHE_SIZE,
OPT_TMP_TABLE_SIZE, OPT_THREAD_STACK,
OPT_WAIT_TIMEOUT, OPT_MYISAM_REPAIR_THREADS,
......@@ -5952,13 +5955,21 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.sync_replication_timeout,
0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0},
#endif /* HAVE_REPLICATION */
{"table_cache", OPT_TABLE_CACHE,
"The number of open tables for all threads.", (gptr*) &table_cache_size,
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
0, 1, 0},
{"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in "
"seconds to wait for a table level lock before returning an error. Used"
" only if the connection has active cursors.",
{"table_cache", OPT_TABLE_OPEN_CACHE,
"Deprecated; use --table_open_cache instead.",
(gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 64, 1, 512*1024L, 0, 1, 0},
{"table_definition_cache", OPT_TABLE_DEF_CACHE,
"The number of cached table definitions.",
(gptr*) &table_def_size, (gptr*) &table_def_size,
0, GET_ULONG, REQUIRED_ARG, 128, 1, 512*1024L, 0, 1, 0},
{"table_open_cache", OPT_TABLE_OPEN_CACHE,
"The number of cached open tables.",
(gptr*) &table_cache_size, (gptr*) &table_cache_size,
0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L, 0, 1, 0},
{"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT,
"Timeout in seconds to wait for a table level lock before returning an "
"error. Used only if the connection has active cursors.",
(gptr*) &table_lock_wait_timeout, (gptr*) &table_lock_wait_timeout,
0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
{"thread_cache_size", OPT_THREAD_CACHE_SIZE,
......@@ -6158,7 +6169,8 @@ struct show_var_st status_vars[]= {
{"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST},
{"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST},
{"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST},
{"Open_tables", (char*) 0, SHOW_OPENTABLES},
{"Open_table_definitions", (char*) 0, SHOW_TABLE_DEFINITIONS},
{"Open_tables", (char*) 0, SHOW_OPEN_TABLES},
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST},
......@@ -6989,6 +7001,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case OPT_MYISAM_STATS_METHOD:
{
ulong method_conv;
LINT_INIT(method_conv);
myisam_stats_method_str= argument;
int method;
if ((method=find_type(argument, &myisam_stats_method_typelib, 2)) <= 0)
......
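LINT_INIT() above (and in a later stored-procedure hunk) appears to exist only to silence "may be used uninitialized" warnings for variables that are always assigned before use on the paths that matter; semantically it is a no-op or a dummy initialization, depending on the build (an assumption about the macro, not shown in this diff). In isolation:

  ulong method_conv;
  LINT_INIT(method_conv);   /* quiets the bogus warning; real value assigned later */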
......@@ -910,6 +910,7 @@ int QUICK_ROR_INTERSECT_SELECT::init()
int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
handler *save_file= file;
THD *thd;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
if (reuse_handler)
......@@ -931,11 +932,12 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(0);
}
THD *thd= current_thd;
if (!(file= get_new_handler(head, thd->mem_root, head->s->db_type)))
thd= head->in_use;
if (!(file= get_new_handler(head->s, thd->mem_root, head->s->db_type)))
goto failure;
DBUG_PRINT("info", ("Allocated new handler %p", file));
if (file->ha_open(head->s->path, head->db_stat, HA_OPEN_IGNORE_IF_LOCKED))
if (file->ha_open(head, head->s->normalized_path.str, head->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
{
/* Caller will free the memory */
goto failure;
......@@ -6201,6 +6203,14 @@ int QUICK_RANGE_SELECT::reset()
multi_range_buff->buffer= mrange_buff;
multi_range_buff->buffer_end= mrange_buff + mrange_bufsiz;
multi_range_buff->end_of_used_area= mrange_buff;
#ifdef HAVE_purify
/*
We need this until ndb uses the buffer efficiently
(currently ndb stores the complete row here instead of only the used
fields, which gives us valgrind warnings in compare_record[])
*/
bzero((char*) mrange_buff, mrange_bufsiz);
#endif
}
DBUG_RETURN(0);
}
......
......@@ -355,11 +355,11 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
{
char old_path[FN_REFLEN], new_path[FN_REFLEN], arc_path[FN_REFLEN];
strxnmov(old_path, FN_REFLEN, mysql_data_home, "/", schema, "/",
strxnmov(old_path, FN_REFLEN-1, mysql_data_home, "/", schema, "/",
old_name, reg_ext, NullS);
(void) unpack_filename(old_path, old_path);
strxnmov(new_path, FN_REFLEN, mysql_data_home, "/", schema, "/",
strxnmov(new_path, FN_REFLEN-1, mysql_data_home, "/", schema, "/",
new_name, reg_ext, NullS);
(void) unpack_filename(new_path, new_path);
......@@ -367,7 +367,7 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
return 1;
/* check if arc_dir exists */
strxnmov(arc_path, FN_REFLEN, mysql_data_home, "/", schema, "/arc", NullS);
strxnmov(arc_path, FN_REFLEN-1, mysql_data_home, "/", schema, "/arc", NullS);
(void) unpack_filename(arc_path, arc_path);
if (revision > 0 && !access(arc_path, F_OK))
......@@ -414,7 +414,7 @@ sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root,
char *end, *sign;
File_parser *parser;
File file;
DBUG_ENTER("sql__parse_prepare");
DBUG_ENTER("sql_parse_prepare");
if (!my_stat(file_name->str, &stat_info, MYF(MY_WME)))
{
......
......@@ -421,7 +421,9 @@ sys_var_thd_ulong sys_sync_replication_timeout(
&SV::sync_replication_timeout);
#endif
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_long_ptr sys_table_cache_size("table_cache",
sys_var_long_ptr sys_table_def_size("table_definition_cache",
&table_def_size);
sys_var_long_ptr sys_table_cache_size("table_open_cache",
&table_cache_size);
sys_var_long_ptr sys_table_lock_wait_timeout("table_lock_wait_timeout",
&table_lock_wait_timeout);
......@@ -877,7 +879,8 @@ struct show_var_st init_vars[]= {
#ifdef HAVE_TZNAME
{"system_time_zone", system_time_zone, SHOW_CHAR},
#endif
{"table_cache", (char*) &table_cache_size, SHOW_LONG},
{"table_definition_cache", (char*) &table_def_size, SHOW_LONG},
{"table_open_cache", (char*) &table_cache_size, SHOW_LONG},
{"table_lock_wait_timeout", (char*) &table_lock_wait_timeout, SHOW_LONG },
{sys_table_type.name, (char*) &sys_table_type, SHOW_SYS},
{sys_thread_cache_size.name,(char*) &sys_thread_cache_size, SHOW_SYS},
......
......@@ -513,7 +513,7 @@ void st_relay_log_info::close_temporary_tables()
Don't ask for disk deletion. For now they will be deleted anyway when
the slave restarts, but the intention is to not delete them.
*/
close_temporary(table, 0);
close_temporary(table, 1, 0);
}
save_temporary_tables= 0;
slave_open_temp_tables= 0;
......@@ -1296,7 +1296,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
error=file->repair(thd,&check_opt) != 0;
thd->net.vio = save_vio;
if (error)
my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name);
my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name.str);
err:
close_thread_tables(thd);
......
......@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "mysql_priv.h"
#include "sp.h"
#include "sp_head.h"
......@@ -463,10 +462,12 @@ static void
sp_returns_type(THD *thd, String &result, sp_head *sp)
{
TABLE table;
TABLE_SHARE share;
Field *field;
bzero(&table, sizeof(table));
bzero((char*) &table, sizeof(table));
bzero((char*) &share, sizeof(share));
table.in_use= thd;
table.s = &table.share_not_to_be_used;
table.s = &share;
field= sp->make_field(0, 0, &table);
field->sql_type(result);
delete field;
......
......@@ -243,6 +243,10 @@ sp_eval_func_item(THD *thd, Item **it_addr, enum enum_field_types type,
Item *old_item_next, *old_free_list, **p_free_list;
DBUG_PRINT("info", ("type: %d", type));
LINT_INIT(old_item_next);
LINT_INIT(old_free_list);
LINT_INIT(p_free_list);
if (!it)
DBUG_RETURN(NULL);
......@@ -518,7 +522,7 @@ void
sp_head::init_strings(THD *thd, LEX *lex, sp_name *name)
{
DBUG_ENTER("sp_head::init_strings");
uchar *endp; /* Used to trim the end */
const uchar *endp; /* Used to trim the end */
/* During parsing, we must use thd->mem_root */
MEM_ROOT *root= thd->mem_root;
......@@ -711,12 +715,14 @@ sp_head::make_field(uint max_length, const char *name, TABLE *dummy)
Field *field;
DBUG_ENTER("sp_head::make_field");
field= ::make_field((char *)0,
!m_returns_len ? max_length : m_returns_len,
(uchar *)"", 0, m_returns_pack, m_returns, m_returns_cs,
m_geom_returns, Field::NONE,
m_returns_typelib,
name ? name : (const char *)m_name.str, dummy);
field= ::make_field(dummy->s, (char *)0,
!m_returns_len ? max_length : m_returns_len,
(uchar *)"", 0, m_returns_pack, m_returns, m_returns_cs,
m_geom_returns, Field::NONE,
m_returns_typelib,
name ? name : (const char *)m_name.str);
if (field)
field->init(dummy);
DBUG_RETURN(field);
}
......
......@@ -129,7 +129,7 @@ class sp_head :private Query_arena
TYPELIB *m_returns_typelib; // For FUNCTIONs only
uint m_returns_len; // For FUNCTIONs only
uint m_returns_pack; // For FUNCTIONs only
uchar *m_tmp_query; // Temporary pointer to sub query string
const uchar *m_tmp_query; // Temporary pointer to sub query string
uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value
st_sp_chistics *m_chistics;
ulong m_sql_mode; // For SHOW CREATE and execution
......@@ -178,7 +178,7 @@ class sp_head :private Query_arena
*/
HASH m_sroutines;
// Pointers set during parsing
uchar *m_param_begin, *m_param_end, *m_body_begin;
const uchar *m_param_begin, *m_param_end, *m_body_begin;
/*
Security context for stored routine which should be run under
......
......@@ -2220,10 +2220,10 @@ void free_grant_table(GRANT_TABLE *grant_table)
/* Search for a matching grant. Prefer exact grants over non-exact ones */
static GRANT_NAME *name_hash_search(HASH *name_hash,
const char *host,const char* ip,
const char *db,
const char *user, const char *tname,
bool exact)
const char *host,const char* ip,
const char *db,
const char *user, const char *tname,
bool exact)
{
char helping [NAME_LEN*2+USERNAME_LENGTH+3];
uint len;
......@@ -4680,7 +4680,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
by the searched record, if it exists.
*/
DBUG_PRINT("info",("read table: '%s' search: '%s'@'%s'",
table->s->table_name, user_str, host_str));
table->s->table_name.str, user_str, host_str));
host_field->store(host_str, user_from->host.length, system_charset_info);
user_field->store(user_str, user_from->user.length, system_charset_info);
......@@ -4723,7 +4723,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
{
#ifdef EXTRA_DEBUG
DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'",
table->s->table_name, user_str, host_str));
table->s->table_name.str, user_str, host_str));
#endif
while ((error= table->file->rnd_next(table->record[0])) !=
HA_ERR_END_OF_FILE)
......
......@@ -664,7 +664,8 @@ void THD::add_changed_table(TABLE *table)
DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
table->file->has_transactions());
add_changed_table(table->s->table_cache_key, table->s->key_length);
add_changed_table(table->s->table_cache_key.str,
table->s->table_cache_key.length);
DBUG_VOID_RETURN;
}
......@@ -1059,7 +1060,8 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange,
if (!dirname_length(exchange->file_name))
{
strxnmov(path, FN_REFLEN, mysql_real_data_home, thd->db ? thd->db : "", NullS);
strxnmov(path, FN_REFLEN-1, mysql_real_data_home, thd->db ? thd->db : "",
NullS);
(void) fn_format(path, exchange->file_name, path, "", option);
}
else
......
......@@ -628,6 +628,7 @@ typedef struct system_status_var
ulong net_big_packet_count;
ulong opened_tables;
ulong opened_shares;
ulong select_full_join_count;
ulong select_full_range_join_count;
ulong select_range_count;
......
......@@ -272,7 +272,7 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{
ulong length;
length= (ulong) (strxnmov(buf, sizeof(buf), "default-character-set=",
length= (ulong) (strxnmov(buf, sizeof(buf)-1, "default-character-set=",
create->default_table_charset->csname,
"\ndefault-collation=",
create->default_table_charset->name,
......
......@@ -179,8 +179,8 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list)
}
orig_table_list->derived_result= derived_result;
orig_table_list->table= table;
orig_table_list->table_name= (char*) table->s->table_name;
orig_table_list->table_name_length= strlen((char*)table->s->table_name);
orig_table_list->table_name= table->s->table_name.str;
orig_table_list->table_name_length= table->s->table_name.length;
table->derived_select_number= first_select->select_number;
table->s->tmp_table= TMP_TABLE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
......
......@@ -286,7 +286,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
#endif
if (!dirname_length(ex->file_name))
{
strxnmov(name, FN_REFLEN, mysql_real_data_home, tdb, NullS);
strxnmov(name, FN_REFLEN-1, mysql_real_data_home, tdb, NullS);
(void) fn_format(name, ex->file_name, name, "",
MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
}
......
......@@ -1183,7 +1183,8 @@ multi_update::initialize_tables(JOIN *join)
/* ok to be on stack as this is not referenced outside of this func */
Field_string offset(table->file->ref_length, 0, "offset",
table, &my_charset_bin);
&my_charset_bin);
offset.init(table);
if (!(ifield= new Item_field(((Field *) &offset))))
DBUG_RETURN(1);
ifield->maybe_null= 0;
......