Commit 20b09855 authored by unknown

Merge jamppa@bk-internal.mysql.com:/home/bk/mysql-5.1

into  a193-229-222-105.elisa-laajakaista.fi:/home/my/bk/mysql-5.1


BitKeeper/etc/config:
  Auto merged
sql/field.cc:
  Auto merged
sql/ha_berkeley.cc:
  Auto merged
sql/ha_berkeley.h:
  Auto merged
sql/ha_heap.cc:
  Auto merged
sql/ha_heap.h:
  Auto merged
sql/ha_innodb.cc:
  Auto merged
sql/ha_innodb.h:
  Auto merged
sql/ha_myisam.cc:
  Auto merged
sql/ha_myisam.h:
  Auto merged
sql/ha_myisammrg.cc:
  Auto merged
sql/ha_myisammrg.h:
  Auto merged
sql/ha_ndbcluster.h:
  Auto merged
sql/handler.h:
  Auto merged
sql/mysql_priv.h:
  Auto merged
sql/mysqld.cc:
  Auto merged
sql/set_var.cc:
  Auto merged
sql/set_var.h:
  Auto merged
sql/sql_class.h:
  Auto merged
sql/sql_lex.h:
  Auto merged
sql/sql_table.cc:
  Auto merged
sql/ha_ndbcluster.cc:
  Merge.
sql/field.h:
  Auto merged
parents 96c05032 f1e25513
......@@ -73,3 +73,8 @@ hours:
[nick:]checkout:get
checkout:edit
eoln:unix
license: BKL5433d4e6925a06a150001200fff9b
licsign1: YgAAAo0AAAADgAAAAEYUtZil1XCmH6z+LTlQMDJ+1ZeBLIgtHo1azUxQ8/8G1JuW
licsign2: fxW3y9raSlpYVAleJSaBDKYiVtEuSdaUN2ILLo6Wc8TJmLl0aprUy7Lh/m/Sq/YC
licsign3: 0H7qah3bdItuw7NGNSLfBzigbKOF6kPbU84VlAUhOqLR2e5Zf32SBZhtCYGA
......@@ -537,3 +537,57 @@ create table t1 ( a timestamp );
alter table t1 add unique ( a(1) );
ERROR HY000: Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys
drop table t1;
create table t1 (v varchar(32));
insert into t1 values ('def'),('abc'),('hij'),('3r4f');
select * from t1;
v
def
abc
hij
3r4f
alter table t1 change v v2 varchar(32);
select * from t1;
v2
def
abc
hij
3r4f
alter table t1 change v2 v varchar(64);
select * from t1;
v
def
abc
hij
3r4f
update t1 set v = 'lmn' where v = 'hij';
select * from t1;
v
def
abc
lmn
3r4f
alter table t1 add i int auto_increment not null primary key first;
select * from t1;
i v
1 def
2 abc
3 lmn
4 3r4f
update t1 set i=5 where i=3;
select * from t1;
i v
1 def
2 abc
5 lmn
4 3r4f
alter table t1 change i i bigint;
select * from t1;
i v
1 def
2 abc
5 lmn
4 3r4f
alter table t1 add unique key (i, v);
select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
i v
4 3r4f
......@@ -360,3 +360,30 @@ create table t1 ( a timestamp );
--error 1089
alter table t1 add unique ( a(1) );
drop table t1;
#
# Some additional tests for new, faster alter table.
# Note that most of the alter table code is already exercised
# throughout the rest of the test suite.
#
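# (Based on the server changes in this changeset, the old copying
# behaviour should still be selectable per session, presumably with
# SET SESSION old_alter_table=1, since old_alter_table is added here
# as a thread variable.)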
create table t1 (v varchar(32));
insert into t1 values ('def'),('abc'),('hij'),('3r4f');
select * from t1;
# Fast alter, no copy performed
alter table t1 change v v2 varchar(32);
select * from t1;
# Fast alter, no copy performed
alter table t1 change v2 v varchar(64);
select * from t1;
update t1 set v = 'lmn' where v = 'hij';
select * from t1;
# Regular alter table
alter table t1 add i int auto_increment not null primary key first;
select * from t1;
update t1 set i=5 where i=3;
select * from t1;
alter table t1 change i i bigint;
select * from t1;
alter table t1 add unique key (i, v);
select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
......@@ -67,6 +67,7 @@ inline int field_type2index (enum_field_types field_type)
((int)FIELDTYPE_TEAR_FROM) + (field_type - FIELDTYPE_TEAR_TO) - 1);
}
static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
{
/* MYSQL_TYPE_DECIMAL -> */
......@@ -5906,6 +5907,26 @@ int Field_str::store(double nr)
}
uint Field::is_equal(create_field *new_field)
{
return (new_field->sql_type == type());
}
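/*
  The base Field::is_equal() above compares only the SQL type; the
  Field_str, Field_varstring and Field_num overrides add stricter
  checks (character set, length, binary/unsigned/auto_increment flags).
*/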
uint Field_str::is_equal(create_field *new_field)
{
if (((new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) &&
!(flags & (BINCMP_FLAG | BINARY_FLAG))) ||
(!(new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) &&
(flags & (BINCMP_FLAG | BINARY_FLAG))))
return 0; /* One of the fields is binary and the other one isn't */
return ((new_field->sql_type == type()) &&
new_field->charset == field_charset &&
new_field->length == max_length());
}
int Field_string::store(longlong nr)
{
char buff[64];
......@@ -6665,6 +6686,22 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root,
}
uint Field_varstring::is_equal(create_field *new_field)
{
if (new_field->sql_type == type() &&
new_field->charset == field_charset)
{
if (new_field->length == max_length())
return IS_EQUAL_YES;
if (new_field->length > max_length() &&
((new_field->length <= 255 && max_length() <= 255) ||
(new_field->length > 255 && max_length() > 255)))
return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer variable length
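/*
  Illustration: VARCHAR(32) -> VARCHAR(64) stays on the same side of
  the 255 boundary, so IS_EQUAL_PACK_LENGTH is returned (an frm-only
  change if the engine accepts it), whereas VARCHAR(200) -> VARCHAR(300)
  crosses the boundary and falls through to IS_EQUAL_NO, forcing a copy.
*/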
}
return IS_EQUAL_NO;
}
/****************************************************************************
** blob type
** A blob is saved as a length and a pointer. The length is stored in the
......@@ -7774,6 +7811,17 @@ bool Field_num::eq_def(Field *field)
}
uint Field_num::is_equal(create_field *new_field)
{
return ((new_field->sql_type == type()) &&
((new_field->flags & UNSIGNED_FLAG) == (uint) (flags &
UNSIGNED_FLAG)) &&
((new_field->flags & AUTO_INCREMENT_FLAG) ==
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
(new_field->length >= max_length()));
}
/*
Bit field.
......
......@@ -29,6 +29,7 @@
class Send_field;
class Protocol;
class create_field;
struct st_cache_field;
void field_conv(Field *to,Field *from);
......@@ -315,6 +316,8 @@ public:
int warn_if_overflow(int op_result);
/* maximum possible display length */
virtual uint32 max_length()= 0;
virtual uint is_equal(create_field *new_field);
/* convert decimal to longlong with overflow check */
longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag,
int *err);
......@@ -355,6 +358,7 @@ public:
bool eq_def(Field *field);
int store_decimal(const my_decimal *);
my_decimal *val_decimal(my_decimal *);
uint is_equal(create_field *new_field);
};
......@@ -379,6 +383,7 @@ public:
uint32 max_length() { return field_length; }
friend class create_field;
my_decimal *val_decimal(my_decimal *);
uint is_equal(create_field *new_field);
};
......@@ -1097,6 +1102,7 @@ public:
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
uint is_equal(create_field *new_field);
};
......
......@@ -2653,4 +2653,14 @@ int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2)
return 0;
}
bool ha_berkeley::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
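/* Accept both IS_EQUAL_YES and IS_EQUAL_PACK_LENGTH changes, since
   IS_EQUAL_PACK_LENGTH compares greater than IS_EQUAL_YES. */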
if (table_changes < IS_EQUAL_YES)
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
#endif /* HAVE_BERKELEY_DB */
......@@ -152,6 +152,7 @@ class ha_berkeley: public handler
uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
bool primary_key_is_clustered() { return true; }
int cmp_ref(const byte *ref1, const byte *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
};
extern bool berkeley_shared_data;
......
......@@ -629,3 +629,15 @@ ulonglong ha_heap::get_auto_increment()
ha_heap::info(HA_STATUS_AUTO);
return auto_increment_value;
}
bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
/* Check that auto_increment value was not changed */
if ((table_changes != IS_EQUAL_YES &&
info->used_fields & HA_CREATE_USED_AUTO) &&
info->auto_increment_value != 0)
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
......@@ -104,6 +104,7 @@ public:
HEAP_PTR ptr2=*(HEAP_PTR*)ref2;
return ptr1 < ptr2? -1 : (ptr1 > ptr2? 1 : 0);
}
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
private:
void update_key_stats();
};
......@@ -7132,4 +7132,24 @@ innobase_rollback_by_xid(
}
}
bool ha_innobase::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
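/* Only IS_EQUAL_YES is accepted here; a changed pack length
   (IS_EQUAL_PACK_LENGTH) also forces a copy. */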
if (table_changes != IS_EQUAL_YES)
return COMPATIBLE_DATA_NO;
/* Check that auto_increment value was not changed */
if ((info->used_fields & HA_CREATE_USED_AUTO) &&
info->auto_increment_value != 0)
return COMPATIBLE_DATA_NO;
/* Check that row format didn't change */
if ((info->used_fields & HA_CREATE_USED_ROW_FORMAT) &&
get_row_type() != info->row_type)
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
#endif /* HAVE_INNOBASE_DB */
......@@ -202,6 +202,8 @@ class ha_innobase: public handler
static ulonglong get_mysql_bin_log_pos();
bool primary_key_is_clustered() { return true; }
int cmp_ref(const byte *ref1, const byte *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
};
extern struct show_var_st innodb_status_variables[];
......
......@@ -1697,3 +1697,25 @@ uint ha_myisam::checksum() const
return (uint)file->s->state.checksum;
}
bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
uint options= table->s->db_options_in_use;
if (info->auto_increment_value != auto_increment_value ||
info->raid_type != raid_type ||
info->raid_chunks != raid_chunks ||
info->raid_chunksize != raid_chunksize ||
info->data_file_name != data_file_name ||
info->index_file_name != index_file_name ||
table_changes == IS_EQUAL_NO)
return COMPATIBLE_DATA_NO;
if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
HA_OPTION_DELAY_KEY_WRITE)) !=
(info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
HA_OPTION_DELAY_KEY_WRITE)))
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
......@@ -123,6 +123,7 @@ class ha_myisam: public handler
int backup(THD* thd, HA_CHECK_OPT* check_opt);
int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
#ifdef HAVE_REPLICATION
int dump(THD* thd, int fd);
int net_read_dump(NET* net);
......
......@@ -515,3 +515,14 @@ void ha_myisammrg::append_create_info(String *packet)
}
packet->append(')');
}
bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
/*
For myisammrg, we should always re-generate the mapping file as this
is trivial to do
*/
return COMPATIBLE_DATA_NO;
}
......@@ -82,4 +82,5 @@ class ha_myisammrg: public handler
void update_create_info(HA_CREATE_INFO *create_info);
void append_create_info(String *packet);
MYRG_INFO *myrg_info() { return file; }
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
};
......@@ -7498,4 +7498,27 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
tab.setNodeGroupIds(&node_group, no_fragments);
DBUG_VOID_RETURN;
}
bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
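/* Fast alter is effectively disabled for ndbcluster: because of the
   unconditional return below, the checks that follow are currently
   unreachable. */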
return COMPATIBLE_DATA_NO;
if (table_changes != IS_EQUAL_YES)
return COMPATIBLE_DATA_NO;
/* Check that auto_increment value was not changed */
if ((info->used_fields & HA_CREATE_USED_AUTO) &&
info->auto_increment_value != 0)
return COMPATIBLE_DATA_NO;
/* Check that row format didn't change */
if ((info->used_fields & HA_CREATE_USED_ROW_FORMAT) &&
get_row_type() != info->row_type)
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
#endif /* HAVE_NDBCLUSTER_DB */
......@@ -540,6 +540,10 @@ static void set_tabname(const char *pathname, char *tabname);
uint key_length,
qc_engine_callback *engine_callback,
ulonglong *engine_data);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
private:
int alter_table_name(const char *to);
int drop_table();
......
......@@ -226,6 +226,9 @@ typedef ulonglong my_xid; // this line is the same as in log_event.h
#define MAXGTRIDSIZE 64
#define MAXBQUALSIZE 64
#define COMPATIBLE_DATA_YES 0
#define COMPATIBLE_DATA_NO 1
struct xid_t {
long formatID;
long gtrid_length;
......@@ -1258,6 +1261,9 @@ public:
Pops the top if condition stack, if stack is not empty
*/
virtual void cond_pop() { return; };
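/*
  The default implementation is conservative: an engine that does not
  override this always reports COMPATIBLE_DATA_NO, so ALTER TABLE falls
  back to the copying code path.
*/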
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{ return COMPATIBLE_DATA_NO; }
};
/* Some extern variables used with handlers */
......
......@@ -392,6 +392,13 @@ void debug_sync_point(const char* lock_name, uint lock_timeout);
#define STRING_BUFFER_USUAL_SIZE 80
/*
Return values of the Field::is_equal() family of member functions.
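IS_EQUAL_NO           field definitions differ, a copying ALTER TABLE is needed
IS_EQUAL_YES          field definitions are identical
IS_EQUAL_PACK_LENGTH  data is compatible, the field only grows within the
                      same pack length; whether a copy is needed is up to
                      the engine's check_if_incompatible_data()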
*/
#define IS_EQUAL_NO 0
#define IS_EQUAL_YES 1
#define IS_EQUAL_PACK_LENGTH 2
enum enum_parsing_place
{
NO_MATTER,
......
......@@ -4341,6 +4341,7 @@ enum options_mysqld
OPT_ENABLE_SHARED_MEMORY,
OPT_SHARED_MEMORY_BASE_NAME,
OPT_OLD_PASSWORDS,
OPT_OLD_ALTER_TABLE,
OPT_EXPIRE_LOGS_DAYS,
OPT_GROUP_CONCAT_MAX_LEN,
OPT_DEFAULT_COLLATION,
......@@ -4877,6 +4878,11 @@ Disable with --skip-ndbcluster (will save memory).",
(gptr*) &opt_no_mix_types, (gptr*) &opt_no_mix_types, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
#endif
{"old-alter-table", OPT_OLD_ALTER_TABLE,
"Use old, non-optimized alter table.",
(gptr*) &global_system_variables.old_alter_table,
(gptr*) &max_system_variables.old_alter_table, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"old-passwords", OPT_OLD_PASSWORDS, "Use old password encryption method (needed for 4.0 and older clients).",
(gptr*) &global_system_variables.old_passwords,
(gptr*) &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG,
......@@ -6069,6 +6075,7 @@ static void mysql_init_variables(void)
global_system_variables.max_join_size= (ulonglong) HA_POS_ERROR;
max_system_variables.max_join_size= (ulonglong) HA_POS_ERROR;
global_system_variables.old_passwords= 0;
global_system_variables.old_alter_table= 0;
/* Variables that depends on compile options */
#ifndef DBUG_OFF
......
......@@ -284,6 +284,8 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count",
&SV::net_retry_count,
0, fix_net_retry_count);
sys_var_thd_bool sys_new_mode("new", &SV::new_mode);
sys_var_thd_bool sys_old_alter_table("old_alter_table",
&SV::old_alter_table);
sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords);
sys_var_thd_ulong sys_optimizer_prune_level("optimizer_prune_level",
&SV::optimizer_prune_level);
......@@ -632,6 +634,7 @@ sys_var *sys_variables[]=
&sys_net_wait_timeout,
&sys_net_write_timeout,
&sys_new_mode,
&sys_old_alter_table,
&sys_old_passwords,
&sys_optimizer_prune_level,
&sys_optimizer_search_depth,
......@@ -907,6 +910,7 @@ struct show_var_st init_vars[]= {
{sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS},
{sys_net_write_timeout.name,(char*) &sys_net_write_timeout, SHOW_SYS},
{sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS},
{sys_old_alter_table.name, (char*) &sys_old_alter_table, SHOW_SYS},
{sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS},
{"open_files_limit", (char*) &open_files_limit, SHOW_LONG},
{sys_optimizer_prune_level.name, (char*) &sys_optimizer_prune_level,
......
......@@ -877,6 +877,7 @@ public:
/* updated in sql_acl.cc */
extern sys_var_thd_bool sys_old_alter_table;
extern sys_var_thd_bool sys_old_passwords;
extern LEX_STRING default_key_cache_base;
......
......@@ -564,6 +564,7 @@ struct system_variables
my_bool ndb_use_exact_count;
my_bool ndb_use_transactions;
#endif /* HAVE_NDBCLUSTER_DB */
my_bool old_alter_table;
my_bool old_passwords;
/* Only charset part of these variables is sensible */
......
......@@ -649,6 +649,7 @@ typedef class st_select_lex SELECT_LEX;
#define ALTER_KEYS_ONOFF 512
#define ALTER_CONVERT 1024
#define ALTER_FORCE 2048
#define ALTER_RECREATE 4096
typedef struct st_alter_info
{
......
......@@ -1347,6 +1347,34 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}
/*
Set table default charset, if not set
SYNOPSIS
set_table_default_charset()
create_info Table create information
DESCRIPTION
If the table character set was not given explicitly,
let's fetch the database default character set and
apply it to the table.
*/
static void set_table_default_charset(THD *thd,
HA_CREATE_INFO *create_info, char *db)
{
if (!create_info->default_table_charset)
{
HA_CREATE_INFO db_info;
char path[FN_REFLEN];
/* Abuse build_table_path() to build the path to the db.opt file */
build_table_path(path, sizeof(path), db, MY_DB_OPT_FILE, "");
load_db_opt(thd, path, &db_info);
create_info->default_table_charset= db_info.default_table_charset;
}
}
/*
Extend long VARCHAR fields to blob & prepare field if it's a blob
......@@ -1581,20 +1609,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
}
#endif
/*
If the table character set was not given explicitly,
let's fetch the database default character set and
apply it to the table.
*/
if (!create_info->default_table_charset)
{
HA_CREATE_INFO db_info;
char path[FN_REFLEN];
/* Abuse build_table_path() to build the path to the db.opt file */
build_table_path(path, sizeof(path), db, MY_DB_OPT_FILE, "");
load_db_opt(thd, path, &db_info);
create_info->default_table_charset= db_info.default_table_charset;
}
set_table_default_charset(thd, create_info, (char*) db);
if (mysql_prepare_table(thd, create_info, &fields,
&keys, internal_tmp_table, &db_options, file,
......@@ -3090,6 +3105,166 @@ int mysql_drop_indexes(THD *thd, TABLE_LIST *table_list,
#endif /* NOT_USED */
#define ALTER_TABLE_DATA_CHANGED 1
#define ALTER_TABLE_INDEX_CHANGED 2
/*
SYNOPSIS
compare_tables()
table            original table
create_list      fields in new table
key_list         keys in new table
create_info      create options in new table
alter_info       alter operation flags for the new table
order_num        number of ORDER BY columns given, if any
DESCRIPTION
'table' (the first argument) describes the original table, while
create_list, key_list and create_info describe the corresponding
parts of the new table.  By comparing the two definitions we can
determine how much the table changes with this ALTER TABLE and
whether we need to copy the data or only change the .frm file.
RETURN VALUES
0 No copy needed
1 Data changes, copy needed
2 Index changes, copy needed
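(1 and 2 correspond to ALTER_TABLE_DATA_CHANGED and
ALTER_TABLE_INDEX_CHANGED above; 0 means only the .frm file is
rewritten.)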
*/
uint compare_tables(TABLE *table, List<create_field> *create_list,
List<Key> *key_list, HA_CREATE_INFO *create_info,
ALTER_INFO *alter_info, uint order_num)
{
Field **f_ptr, *field;
uint changes= 0, tmp;
List_iterator_fast<create_field> new_field_it(*create_list);
create_field *new_field;
/*
Some very basic checks: if the number of fields or the storage
engine changes, we have to run a full ALTER TABLE.  In the future
new fields may be added and old ones dropped without a copy, but
not yet.
Also check whether an engine was given explicitly in the ALTER TABLE
statement (e.g. ALTER TABLE tbl_name ENGINE=MyISAM); if so, we are
forced to run a regular (copying) alter table.
For the following statements we also want a regular alter table:
ALTER TABLE tbl_name ORDER BY ..
ALTER TABLE tbl_name CONVERT TO CHARACTER SET ..
At the moment we cannot alter temporary tables without a copy.
We also check whether OPTIMIZE TABLE was mapped to ALTER TABLE;
in that case we always do a full copy.
*/
if (table->s->fields != create_list->elements ||
table->s->db_type != create_info->db_type ||
table->s->tmp_table ||
create_info->used_fields & HA_CREATE_USED_ENGINE ||
create_info->used_fields & HA_CREATE_USED_CHARSET ||
create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET ||
(alter_info->flags & ALTER_RECREATE) ||
order_num)
return ALTER_TABLE_DATA_CHANGED;
/*
Go through fields and check if the original ones are compatible
with new table.
*/
for (f_ptr= table->field, new_field= new_field_it++;
(field= *f_ptr); f_ptr++, new_field= new_field_it++)
{
/* Make sure we have at least the default charset in use. */
if (!new_field->charset)
new_field->charset= create_info->default_table_charset;
/* Check that NULL behavior is the same for the old and new fields */
if ((new_field->flags & NOT_NULL_FLAG) !=
(uint) (field->flags & NOT_NULL_FLAG))
return ALTER_TABLE_DATA_CHANGED;
/* Don't pack rows in old tables if the user has requested this. */
if (create_info->row_type == ROW_TYPE_DYNAMIC ||
(new_field->flags & BLOB_FLAG) ||
(new_field->sql_type == MYSQL_TYPE_VARCHAR &&
create_info->row_type != ROW_TYPE_FIXED))
create_info->table_options|= HA_OPTION_PACK_RECORD;
/* Evaluate changes bitmap and send to check_if_incompatible_data() */
if (!(tmp= field->is_equal(new_field)))
return ALTER_TABLE_DATA_CHANGED;
changes|= tmp;
}
/* Check if changes are compatible with current handler without a copy */
if (table->file->check_if_incompatible_data(create_info, changes))
return ALTER_TABLE_DATA_CHANGED;
/*
Go through keys and check if the original ones are compatible
with new table.
*/
KEY *table_key_info= table->key_info;
List_iterator_fast<Key> key_it(*key_list);
Key *key= key_it++;
/* Check if the number of keys has changed */
if (table->s->keys != key_list->elements)
return ALTER_TABLE_INDEX_CHANGED;
for (uint i= 0; i < table->s->keys; i++, table_key_info++, key= key_it++)
{
/*
Check that the key types are compatible between old and new tables.
*/
if (table_key_info->algorithm != key->algorithm ||
((key->type == Key::PRIMARY || key->type == Key::UNIQUE) &&
!(table_key_info->flags & HA_NOSAME)) ||
(!(key->type == Key::PRIMARY || key->type == Key::UNIQUE) &&
(table_key_info->flags & HA_NOSAME)) ||
((key->type == Key::SPATIAL) &&
!(table_key_info->flags & HA_SPATIAL)) ||
(!(key->type == Key::SPATIAL) &&
(table_key_info->flags & HA_SPATIAL)) ||
((key->type == Key::FULLTEXT) &&
!(table_key_info->flags & HA_FULLTEXT)) ||
(!(key->type == Key::FULLTEXT) &&
(table_key_info->flags & HA_FULLTEXT)))
return ALTER_TABLE_INDEX_CHANGED;
if (table_key_info->key_parts != key->columns.elements)
return ALTER_TABLE_INDEX_CHANGED;
/*
Check that the key parts remain compatible between the old and
new tables.
*/
KEY_PART_INFO *table_key_part= table_key_info->key_part;
List_iterator_fast<key_part_spec> key_part_it(key->columns);
key_part_spec *key_part= key_part_it++;
for (uint j= 0; j < table_key_info->key_parts; j++,
table_key_part++, key_part= key_part_it++)
{
/*
Key definition has changed if we are using a different field or
if the used key length is different
(If key_part->length == 0 it means we are using the whole field)
*/
if (strcmp(key_part->field_name, table_key_part->field->field_name) ||
(key_part->length && key_part->length != table_key_part->length) ||
(key_part->length == 0 && table_key_part->length !=
table_key_part->field->pack_length()))
return ALTER_TABLE_INDEX_CHANGED;
}
}
return 0; // Tables are compatible
}
/*
Alter table
*/
......@@ -3111,7 +3286,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
ulonglong next_insert_id;
uint db_create_options, used_fields;
enum db_type old_db_type,new_db_type;
bool need_copy_table;
uint need_copy_table= 0;
DBUG_ENTER("mysql_alter_table");
thd->proc_info="init";
......@@ -3389,8 +3564,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
def_it.remove();
}
}
else
{ // Use old field value
else // This field was not dropped and not changed, add it to the list
{ // for the new table.
create_list.push_back(def=new create_field(field,field));
alter_it.rewind(); // Change default if ALTER
Alter_column *alter;
......@@ -3603,17 +3778,22 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (table->s->tmp_table)
create_info->options|=HA_LEX_CREATE_TMP_TABLE;
set_table_default_charset(thd, create_info, db);
if (thd->variables.old_alter_table)
need_copy_table= 1;
else
need_copy_table= compare_tables(table, &create_list, &key_list,
create_info, alter_info, order_num);
/*
better have a negative test here, instead of positive, like
alter_info->flags & ALTER_ADD_COLUMN|ALTER_ADD_INDEX|...
so that ALTER TABLE won't break when somebody will add new flag
*/
need_copy_table= (alter_info->flags &
~(ALTER_CHANGE_COLUMN_DEFAULT|ALTER_OPTIONS) ||
(create_info->used_fields &
~(HA_CREATE_USED_COMMENT|HA_CREATE_USED_PASSWORD)) ||
table->s->tmp_table);
create_info->frm_only= !need_copy_table;
if (!need_copy_table)
create_info->frm_only= 1;
/*
Handling of symlinked tables:
......@@ -3920,7 +4100,7 @@ end_temporary:
err:
DBUG_RETURN(TRUE);
}
/* mysql_alter_table */
static int
copy_data_between_tables(TABLE *from,TABLE *to,
......@@ -4132,7 +4312,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
create_info.row_type=ROW_TYPE_NOT_USED;
create_info.default_table_charset=default_charset_info;
/* Force alter table to recreate table */
lex->alter_info.flags= ALTER_CHANGE_COLUMN;
lex->alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE);
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
......