Commit 62e111e6 authored by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-5.1-runtime
into  bodhi.(none):/opt/local/work/mysql-5.1-runtime


sql/sql_table.cc:
  Auto merged
parents 25fa7692 226700ee

set autocommit=1;
reset master;
create table bug16206 (a int);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
f n Query 1 n use `test`; create table bug16206 (a int)
f n Query 1 n use `test`; insert into bug16206 values(1)
f n Query 1 n use `test`; insert into bug16206 values(2)
drop table bug16206;
reset master;
create table bug16206 (a int) engine= bdb;
insert into bug16206 values(0);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
insert into bug16206 values(3);
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb
f n Query 1 n use `test`; insert into bug16206 values(0)
f n Query 1 n use `test`; insert into bug16206 values(1)
f n Query 1 n use `test`; BEGIN
f n Query 1 n use `test`; insert into bug16206 values(2)
f n Query 1 n use `test`; COMMIT
f n Query 1 n use `test`; insert into bug16206 values(3)
drop table bug16206;
set autocommit=0;
End of 5.0 tests

-- source include/not_embedded.inc
-- source include/have_bdb.inc
#
# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode
#
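# The scenario below runs twice: first against a table in the default
# (non-transactional) engine, then against an ENGINE=BDB table. With
# autocommit=1, BEGIN/COMMIT events should appear in the binlog only
# around the explicit START TRANSACTION ... COMMIT block, not around
# every autocommitted BDB statement.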
set autocommit=1;
let $VERSION=`select version()`;
reset master;
create table bug16206 (a int);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
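# Binlog positions and the log file name vary between runs, so the two
# directives below normalize the output to match the .result file:
# the server version string becomes VERSION, column 1 (Log_name)
# becomes "f", and columns 2 and 5 (Pos, End_log_pos) become "n".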
--replace_result $VERSION VERSION
--replace_column 1 f 2 n 5 n
show binlog events;
drop table bug16206;
reset master;
create table bug16206 (a int) engine= bdb;
insert into bug16206 values(0);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
insert into bug16206 values(3);
--replace_result $VERSION VERSION
--replace_column 1 f 2 n 5 n
show binlog events;
drop table bug16206;
set autocommit=0;
--echo End of 5.0 tests

sql/ha_partition.cc:
@@ -1574,9 +1574,13 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
     }
     else
     {
+      THD *thd= ha_thd();
       /* Copy record to new handler */
       copied++;
-      if ((result= m_new_file[new_part]->write_row(m_rec0)))
+      tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+      result= m_new_file[new_part]->ha_write_row(m_rec0);
+      reenable_binlog(thd);
+      if (result)
         goto error;
     }
   }
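
Note: the tmp_disable_binlog()/reenable_binlog() pair used throughout these
hunks comes from sql/sql_class.h. Below is a minimal standalone model of
what the pair does; the THD struct and the OPTION_BIN_LOG bit value are
simplified stand-ins, not the server's definitions.

#include <cstdint>
#include <cstdio>

static const uint64_t OPTION_BIN_LOG= 1ULL << 2;   // illustrative bit value

struct THD { uint64_t options; };

// Modeled on the sql_class.h macros: save the option bits, clear
// OPTION_BIN_LOG, and restore the saved bits on reenable. The pair
// opens and closes a block scope, so it must be used in matched pairs.
#define tmp_disable_binlog(A) \
  { uint64_t tmp_disable_binlog__save_options= (A)->options; \
    (A)->options&= ~OPTION_BIN_LOG

#define reenable_binlog(A) (A)->options= tmp_disable_binlog__save_options; }

int main()
{
  THD thd= { OPTION_BIN_LOG };
  tmp_disable_binlog(&thd);   // logging suppressed inside this scope
  std::printf("inside: %d\n", (int)((thd.options & OPTION_BIN_LOG) != 0));
  reenable_binlog(&thd);      // original option bits restored
  std::printf("after:  %d\n", (int)((thd.options & OPTION_BIN_LOG) != 0));
  return 0;
}

The wrapped engine-level call therefore runs with statement binlogging
switched off, so only the top-level statement against the partitioned
table is logged, not the per-partition row operations.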
@@ -2694,6 +2698,7 @@ int ha_partition::write_row(uchar * buf)
   longlong func_value;
   bool autoincrement_lock= FALSE;
   my_bitmap_map *old_map;
+  THD *thd= ha_thd();
 #ifdef NOT_NEEDED
   uchar *rec0= m_rec0;
 #endif
@@ -2765,7 +2770,9 @@ int ha_partition::write_row(uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
-  error= m_file[part_id]->write_row(buf);
+  tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+  error= m_file[part_id]->ha_write_row(buf);
+  reenable_binlog(thd);
 exit:
   if (autoincrement_lock)
     pthread_mutex_unlock(&table_share->mutex);
@@ -2806,6 +2813,7 @@ int ha_partition::write_row(uchar * buf)
 int ha_partition::update_row(const uchar *old_data, uchar *new_data)
 {
+  THD *thd= ha_thd();
   uint32 new_part_id, old_part_id;
   int error= 0;
   longlong func_value;
@@ -2840,16 +2848,25 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   if (new_part_id == old_part_id)
   {
     DBUG_PRINT("info", ("Update in partition %d", new_part_id));
-    error= m_file[new_part_id]->update_row(old_data, new_data);
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[new_part_id]->ha_update_row(old_data, new_data);
+    reenable_binlog(thd);
     goto exit;
   }
   else
   {
     DBUG_PRINT("info", ("Update from partition %d to partition %d",
                         old_part_id, new_part_id));
-    if ((error= m_file[new_part_id]->write_row(new_data)))
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[new_part_id]->ha_write_row(new_data);
+    reenable_binlog(thd);
+    if (error)
       goto exit;
-    if ((error= m_file[old_part_id]->delete_row(old_data)))
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[old_part_id]->ha_delete_row(old_data);
+    reenable_binlog(thd);
+    if (error)
     {
 #ifdef IN_THE_FUTURE
       (void) m_file[new_part_id]->delete_last_inserted_row(new_data);
@@ -3980,7 +3997,7 @@ int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
 int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
 {
-  handler *file= file= m_file[m_part_spec.start_part];
+  handler *file= m_file[m_part_spec.start_part];
   int error;
   DBUG_ENTER("ha_partition::handle_unordered_next");

sql/handler.h:
@@ -1674,22 +1674,6 @@ class handler :public Sql_alloc
                                  uint table_changes)
   { return COMPATIBLE_DATA_NO; }
-  /** These are only called from sql_select for internal temporary tables */
-  virtual int write_row(uchar *buf __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-  virtual int update_row(const uchar *old_data __attribute__((unused)),
-                         uchar *new_data __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-  virtual int delete_row(const uchar *buf __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
   /**
     use_hidden_primary_key() is called in case of an update/delete when
     (table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
@@ -1721,6 +1705,21 @@ class handler :public Sql_alloc
   */
   virtual int rnd_init(bool scan)= 0;
   virtual int rnd_end() { return 0; }
+  virtual int write_row(uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
+  virtual int update_row(const uchar *old_data __attribute__((unused)),
+                         uchar *new_data __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
+  virtual int delete_row(const uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
   /**
     Reset state of file to after 'open'.
     This function is called after every statement for all tables used
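
The relocation above moves the write_row()/update_row()/delete_row()
virtuals next to the other row-access hooks; call sites now go through the
public ha_write_row()/ha_update_row()/ha_delete_row() wrappers instead (see
the sql_select.cc and sql_table.cc hunks below). A standalone sketch of
that wrapper shape, with hypothetical class names rather than the real
handler class:

#include <cstdio>

static const int HA_ERR_WRONG_COMMAND= 131;   // value from my_base.h

class Handler
{
public:
  virtual ~Handler() {}

  // Public non-virtual entry point: one place to hang statistics,
  // logging, or (as in this tree) binlog hooks around the engine call.
  int ha_write_row(unsigned char *buf)
  {
    std::printf("ha_write_row: common bookkeeping\n");
    return write_row(buf);
  }

protected:
  // Engine-specific hook; engines that cannot insert keep the default.
  virtual int write_row(unsigned char *)
  {
    return HA_ERR_WRONG_COMMAND;
  }
};

class HeapLikeEngine : public Handler
{
protected:
  virtual int write_row(unsigned char *)
  {
    std::printf("HeapLikeEngine::write_row\n");
    return 0;
  }
};

int main()
{
  HeapLikeEngine engine;
  unsigned char row[8]= {0};
  return engine.ha_write_row(row);   // bookkeeping, then the engine write
}

Centralizing the wrapper means per-row bookkeeping can live in one place
instead of being repeated at every engine call site.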

sql/sql_select.cc:
@@ -10554,13 +10554,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
   */
   while (!table->file->rnd_next(new_table.record[1]))
   {
-    write_err= new_table.file->write_row(new_table.record[1]);
+    write_err= new_table.file->ha_write_row(new_table.record[1]);
     DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
     if (write_err)
       goto err;
   }
   /* copy row that filled HEAP table */
-  if ((write_err=new_table.file->write_row(table->record[0])))
+  if ((write_err=new_table.file->ha_write_row(table->record[0])))
   {
     if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
         !ignore_last_dupp_key_error)
@@ -12023,7 +12023,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     {
       int error;
       join->found_records++;
-      if ((error=table->file->write_row(table->record[0])))
+      if ((error=table->file->ha_write_row(table->record[0])))
       {
         if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
           goto end;
@@ -12085,7 +12085,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   { /* Update old record */
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->update_row(table->record[1],
+    if ((error=table->file->ha_update_row(table->record[1],
                                        table->record[0])))
     {
       table->file->print_error(error,MYF(0)); /* purecov: inspected */
@@ -12109,7 +12109,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   }
   init_tmptable_sum_functions(join->sum_funcs);
   copy_funcs(join->tmp_table_param.items_to_copy);
-  if ((error=table->file->write_row(table->record[0])))
+  if ((error=table->file->ha_write_row(table->record[0])))
   {
     if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
                                 error, 0))
@@ -12145,7 +12145,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   copy_fields(&join->tmp_table_param); // Groups are copied twice.
   copy_funcs(join->tmp_table_param.items_to_copy);
-  if (!(error=table->file->write_row(table->record[0])))
+  if (!(error=table->file->ha_write_row(table->record[0])))
     join->send_records++; // New group
   else
   {
@@ -12161,7 +12161,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     }
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->update_row(table->record[1],
+    if ((error=table->file->ha_update_row(table->record[1],
                                        table->record[0])))
     {
       table->file->print_error(error,MYF(0)); /* purecov: inspected */
@@ -12205,7 +12205,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
                        join->sum_funcs_end[send_group_parts]);
         if (!join->having || join->having->val_int())
         {
-          int error= table->file->write_row(table->record[0]);
+          int error= table->file->ha_write_row(table->record[0]);
           if (error && create_myisam_from_heap(join->thd, table,
                                                &join->tmp_table_param,
                                                error, 0))
@@ -13433,7 +13433,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
     }
     if (having && !having->val_int())
     {
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
         goto err;
       error=file->rnd_next(record);
       continue;
@@ -13460,7 +13460,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
       }
       if (compare_record(table, first_field) == 0)
       {
-        if ((error=file->delete_row(record)))
+        if ((error=file->ha_delete_row(record)))
           goto err;
       }
       else if (!found)
@@ -13557,7 +13557,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
     }
     if (having && !having->val_int())
     {
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
        goto err;
      continue;
    }
@@ -13574,7 +13574,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
     if (hash_search(&hash, org_key_pos, key_length))
     {
       /* Duplicated found ; Remove the row */
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
         goto err;
     }
     else
@@ -15582,7 +15582,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
         item->save_in_result_field(1);
     }
     copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
-    if ((write_error= table_arg->file->write_row(table_arg->record[0])))
+    if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
     {
       if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
                                   write_error, 0))
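
Several of the call sites above (create_myisam_from_heap(), end_update(),
end_write_group(), rollup_write_data()) share a spill-to-disk pattern: if
ha_write_row() fails on the in-memory tmp table, the table is converted to
an on-disk one and the write is retried. A minimal standalone model of
that pattern, with hypothetical types, not the server code:

#include <cstdio>

struct TmpTable
{
  bool on_disk;
  int rows;
  int capacity;                       // tiny limit to force a spill

  TmpTable() : on_disk(false), rows(0), capacity(2) {}

  int ha_write_row()                  // 0 on success, 1 on "table is full"
  {
    if (!on_disk && rows >= capacity)
      return 1;
    ++rows;
    return 0;
  }

  int convert_to_disk()               // create_myisam_from_heap analogue
  {
    on_disk= true;                    // copy existing rows, swap handlers
    return 0;                         // 0 on success
  }
};

static int write_with_fallback(TmpTable &t)
{
  int err= t.ha_write_row();
  if (!err)
    return 0;
  if (t.convert_to_disk())            // conversion failed: keep the error
    return err;
  return t.ha_write_row();            // retry the row that overflowed
}

int main()
{
  TmpTable t;
  for (int i= 0; i < 5; i++)
    if (write_with_fallback(t))
      return 1;
  std::printf("rows=%d on_disk=%d\n", t.rows, (int)t.on_disk);
  return 0;
}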

sql/sql_table.cc:
@@ -7059,7 +7059,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
       copy_ptr->do_copy(copy_ptr);
     }
     prev_insert_id= to->file->next_insert_id;
-    error=to->file->write_row(to->record[0]);
+    error=to->file->ha_write_row(to->record[0]);
     to->auto_increment_field_not_null= FALSE;
     if (error)
     {