Commit fd2efc88 authored by joreland@mysql.com

Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.0

into  mysql.com:/home/jonas/src/mysql-5.0
parents dc571a55 9ef31a28
@@ -6,7 +6,7 @@ AC_PREREQ(2.58)dnl Minimum Autoconf version required.
 AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # Don't forget to also update the NDB lines below.
-AM_INIT_AUTOMAKE(mysql, 5.0.9-beta)
+AM_INIT_AUTOMAKE(mysql, 5.0.10-beta)
 AM_CONFIG_HEADER(config.h)
 PROTOCOL_VERSION=10
@@ -17,7 +17,7 @@ SHARED_LIB_VERSION=14:0:0
 # ndb version
 NDB_VERSION_MAJOR=5
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=9
+NDB_VERSION_BUILD=10
 NDB_VERSION_STATUS="beta"
 # Set all version vars based on $VERSION. How do we do this more elegant ?
......
@@ -1249,36 +1249,18 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
 }
 
-int ha_ndbcluster::set_primary_key_from_old_data(NdbOperation *op, const byte *old_data)
+int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record)
 {
   KEY* key_info= table->key_info + table->s->primary_key;
   KEY_PART_INFO* key_part= key_info->key_part;
   KEY_PART_INFO* end= key_part+key_info->key_parts;
-  DBUG_ENTER("set_primary_key_from_old_data");
+  DBUG_ENTER("set_primary_key_from_record");
 
   for (; key_part != end; key_part++)
   {
     Field* field= key_part->field;
     if (set_ndb_key(op, field,
-                    key_part->fieldnr-1, old_data+key_part->offset))
-      ERR_RETURN(op->getNdbError());
-  }
-  DBUG_RETURN(0);
-}
-
-int ha_ndbcluster::set_primary_key(NdbOperation *op)
-{
-  DBUG_ENTER("set_primary_key");
-  KEY* key_info= table->key_info + table->s->primary_key;
-  KEY_PART_INFO* key_part= key_info->key_part;
-  KEY_PART_INFO* end= key_part+key_info->key_parts;
-
-  for (; key_part != end; key_part++)
-  {
-    Field* field= key_part->field;
-    if (set_ndb_key(op, field,
-                    key_part->fieldnr-1, field->ptr))
+                    key_part->fieldnr-1, record+key_part->offset))
       ERR_RETURN(op->getNdbError());
   }
   DBUG_RETURN(0);
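
Note: the hunk above folds set_primary_key_from_old_data() and the record-less set_primary_key() overload (which read key values via Field::ptr) into one helper. Below is a sketch of how the merged function reads after this change, reconstructed from the lines above; the surrounding ha_ndbcluster member context and the existing set_ndb_key()/ERR_RETURN helpers are assumed:

    int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record)
    {
      KEY* key_info= table->key_info + table->s->primary_key;
      KEY_PART_INFO* key_part= key_info->key_part;
      KEY_PART_INFO* end= key_part + key_info->key_parts;
      DBUG_ENTER("set_primary_key_from_record");

      // Walk every part of the primary key and bind its value on the NDB
      // operation, reading from the caller-supplied record buffer at the
      // key part's offset rather than from the Field's own buffer pointer.
      for (; key_part != end; key_part++)
      {
        Field* field= key_part->field;
        if (set_ndb_key(op, field,
                        key_part->fieldnr - 1, record + key_part->offset))
          ERR_RETURN(op->getNdbError());
      }
      DBUG_RETURN(0);
    }

With a single helper that always takes an explicit row buffer, the write_row() and delete_row() call sites further down no longer need to branch on m_primary_key_update to decide where the key bytes live.
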
@@ -1423,11 +1405,9 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
   if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
       op->readTuple(lm) != 0)
     ERR_RETURN(trans->getNdbError());
   int res;
-  if ((res= set_primary_key_from_old_data(op, old_data)))
+  if ((res= set_primary_key_from_record(op, old_data)))
     ERR_RETURN(trans->getNdbError());
   // Read all unreferenced non-key field(s)
   for (i= 0; i < no_fields; i++)
   {
@@ -1470,7 +1450,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
   Peek to check if a particular row already exists
 */
-int ha_ndbcluster::peek_row()
+int ha_ndbcluster::peek_row(const byte *record)
 {
   NdbTransaction *trans= m_active_trans;
   NdbOperation *op;
@@ -1483,7 +1463,7 @@ int ha_ndbcluster::peek_row()
     ERR_RETURN(trans->getNdbError());
   int res;
-  if ((res= set_primary_key(op)))
+  if ((res= set_primary_key_from_record(op, record)))
     ERR_RETURN(trans->getNdbError());
   if (execute_no_commit_ie(this,trans) != 0)
@@ -1928,7 +1908,7 @@ int ha_ndbcluster::write_row(byte *record)
   if (m_ignore_dup_key && table->s->primary_key != MAX_KEY)
   {
-    int peek_res= peek_row();
+    int peek_res= peek_row(record);
     if (!peek_res)
     {
@@ -1982,9 +1962,7 @@ int ha_ndbcluster::write_row(byte *record)
       m_skip_auto_increment= !auto_increment_column_changed;
     }
-    if ((res= (m_primary_key_update ?
-               set_primary_key_from_old_data(op, record)
-               : set_primary_key(op))))
+    if ((res= set_primary_key_from_record(op, record)))
       return res;
   }
@@ -2124,7 +2102,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   if ((table->s->primary_key != MAX_KEY) &&
       (key_cmp(table->s->primary_key, old_data, new_data)))
   {
-    int read_res, insert_res, delete_res;
+    int read_res, insert_res, delete_res, undo_res;
     DBUG_PRINT("info", ("primary key update, doing pk read+delete+insert"));
     // Get all old fields, since we optimize away fields not in query
@@ -2153,9 +2131,14 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
       DBUG_PRINT("info", ("insert failed"));
       if (trans->commitStatus() == NdbConnection::Started)
       {
-        // Undo write_row(new_data)
+        // Undo delete_row(old_data)
         m_primary_key_update= TRUE;
-        insert_res= write_row((byte *)old_data);
+        undo_res= write_row((byte *)old_data);
+        if (undo_res)
+          push_warning(current_thd,
+                       MYSQL_ERROR::WARN_LEVEL_WARN,
+                       undo_res,
+                       "NDB failed undoing delete at primary key update");
         m_primary_key_update= FALSE;
       }
       DBUG_RETURN(insert_res);
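
Note: a primary key change is executed here as delete(old_data) followed by insert(new_data). The hunk above changes the failure path: if the re-insert fails while the transaction is still open, the delete is undone by writing the old row back, and the result of that undo now lands in undo_res instead of overwriting insert_res; if the undo itself fails, a warning is pushed but the original insert error is still returned. A simplified outline of the control flow, reconstructed from the hunks above (surrounding update_row() code, DBUG_PRINT calls, and the m_primary_key_update toggles around the delete/insert steps are omitted or assumed):

    // Inside ha_ndbcluster::update_row(old_data, new_data), pk-change branch:
    read_res= complemented_pk_read(old_data, new_data);  // fetch fields not in the query
    if (read_res)
      DBUG_RETURN(read_res);
    delete_res= delete_row(old_data);                     // remove row under the old key
    if (delete_res)
      DBUG_RETURN(delete_res);
    if ((insert_res= write_row(new_data)))                // insert row under the new key
    {
      if (trans->commitStatus() == NdbConnection::Started)
      {
        // Undo delete_row(old_data): put the old row back.
        m_primary_key_update= TRUE;
        undo_res= write_row((byte *)old_data);
        if (undo_res)
          push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, undo_res,
                       "NDB failed undoing delete at primary key update");
        m_primary_key_update= FALSE;
      }
      DBUG_RETURN(insert_res);  // report the insert failure, not the undo result
    }
    DBUG_RETURN(0);

Returning insert_res rather than the undo result keeps the original failure visible to the server, while the pushed warning records that the automatic rollback of the delete also failed.
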
@@ -2204,7 +2187,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
   else
   {
     int res;
-    if ((res= set_primary_key_from_old_data(op, old_data)))
+    if ((res= set_primary_key_from_record(op, old_data)))
       DBUG_RETURN(res);
   }
 }
@@ -2289,9 +2272,7 @@ int ha_ndbcluster::delete_row(const byte *record)
     else
     {
       int res;
-      if ((res= (m_primary_key_update ?
-                 set_primary_key_from_old_data(op, record)
-                 : set_primary_key(op))))
+      if ((res= set_primary_key_from_record(op, record)))
         return res;
     }
   }
......
@@ -551,7 +551,7 @@ private:
   int pk_read(const byte *key, uint key_len, byte *buf);
   int complemented_pk_read(const byte *old_data, byte *new_data);
-  int peek_row();
+  int peek_row(const byte *record);
   int unique_index_read(const byte *key, uint key_len,
                         byte *buf);
   int ordered_index_scan(const key_range *start_key,
@@ -580,8 +580,7 @@ private:
   friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
   int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
   int set_primary_key(NdbOperation *op, const byte *key);
-  int set_primary_key(NdbOperation *op);
-  int set_primary_key_from_old_data(NdbOperation *op, const byte *old_data);
+  int set_primary_key_from_record(NdbOperation *op, const byte *record);
   int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0);
   int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
   int set_index_key(NdbOperation *, const KEY *key_info, const byte *key_ptr);
......