Commit 075eb338 authored by unknown

Use correct access method, found using sql-bench and comparing with other handlers.


BitKeeper/deleted/.del-regression.sh~c19f771726612629:
  Delete: ndb/bin/regression.sh
sql/ha_ndbcluster.cc:
  Change index flags: remove HA_WRONG_ASCII_ORDER and HA_ONLY_WHOLE_INDEX.
  HA_WRONG_ASCII_ORDER was used in field::optimize_range, and if it was set the index was hardly ever used.
  Correct the check of start_key and start_key->flag != HA_READ_KEY_EXACT; ignore end_key in such a case (see the bounds sketch after these notes).
  Updated the implementation of read_range_first.
  Made the calculation of the number of bytes per batch easier to understand and easier to configure (a worked example follows below). The value bytesperbatch can be made a config variable.
  Reduced the number of bytes sent per batch, to avoid overloading.
  Increase the value returned from scan_time in order to make it clear to the optimizer that scanning is the worst alternative (illustrated below). Always use an index if available.
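
To make the bounds handling concrete, here is a minimal, self-contained sketch of the flag-to-bound mapping the patch introduces in ordered_index_scan. The enums are simplified stand-ins for the server and NDB types (only the constant names are taken from the diff); this is an illustration, not the handler's real code path.

#include <cassert>

// Simplified stand-ins for enum ha_rkey_function and the NDB bound types.
enum key_flag { HA_READ_KEY_EXACT, HA_READ_AFTER_KEY, HA_READ_BEFORE_KEY };
enum bound    { BoundGE, BoundGT, BoundNone };

// Mirrors the patched logic: an exact start key makes the end key
// redundant (the scan is already pinned to one key value); otherwise
// HA_READ_AFTER_KEY selects an inclusive upper bound (BoundGE) and
// any other end flag an exclusive one (BoundGT).
bound upper_bound_for(bool have_start_key, key_flag start_flag,
                      key_flag end_flag)
{
  if (have_start_key && start_flag == HA_READ_KEY_EXACT)
    return BoundNone;                      // end_key is ignored
  return (end_flag == HA_READ_AFTER_KEY) ? BoundGE : BoundGT;
}

int main()
{
  assert(upper_bound_for(true,  HA_READ_KEY_EXACT, HA_READ_AFTER_KEY) == BoundNone);
  assert(upper_bound_for(true,  HA_READ_AFTER_KEY, HA_READ_AFTER_KEY) == BoundGE);
  assert(upper_bound_for(false, HA_READ_KEY_EXACT, HA_READ_BEFORE_KEY) == BoundGT);
  return 0;
}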
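
As a worked example of the bytes-per-batch calculation (a sketch only; the row size and column count below are made-up values standing in for tab->getRowSizeInBytes() and tab->getNoOfColumns()):

#include <cstdio>

int main()
{
  const int bytesperbatch= 8192; // candidate for a config variable
  const int row_size= 68;        // assumed value of tab->getRowSizeInBytes()
  const int no_of_columns= 5;    // assumed value of tab->getNoOfColumns()

  // Per-row cost: 12 bytes fixed overhead + row data + 4 bytes per column.
  int bytes= 12 + row_size + 4 * no_of_columns;   // = 100
  int batch= bytesperbatch / bytes;               // = 81 rows per batch
  batch= batch == 0 ? 1 : batch;                  // always send at least one row

  printf("bytes per row: %d, rows per batch: %d\n", bytes, batch);
  return 0;
}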
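
And to illustrate why scan_time now returns records*1000: the optimizer compares this estimate against index-based access paths, so inflating it makes a full scan lose to any index. The numbers below are illustrative only, not the optimizer's actual cost model.

#include <cstdio>

// Old estimate: records/3 often made a full scan look cheaper than an
// index range read; new estimate: records*1000 makes it the worst option.
double scan_time_old(double records) { return records / 3; }
double scan_time_new(double records) { return records * 1000; }

int main()
{
  double records= 10000;
  printf("old scan cost: %.0f, new scan cost: %.0f\n",
         scan_time_old(records), scan_time_new(records));
  // old: ~3333 -> a scan could win; new: 10000000 -> any index wins
  return 0;
}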
parent 4a3580ca
@@ -424,13 +424,14 @@ static const ulong index_type_flags[]=
   0,
   /* PRIMARY_KEY_INDEX */
-  HA_ONLY_WHOLE_INDEX |
-  HA_WRONG_ASCII_ORDER |
+  /*
+     Enable HA_KEY_READ_ONLY when "sorted" indexes are supported,
+     thus ORDERD BY clauses can be optimized by reading directly
+     through the index.
+  */
   HA_NOT_READ_PREFIX_LAST,
   /* UNIQUE_INDEX */
-  HA_ONLY_WHOLE_INDEX |
-  HA_WRONG_ASCII_ORDER |
   HA_NOT_READ_PREFIX_LAST,
   /* ORDERED_INDEX */
@@ -475,6 +476,7 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const
 inline ulong ha_ndbcluster::index_flags(uint idx_no) const
 {
   DBUG_ENTER("index_flags");
+  DBUG_PRINT("info", ("idx_no: %d", idx_no));
   DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size);
   DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)]);
 }
@@ -771,23 +773,23 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
                     NdbOperation::BoundLE))
       DBUG_RETURN(1);
-    if (end_key &&
-        (start_key && start_key->flag != HA_READ_KEY_EXACT) &&
-        // MASV Is it a bug that end_key is not 0
-        // if start flag is HA_READ_KEY_EXACT
-        set_bounds(op, end_key,
-                   (end_key->flag == HA_READ_AFTER_KEY) ?
-                   NdbOperation::BoundGE :
-                   NdbOperation::BoundGT))
-      DBUG_RETURN(1);
+    if (end_key)
+    {
+      if (start_key && start_key->flag == HA_READ_KEY_EXACT)
+        DBUG_PRINT("info", ("start_key is HA_READ_KEY_EXACT ignoring end_key"));
+      else if (set_bounds(op, end_key,
+                          (end_key->flag == HA_READ_AFTER_KEY) ?
+                          NdbOperation::BoundGE :
+                          NdbOperation::BoundGT))
+        DBUG_RETURN(1);
+    }
 
   // Define attributes to read
   for (i= 0; i < no_fields; i++)
   {
     Field *field= table->field[i];
     if ((thd->query_id == field->query_id) ||
-        (field->flags & PRI_KEY_FLAG))
+        (field->flags & PRI_KEY_FLAG) ||
+        retrieve_all_fields)
     {
       if (get_ndb_value(op, i, field->ptr))
         ERR_RETURN(op->getNdbError());
@@ -1515,30 +1517,34 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
                                     const key_range *end_key,
                                     bool sorted)
 {
+  KEY* key_info;
   int error= 1;
+  byte* buf = table->record[0];
   DBUG_ENTER("ha_ndbcluster::read_range_first");
+  DBUG_PRINT("info", ("sorted: %d", sorted));
 
   switch (get_index_type(active_index)){
   case PRIMARY_KEY_INDEX:
-    error= pk_read(start_key->key, start_key->length,
-                   table->record[0]);
+    key_info= table->key_info + active_index;
+    if (start_key &&
+        start_key->length == key_info->key_length &&
+        start_key->flag == HA_READ_KEY_EXACT)
+      DBUG_RETURN(pk_read(start_key->key, start_key->length, buf));
     break;
   case UNIQUE_INDEX:
-    error= unique_index_read(start_key->key, start_key->length,
-                             table->record[0]);
+    key_info= table->key_info + active_index;
+    if (start_key &&
+        start_key->length == key_info->key_length &&
+        start_key->flag == HA_READ_KEY_EXACT)
+      DBUG_RETURN(unique_index_read(start_key->key, start_key->length, buf));
     break;
-  case ORDERED_INDEX:
-    // Start the ordered index scan and fetch the first row
-    error= ordered_index_scan(start_key, end_key, sorted,
-                              table->record[0]);
-    break;
   default:
+  case UNDEFINED_INDEX:
     break;
   }
+
+  // Start the ordered index scan and fetch the first row
+  error= ordered_index_scan(start_key, end_key, sorted,
+                            buf);
   DBUG_RETURN(error);
 }
@@ -1780,7 +1786,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
        where field->query_id is the same as
        the current query id */
     DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS"));
-    retrieve_all_fields = TRUE;
+    retrieve_all_fields= TRUE;
     break;
   case HA_EXTRA_PREPARE_FOR_DELETE:
     DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE"));
@@ -1834,9 +1840,9 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
     degrade if too many bytes are inserted, thus it's limited by this
     calculation.
   */
+  const int bytesperbatch = 8192;
   bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns();
-  batch= (1024*256); // 1024 rows, with size 256
-  batch= batch/bytes; //
+  batch= bytesperbatch/bytes;
   batch= batch == 0 ? 1 : batch;
   DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes));
   bulk_insert_rows= batch;
@@ -1882,7 +1888,7 @@ const char **ha_ndbcluster::bas_ext() const
 double ha_ndbcluster::scan_time()
 {
-  return rows2double(records/3);
+  return rows2double(records*1000);
 }
@@ -2028,7 +2034,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       (NdbConnection*)thd->transaction.stmt.ndb_tid;
     DBUG_ASSERT(m_active_trans);
-    retrieve_all_fields = FALSE;
+    retrieve_all_fields= FALSE;
   }
   else
@@ -2081,7 +2087,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
   }
   m_active_trans= trans;
-  retrieve_all_fields = FALSE;
+  retrieve_all_fields= FALSE;
   DBUG_RETURN(error);
 }
...