Commit c6ac6aa5 authored by unknown

Merge


ndb/src/kernel/blocks/dbdict/Dbdict.cpp:
  Auto merged
ndb/src/ndbapi/Ndb.cpp:
  Auto merged
sql/ha_ndbcluster.cc:
  SCCS merged
parents 63b89d81 7aa73081
@@ -4061,12 +4061,14 @@ calcLHbits(Uint32 * lhPageBits, Uint32 * lhDistrBits,
tmp <<= 1;
distrBits++;
}//while
#ifdef ndb_classical_lhdistrbits
if (tmp != totalFragments) {
tmp >>= 1;
if ((fid >= (totalFragments - tmp)) && (fid < (tmp - 1))) {
distrBits--;
}//if
}//if
#endif
* lhPageBits = pageBits;
* lhDistrBits = distrBits;
......
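For context only (not part of the commit): the hunk above computes distrBits, the number of linear-hashing distribution bits, and the block now guarded by ndb_classical_lhdistrbits used to reduce distrBits by one for certain fragment ids. A minimal standalone sketch of the surviving bit calculation, assuming the (not shown) loop doubles tmp until it reaches totalFragments; the helper name calcDistrBits is hypothetical:

// Illustrative sketch, not the committed code: count the bits needed so
// that (1u << distrBits) >= totalFragments.
#include <cstdio>

typedef unsigned int Uint32;      // stand-in for the NDB typedef

static Uint32 calcDistrBits(Uint32 totalFragments)
{
  Uint32 distrBits = 0;
  Uint32 tmp = 1;
  while (tmp < totalFragments) {
    tmp <<= 1;                    // same doubling as in the hunk above
    distrBits++;
  }//while
  // The #ifdef'd classical-LH branch would lower distrBits for some fids here.
  return distrBits;
}

int main()
{
  printf("6 fragments -> %u distrBits\n", calcDistrBits(6)); // prints 3
  return 0;
}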
@@ -764,7 +764,7 @@ Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl *table= info->m_table_impl;
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
DBUG_PRINT("info", ("value %u", tupleId));
DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
@@ -776,7 +776,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
DBUG_PRINT("info", ("value %u", tupleId));
DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
@@ -796,7 +796,8 @@ Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
if ( theFirstTupleId[aTableId] != theLastTupleId[aTableId] )
{
theFirstTupleId[aTableId]++;
DBUG_PRINT("info", ("next cached value %u", theFirstTupleId[aTableId]));
DBUG_PRINT("info", ("next cached value %ul",
(ulong) theFirstTupleId[aTableId]));
DBUG_RETURN(theFirstTupleId[aTableId]);
}
else // theFirstTupleId == theLastTupleId
@@ -817,7 +818,7 @@ Ndb::readAutoIncrementValue(const char* aTableName)
DBUG_RETURN(~(Uint64)0);
}
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
DBUG_PRINT("info", ("value %u", tupleId));
DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
@@ -829,7 +830,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
DBUG_PRINT("info", ("value %u", tupleId));
DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
......
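As background (not part of the commit): DBUG_PRINT hands its parenthesized argument list to a printf-style formatter, so the conversion specifier has to match the promoted type of the value actually passed; in standard printf syntax the length modifier comes before the conversion character (e.g. "%lu" for unsigned long), while "%ul" is read as "%u" followed by a literal 'l'. A small illustrative sketch of printing a 64-bit tuple id with plain printf, assuming Uint64 is an unsigned 64-bit integer; the value is made up:

// Illustrative sketch, not NDB code: keep the format specifier and the
// argument type in sync when printing a 64-bit value.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

typedef uint64_t Uint64;                      // stand-in for the NDB typedef

int main()
{
  Uint64 tupleId = 4294967400ULL;             // does not fit in 32 bits
  printf("value %lu\n", (unsigned long) tupleId);  // cast so the argument matches %lu
                                                   // (truncates where long is 32-bit)
  printf("value %" PRIu64 "\n", tupleId);          // or use the <cinttypes> macro
  return 0;
}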
@@ -3050,8 +3050,10 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
m_rows_inserted= (ha_rows) 0;
if (rows == (ha_rows) 0)
{
/* We don't know how many will be inserted, guess */
m_rows_to_insert= m_autoincrement_prefetch;
}
else
m_rows_to_insert= rows;
@@ -4174,8 +4176,10 @@ ulonglong ha_ndbcluster::get_auto_increment()
Ndb *ndb= get_ndb();
if (m_rows_inserted > m_rows_to_insert)
{
/* We guessed too low */
m_rows_to_insert+= m_autoincrement_prefetch;
}
cache_size=
(int)
(m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
......
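For illustration only (hypothetical helper, not the handler code): taken together, the two ha_ndbcluster hunks implement a simple prefetch policy for auto-increment values: when the number of rows to insert is unknown the handler guesses m_autoincrement_prefetch, the guess is raised by another prefetch batch whenever the inserted count overtakes it, and the cache size requested from NDB stays within one prefetch batch. A compact sketch of that sizing logic under those assumptions:

// Illustrative sketch of the prefetch sizing, not the committed handler code.
#include <algorithm>
#include <cstdio>

struct PrefetchState {
  unsigned long rows_to_insert;   // current estimate (0 = unknown)
  unsigned long rows_inserted;    // rows inserted so far
  unsigned long prefetch;         // m_autoincrement_prefetch
};

// How many auto-increment values to ask NDB to cache for the next request.
static unsigned long nextCacheSize(PrefetchState& s)
{
  if (s.rows_to_insert == 0)
    s.rows_to_insert = s.prefetch;         // total unknown: guess one batch
  if (s.rows_inserted > s.rows_to_insert)
    s.rows_to_insert += s.prefetch;        // guessed too low: grow the estimate
  unsigned long remaining = s.rows_to_insert - s.rows_inserted;
  return std::min(remaining, s.prefetch);  // stay within one prefetch batch
}

int main()
{
  PrefetchState s = { 0, 0, 32 };
  printf("first cache size: %lu\n", nextCacheSize(s));  // prints 32
  return 0;
}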