Commit 852a44e1 authored by mronstrom@mysql.com

New config parameters for Log Page Buffers

Fixed an issue with NO_OF_FRAG_PER_NODE.
Also removed some OSE code that is no longer needed now that the log pages are configurable.
parent 2a0caafc
......@@ -74,7 +74,7 @@ FsCloseReq::getRemoveFileFlag(const UintR & fileflag){
inline
void
FsCloseReq::setRemoveFileFlag(UintR & fileflag, bool removefile){
ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag");
// ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag");
if (removefile == true)
fileflag = 1;
else
......
......@@ -83,6 +83,10 @@
#define CFG_DB_NO_LOCAL_SCANS 152
#define CFG_DB_BATCH_SIZE 153
#define CFG_DB_UNDO_INDEX_BUFFER 154
#define CFG_DB_UNDO_DATA_BUFFER 155
#define CFG_DB_REDO_BUFFER 156
#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201
......
......@@ -643,7 +643,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT64,
3 * 1024 * 8192,
24 * (1024 * 1024),
128 * 8192,
((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },
......@@ -655,10 +655,46 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT64,
10 * 1024 * 8192,
80 * (1024 * 1024),
128 * 8192,
((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },
{
CFG_DB_UNDO_INDEX_BUFFER,
"UndoIndexBuffer",
"DB",
"Number bytes on each DB node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT,
2 * (1024 * 1024),
1 * (1024 * 1024),
MAX_INT_RNIL},
{
CFG_DB_UNDO_DATA_BUFFER,
"UndoDataBuffer",
"DB",
"Number bytes on each DB node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT,
16 * (1024 * 1024),
1 * (1024 * 1024),
MAX_INT_RNIL},
{
CFG_DB_REDO_BUFFER,
"RedoBuffer",
"DB",
"Number bytes on each DB node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT,
8 * (1024 * 1024),
1 * (1024 * 1024),
MAX_INT_RNIL},
{
CFG_DB_START_PARTIAL_TIMEOUT,
"StartPartialTimeout",
......
......@@ -194,7 +194,6 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZTABLESIZE 16
#define ZTABMAXINDEX 3
#define ZUNDEFINED_OP 6
#define ZUNDOPAGESIZE 64
#define ZUNDOHEADSIZE 7
#define ZUNLOCKED 1
#define ZUNDOPAGE_BASE_ADD 2
......@@ -894,8 +893,8 @@ struct SrVersionRec {
/* TABREC */
/* --------------------------------------------------------------------------------- */
struct Tabrec {
Uint32 fragholder[NO_OF_FRAG_PER_NODE];
Uint32 fragptrholder[NO_OF_FRAG_PER_NODE];
Uint32 fragholder[MAX_FRAG_PER_NODE];
Uint32 fragptrholder[MAX_FRAG_PER_NODE];
Uint32 tabUserPtr;
BlockReference tabUserRef;
};
......
......@@ -32,7 +32,6 @@ void Dbacc::initData()
crootfragmentsize = ZROOTFRAGMENTSIZE;
cdirrangesize = ZDIRRANGESIZE;
coverflowrecsize = ZOVERFLOWRECSIZE;
cundopagesize = ZUNDOPAGESIZE;
cfsConnectsize = ZFS_CONNECTSIZE;
cfsOpsize = ZFS_OPSIZE;
cscanRecSize = ZSCAN_REC_SIZE;
......@@ -136,8 +135,26 @@ void Dbacc::initRecords()
Dbacc::Dbacc(const class Configuration & conf):
SimulatedBlock(DBACC, conf)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbacc);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
&log_page_size);
/**
* Always set page size in half MBytes
*/
cundopagesize= (log_page_size / sizeof(Undopage));
Uint32 mega_byte_part= cundopagesize & 15;
if (mega_byte_part != 0) {
jam();
cundopagesize+= (16 - mega_byte_part);
}
ndbout << "ACC: No of Undo Pages = " << cundopagesize << endl;
// Transit signals
addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
......
......@@ -1021,7 +1021,7 @@ void Dbacc::initialiseTableRec(Signal* signal)
for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
refresh_watch_dog();
ptrAss(tabptr, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
tabptr.p->fragholder[i] = RNIL;
tabptr.p->fragptrholder[i] = RNIL;
}//for
......@@ -1187,7 +1187,7 @@ void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
TabrecPtr tabPtr;
tabPtr.i = tableId;
ptrCheckGuard(tabPtr, ctablesize, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabPtr.p->fragholder[i] != RNIL) {
jam();
......@@ -1419,7 +1419,7 @@ void Dbacc::execFSREMOVEREF(Signal* signal)
/* -------------------------------------------------------------------------- */
bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
{
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == RNIL) {
jam();
......@@ -2435,7 +2435,7 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
ptrCheckGuard(tabptr, ctablesize, tabrec);
// find fragment (TUX will know it)
if (req->fragPtrI == RNIL) {
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragptrholder[i] != RNIL) {
rootfragrecptr.i = tabptr.p->fragptrholder[i];
......@@ -12184,7 +12184,7 @@ void Dbacc::takeOutReadyScanQueue(Signal* signal)
bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
{
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == fid) {
jam();
......
......@@ -64,25 +64,12 @@
/* CONSTANTS OF THE LOG PAGES */
/* ------------------------------------------------------------------------- */
#define ZPAGE_HEADER_SIZE 32
#if defined NDB_OSE
/**
* Set the fragment log file size to 2Mb in OSE
* This is done in order to speed up the initial start
*/
#define ZNO_MBYTES_IN_FILE 2
#define ZPAGE_SIZE 2048
#define ZPAGES_IN_MBYTE 128
#define ZTWOLOG_NO_PAGES_IN_MBYTE 7
#define ZTWOLOG_PAGE_SIZE 11
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
#else
#define ZNO_MBYTES_IN_FILE 16
#define ZPAGE_SIZE 8192
#define ZPAGES_IN_MBYTE 32
#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
#define ZTWOLOG_PAGE_SIZE 13
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
#endif
#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log
......@@ -1829,11 +1816,7 @@ public:
* - There is no more information needed.
* The next mbyte will always refer to the start of the next mbyte.
*/
#ifdef NDB_OSE
UintR logPageWord[2048]; // Size 8 kbytes
#else
UintR logPageWord[8192]; // Size 32 kbytes
#endif
};
typedef Ptr<LogPageRecord> LogPageRecordPtr;
......@@ -1855,8 +1838,8 @@ public:
PREP_DROP_TABLE_DONE = 4
};
UintR fragrec[NO_OF_FRAG_PER_NODE];
Uint16 fragid[NO_OF_FRAG_PER_NODE];
UintR fragrec[MAX_FRAG_PER_NODE];
Uint16 fragid[MAX_FRAG_PER_NODE];
/**
* Status of the table
*/
......@@ -2643,7 +2626,6 @@ private:
UintR cfirstfreeLfo;
UintR clfoFileSize;
#define ZLOG_PAGE_FILE_SIZE 256 // 8 MByte
LogPageRecord *logPageRecord;
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;
......
......@@ -33,7 +33,6 @@ void Dblqh::initData()
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
clogPageFileSize = ZLOG_PAGE_FILE_SIZE;
clfoFileSize = ZLFO_FILE_SIZE;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;
......@@ -176,8 +175,26 @@ Dblqh::Dblqh(const class Configuration & conf):
m_commitAckMarkerHash(m_commitAckMarkerPool),
c_scanTakeOverHash(c_scanRecordPool)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dblqh);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
&log_page_size);
/**
* Always set page size in half MBytes
*/
clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
Uint32 mega_byte_part= clogPageFileSize & 15;
if (mega_byte_part != 0) {
jam();
clogPageFileSize+= (16 - mega_byte_part);
}
ndbout << "LQH: No of REDO pages = " << clogPageFileSize << endl;
addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);
......
......@@ -991,7 +991,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
FragrecordPtr tFragPtr;
tFragPtr.i = RNIL;
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
jam();
tFragPtr.i = tTablePtr.p->fragrec[i];
......@@ -1916,7 +1916,7 @@ void Dblqh::removeTable(Uint32 tableId)
tabptr.i = tableId;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] != ZNIL) {
jam();
......@@ -15864,7 +15864,7 @@ void Dblqh::deleteFragrec(Uint32 fragId)
{
Uint32 indexFound= RNIL;
fragptr.i = RNIL;
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];
......@@ -15972,7 +15972,7 @@ void Dblqh::getFirstInLogQueue(Signal* signal)
/* ---------------------------------------------------------------- */
bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
{
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (UintR)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];
......@@ -16394,7 +16394,7 @@ void Dblqh::initialiseTabrec(Signal* signal)
ptrAss(tabptr, tablerec);
tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
tabptr.p->usageCount = 0;
for (Uint32 i = 0; i <= (NO_OF_FRAG_PER_NODE - 1); i++) {
for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
tabptr.p->fragid[i] = ZNIL;
tabptr.p->fragrec[i] = RNIL;
}//for
......@@ -16716,7 +16716,7 @@ bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
return false;
}//if
seizeFragmentrec(signal);
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == ZNIL) {
jam();
......
......@@ -85,21 +85,12 @@ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
#define ZNO_OF_FRAGREC 64 /* SIZE OF FRAGMENT FILE. */
#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
#define ZNO_OF_OPREC 116 /* SIZE OF OPERATION RECORD FILE */
#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLELL RESTART INFOS */
#define ZNO_OF_TAB_DESCR_REC 484 /* SIZE OF TABLE DESCRIPTOR FILE */
#define ZNO_OF_TABLEREC 16 /* SIZE OF TABLE RECORD FILE. */
#ifdef NDB_OSE
#define ZNO_OF_UNDO_PAGE 80 // Must be multiple of 8
#else
#define ZNO_OF_UNDO_PAGE 500 // Must be multiple of 8
#endif
/* 24 SEGMENTS WITH 8 PAGES IN EACH*/
/* PLUS ONE UNDO BUFFER CACHE */
// Undo record identifiers are 32-bits with page index 13-bits
......@@ -823,8 +814,8 @@ struct Tablerec {
// List of ordered indexes
ArrayList<TupTriggerData> tuxCustomTriggers;
Uint32 fragid[2 * NO_OF_FRAG_PER_NODE];
Uint32 fragrec[2 * NO_OF_FRAG_PER_NODE];
Uint32 fragid[2 * MAX_FRAG_PER_NODE];
Uint32 fragrec[2 * MAX_FRAG_PER_NODE];
struct {
Uint32 tabUserPtr;
......
......@@ -44,16 +44,10 @@ void Dbtup::initData()
cnoOfLcpRec = ZNO_OF_LCP_REC;
cnoOfConcurrentOpenOp = ZNO_OF_CONCURRENT_OPEN_OP;
cnoOfConcurrentWriteOp = ZNO_OF_CONCURRENT_WRITE_OP;
cnoOfFragoprec = 2 * NO_OF_FRAG_PER_NODE;
cnoOfFragrec = ZNO_OF_FRAGREC;
cnoOfOprec = ZNO_OF_OPREC;
cnoOfPage = ZNO_OF_PAGE;
cnoOfFragoprec = 2 * MAX_FRAG_PER_NODE;
cnoOfPageRangeRec = ZNO_OF_PAGE_RANGE_REC;
cnoOfParallellUndoFiles = ZNO_OF_PARALLELL_UNDO_FILES;
cnoOfRestartInfoRec = ZNO_OF_RESTART_INFO_REC;
cnoOfTablerec = ZNO_OF_TABLEREC;
cnoOfTabDescrRec = ZNO_OF_TAB_DESCR_REC;
cnoOfUndoPage = ZNO_OF_UNDO_PAGE;
c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE;
c_noOfBuildIndexRec = 32;
......@@ -83,9 +77,26 @@ Dbtup::Dbtup(const class Configuration & conf)
c_storedProcPool(),
c_buildIndexList(c_buildIndexPool)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbtup);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_DATA_BUFFER,
&log_page_size);
/**
* Always set page size in half MBytes
*/
cnoOfUndoPage= (log_page_size / sizeof(UndoPage));
Uint32 mega_byte_part= cnoOfUndoPage & 15;
if (mega_byte_part != 0) {
jam();
cnoOfUndoPage+= (16 - mega_byte_part);
}
ndbout << "TUP: No of Undo Pages = " << cnoOfUndoPage << endl;
addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG);
addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB);
......@@ -1049,7 +1060,7 @@ void Dbtup::initializeTablerec()
void
Dbtup::initTab(Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
regTabPtr->fragid[i] = RNIL;
regTabPtr->fragrec[i] = RNIL;
}//for
......@@ -1160,7 +1171,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
return;
}//Dbtup::execTUPSEIZEREQ()
#define printFragment(t){ for(Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE);i++){\
#define printFragment(t){ for(Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE);i++){\
ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \
t.i, t.p->fragid[i], i, t.p->fragrec[i]); }}
......
......@@ -349,14 +349,14 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
do {
// get fragment
FragrecordPtr fragPtr;
if (buildPtr.p->m_fragNo == 2 * NO_OF_FRAG_PER_NODE) {
if (buildPtr.p->m_fragNo == 2 * MAX_FRAG_PER_NODE) {
ljam();
// build ready
buildIndexReply(signal, buildPtr.p);
c_buildIndexList.release(buildPtr);
return;
}
ndbrequire(buildPtr.p->m_fragNo < 2 * NO_OF_FRAG_PER_NODE);
ndbrequire(buildPtr.p->m_fragNo < 2 * MAX_FRAG_PER_NODE);
fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
if (fragPtr.i == RNIL) {
ljam();
......
......@@ -188,7 +188,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* -------------------------------------------------------------------- */
bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == RNIL) {
ljam();
......@@ -202,7 +202,7 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIn
void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
......@@ -456,7 +456,7 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
......@@ -515,7 +515,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId)
Uint32 fragIndex = RNIL;
Uint32 fragId = RNIL;
Uint32 i = 0;
for (i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
for (i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (tabPtr.p->fragid[i] != RNIL) {
ljam();
......
......@@ -148,6 +148,7 @@
// need large value.
/* ------------------------------------------------------------------------- */
#define NO_OF_FRAG_PER_NODE 1
#define MAX_FRAG_PER_NODE (NO_OF_FRAG_PER_NODE * MAX_REPLICAS)
/* ---------------------------------------------------------------- */
// To avoid synching too big chunks at a time we synch after writing
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment