mariadb · commit 1045f2c3
authored Jun 24, 2004 by unknown
Modified index flags and added ndbcluster_print_error
parent ff392bbe
Showing 3 changed files with 34 additions and 26 deletions
sql/ha_ndbcluster.cc   +19  -19
sql/ha_ndbcluster.h     +7   -5
sql/handler.cc          +8   -2
sql/ha_ndbcluster.cc

@@ -474,9 +474,7 @@ static const ulong index_type_flags[]=
   0,

   /* PRIMARY_KEY_INDEX */
-  HA_NOT_READ_PREFIX_LAST |
-  HA_ONLY_WHOLE_INDEX,
+  HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER,

   /* PRIMARY_KEY_ORDERED_INDEX */
   /*
@@ -484,23 +482,20 @@ static const ulong index_type_flags[]=
      thus ORDERD BY clauses can be optimized by reading directly
      through the index.
   */
-  HA_NOT_READ_PREFIX_LAST |
-  // HA_KEY_READ_ONLY |
-  HA_WRONG_ASCII_ORDER,
+  HA_READ_NEXT |
+  HA_READ_RANGE,

   /* UNIQUE_INDEX */
-  HA_NOT_READ_PREFIX_LAST |
-  HA_ONLY_WHOLE_INDEX,
+  HA_ONLY_WHOLE_INDEX |
+  HA_WRONG_ASCII_ORDER,

   /* UNIQUE_ORDERED_INDEX */
-  HA_NOT_READ_PREFIX_LAST |
-  HA_READ_NEXT |
-  HA_WRONG_ASCII_ORDER,
+  HA_READ_RANGE,

   /* ORDERED_INDEX */
   HA_READ_NEXT |
-  HA_NOT_READ_PREFIX_LAST |
-  HA_WRONG_ASCII_ORDER
+  HA_READ_PREV |
+  HA_READ_RANGE,
 };

 static const int index_flags_size= sizeof(index_type_flags)/sizeof(ulong);
@@ -529,7 +524,7 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const
   flags depending on the type of the index.
 */
-inline ulong ha_ndbcluster::index_flags(uint idx_no) const
+inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part) const
 {
   DBUG_ENTER("index_flags");
   DBUG_PRINT("info", ("idx_no: %d", idx_no));
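For readers less familiar with the handler interface: the hunks above work as a pair. index_type_flags[] holds one capability word per NDB index type, and index_flags(), which now takes a key-part argument in addition to an index number, returns the word for the index the server asks about. The stand-alone sketch below mimics that table-driven lookup; the enum, flag names, bit values and demo_* functions are illustrative stand-ins, not the actual MySQL or NDB definitions.

  // Stand-in capability bits and index types (not the real HA_* values).
  #include <cstdio>

  typedef unsigned long ulong;
  typedef unsigned int  uint;

  enum DEMO_INDEX_TYPE {
    DEMO_UNDEFINED_INDEX= 0,
    DEMO_PRIMARY_KEY_INDEX,          // hash-based
    DEMO_PRIMARY_KEY_ORDERED_INDEX,
    DEMO_UNIQUE_INDEX,               // hash-based
    DEMO_UNIQUE_ORDERED_INDEX,
    DEMO_ORDERED_INDEX
  };

  static const ulong F_READ_NEXT=        1;  // forward index scans
  static const ulong F_READ_PREV=        2;  // backward index scans
  static const ulong F_READ_RANGE=       4;  // range scans
  static const ulong F_ONLY_WHOLE_INDEX= 8;  // full-key lookups only

  // One flags word per index type, indexed by DEMO_INDEX_TYPE.
  static const ulong demo_index_type_flags[]=
  {
    0,
    F_ONLY_WHOLE_INDEX,
    F_READ_NEXT | F_READ_RANGE,
    F_ONLY_WHOLE_INDEX,
    F_READ_NEXT | F_READ_RANGE,
    F_READ_NEXT | F_READ_PREV | F_READ_RANGE
  };

  // Two-argument form: the second argument selects a key part; this
  // simplified model reports the same capabilities for every part.
  static ulong demo_index_flags(DEMO_INDEX_TYPE type, uint part)
  {
    (void) part;
    return demo_index_type_flags[type];
  }

  int main()
  {
    if (demo_index_flags(DEMO_ORDERED_INDEX, 0) & F_READ_RANGE)
      printf("ordered index: range scans supported\n");
    if (!(demo_index_flags(DEMO_UNIQUE_INDEX, 0) & F_READ_RANGE))
      printf("hash-based unique index: no range scans\n");
    return 0;
  }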
@@ -1390,6 +1385,7 @@ void ha_ndbcluster::print_results()
     switch (col->getType()) {
     case NdbDictionary::Column::Blob:
+    case NdbDictionary::Column::Clob:
     case NdbDictionary::Column::Undefined:
       fprintf(DBUG_FILE, "Unknown type: %d", col->getType());
       break;
@@ -2622,13 +2618,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_ndb(NULL),
   m_table(NULL),
   m_table_flags(HA_REC_NOT_IN_SEQ |
-                HA_KEYPOS_TO_RNDPOS |
                 HA_NOT_EXACT_COUNT |
-                HA_NO_WRITE_DELAYED |
                 HA_NO_PREFIX_CHAR_KEYS |
-                HA_NO_BLOBS |
-                HA_DROP_BEFORE_CREATE |
-                HA_NOT_READ_AFTER_KEY),
+                HA_NO_BLOBS),
   m_use_write(false),
   retrieve_all_fields(FALSE),
   rows_to_insert(0),
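The constructor hunk above shrinks the capability mask the handler later reports through table_flags(). As a rough, self-contained illustration of how such a mask is typically consumed, bit tests before the server attempts an operation, the sketch below uses stand-in names and bit values rather than the real HA_* constants.

  #include <cstdio>

  typedef unsigned long ulong;

  // Stand-in capability bits (the real flags are the HA_* constants above).
  static const ulong F_REC_NOT_IN_SEQ=   1;  // row order is not insertion order
  static const ulong F_NOT_EXACT_COUNT=  2;  // row counts are estimates only
  static const ulong F_NO_BLOBS=         4;  // blob columns are not supported

  struct demo_handler
  {
    ulong m_table_flags;
    demo_handler()
      : m_table_flags(F_REC_NOT_IN_SEQ | F_NOT_EXACT_COUNT | F_NO_BLOBS) {}
    ulong table_flags() const { return m_table_flags; }
  };

  int main()
  {
    demo_handler h;
    if (h.table_flags() & F_NO_BLOBS)
      printf("reject BLOB columns for this table\n");
    if (h.table_flags() & F_NOT_EXACT_COUNT)
      printf("treat the row count as an estimate\n");
    return 0;
  }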
@@ -2941,6 +2933,14 @@ bool ndbcluster_end()
   DBUG_RETURN(0);
 }

+void ndbcluster_print_error(int error)
+{
+  DBUG_ENTER("ndbcluster_print_error");
+  TABLE tab;
+  tab.table_name= NULL;
+  ha_ndbcluster error_handler(&tab);
+  error_handler.print_error(error, MYF(0));
+}

 /*
   Set m_tabname from full pathname to table file
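The new ndbcluster_print_error() has no handler object available at its call sites, so it builds a throwaway ha_ndbcluster bound to a dummy TABLE just to reuse the class's print_error() formatting. Below is a minimal self-contained sketch of the same pattern, with stand-in types in place of TABLE and ha_ndbcluster.

  #include <cstdio>

  struct demo_table { const char *table_name; };

  struct demo_handler
  {
    demo_table *table;
    demo_handler(demo_table *t) : table(t) {}
    // Stand-in for handler::print_error(error, errflag).
    void print_error(int error, int /*errflag*/)
    {
      fprintf(stderr, "error %d on table %s\n",
              error, table->table_name ? table->table_name : "(none)");
    }
  };

  // Mirrors the shape of ndbcluster_print_error(): no handler instance
  // exists at the call site, so a temporary one is created.
  void demo_print_error(int error)
  {
    demo_table tab;
    tab.table_name= 0;            // no specific table in this context
    demo_handler error_handler(&tab);
    error_handler.print_error(error, 0);
  }

  int main()
  {
    demo_print_error(4009);       // stand-in engine-specific error code
    return 0;
  }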
sql/ha_ndbcluster.h

@@ -93,11 +93,12 @@ class ha_ndbcluster: public handler
   const char *table_type() const { return("ndbcluster");}
   const char **bas_ext() const;
   ulong table_flags(void) const { return m_table_flags; }
-  ulong index_flags(uint idx) const;
-  uint max_record_length() const { return NDB_MAX_TUPLE_SIZE; };
-  uint max_keys() const { return MAX_KEY; }
-  uint max_key_parts() const { return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; };
-  uint max_key_length() const { return NDB_MAX_KEY_SIZE;};
+  ulong index_flags(uint idx, uint part) const;
+  uint max_supported_record_length() const { return NDB_MAX_TUPLE_SIZE; };
+  uint max_supported_keys() const { return MAX_KEY; }
+  uint max_supported_key_parts() const
+    { return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; };
+  uint max_supported_key_length() const { return NDB_MAX_KEY_SIZE;};
   int rename_table(const char *from, const char *to);
   int delete_table(const char *name);
@@ -227,6 +228,7 @@ int ndbcluster_discover(const char* dbname, const char* name,
                         const void** frmblob, uint* frmlen);
 int ndbcluster_drop_database(const char* path);
+void ndbcluster_print_error(int error);
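The renaming from max_*() to max_supported_*() appears to follow the handler-API convention of this period: the engine overrides virtual max_supported_*() methods with its own hard limits, while non-virtual max_*() wrappers in the base class clamp those against server-wide maximums. The sketch below is a simplified, hypothetical illustration of that wrapper pattern; the class names, constant and numbers are invented for the example and are not the real handler base class.

  #include <algorithm>
  #include <cstdio>

  typedef unsigned int uint;

  static const uint SERVER_MAX_KEY_LENGTH= 1024;   // stand-in server-wide cap

  struct demo_handler_base
  {
    virtual ~demo_handler_base() {}
    // Engine-specific limit, overridden per storage engine.
    virtual uint max_supported_key_length() const
    { return SERVER_MAX_KEY_LENGTH; }
    // Wrapper the rest of the server calls: never exceeds the server cap.
    uint max_key_length() const
    { return std::min(SERVER_MAX_KEY_LENGTH, max_supported_key_length()); }
  };

  struct demo_ndb_handler : demo_handler_base
  {
    uint max_supported_key_length() const { return 2048; }   // engine limit
  };

  int main()
  {
    demo_ndb_handler h;
    printf("usable key length: %u\n", h.max_key_length());   // prints 1024
    return 0;
  }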
sql/handler.cc

@@ -488,7 +488,10 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans)
   {
     if ((error=ndbcluster_commit(thd, trans->ndb_tid)))
     {
-      my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
+      if (error == -1)
+        my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
+      else
+        ndbcluster_print_error(error);
       error=1;
     }
     if (trans == &thd->transaction.all)
@@ -554,7 +557,10 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans)
   {
     if ((error=ndbcluster_rollback(thd, trans->ndb_tid)))
     {
-      my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error);
+      if (error == -1)
+        my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error);
+      else
+        ndbcluster_print_error(error);
       error=1;
     }
     trans->ndb_tid=0;
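Both call sites now report a return value of -1 through my_error() as before, and hand any other non-zero code to the new ndbcluster_print_error(). The self-contained sketch below mirrors just that dispatch; demo_generic_error() and demo_engine_print_error() are stand-ins for my_error() and ndbcluster_print_error().

  #include <cstdio>

  static void demo_generic_error(const char *what, int code)
  { fprintf(stderr, "Got error %d during %s\n", code, what); }

  static void demo_engine_print_error(int code)
  { fprintf(stderr, "engine error %d (see engine log for details)\n", code); }

  // Returns 1 on failure, 0 on success, mirroring the shape of the caller.
  static int demo_commit(int engine_result)
  {
    int error= engine_result;
    if (error)
    {
      if (error == -1)                       // generic failure
        demo_generic_error("COMMIT", error);
      else                                   // engine-specific error code
        demo_engine_print_error(error);
      error= 1;
    }
    return error;
  }

  int main()
  {
    demo_commit(0);      // success: nothing printed
    demo_commit(-1);     // generic path
    demo_commit(4028);   // engine-specific path (stand-in code)
    return 0;
  }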