Commit 274b25eb authored by Yoni Fogel

refs #5081 Replace all usages of:

BOOL->bool
FALSE->false
TRUE->true
u_int*_t->uint*_t

Also poisoned all of the old names so they cannot be reintroduced

git-svn-id: file:///svn/mysql/tokudb-engine/tokudb-engine@46156 c7de825b-a66e-492c-adef-691d508d4ae1
parent 16f359b7
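
The replacements map the old BSD-style names onto their standard C99/C++ spellings: the uint*_t types come from <stdint.h>, and bool/true/false are C++ keywords. The poisoning itself is not visible in the hunks below, so the following is only a minimal sketch of the usual GCC mechanism; the placement and the exact identifier list are illustrative assumptions, not the commit's actual code.

    // Illustrative sketch (assumed mechanism, not taken from this diff):
    // after switching to the standard spellings, ban the old ones so any
    // later use fails to compile under GCC/Clang.
    #include <stdint.h>   // standard uint8_t ... uint64_t replace u_int*_t

    #if defined(__GNUC__)
    #pragma GCC poison BOOL TRUE FALSE
    #pragma GCC poison u_int8_t u_int16_t u_int32_t u_int64_t
    #endif

Done this way the cleanup is self-enforcing: a patch that reintroduces u_int32_t or TRUE is rejected by the compiler instead of having to be caught in review.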
@@ -81,7 +81,7 @@ typedef struct st_tokudb_share {
bool has_unique_keys;
bool replace_into_fast;
rw_lock_t num_DBs_lock;
u_int32_t num_DBs;
uint32_t num_DBs;
} TOKUDB_SHARE;
#define HA_TOKU_ORIG_VERSION 4
@@ -149,12 +149,12 @@ class ha_tokudb : public handler {
//
uchar *rec_update_buff;
ulong alloced_update_rec_buff_length;
u_int32_t max_key_length;
uint32_t max_key_length;
uchar* range_query_buff; // range query buffer
u_int32_t size_range_query_buff; // size of the allocated range query buffer
u_int32_t bytes_used_in_range_query_buff; // number of bytes used in the range query buffer
u_int32_t curr_range_query_buff_offset; // current offset into the range query buffer for queries to read
uint32_t size_range_query_buff; // size of the allocated range query buffer
uint32_t bytes_used_in_range_query_buff; // number of bytes used in the range query buffer
uint32_t curr_range_query_buff_offset; // current offset into the range query buffer for queries to read
uint64_t bulk_fetch_iteration;
uint64_t rows_fetched_using_bulk_fetch;
bool doing_bulk_fetch;
@@ -185,9 +185,9 @@ class ha_tokudb : public handler {
// ranges of prelocked area, used to know how much to bulk fetch
//
uchar *prelocked_left_range;
u_int32_t prelocked_left_range_size;
uint32_t prelocked_left_range_size;
uchar *prelocked_right_range;
u_int32_t prelocked_right_range_size;
uint32_t prelocked_right_range_size;
//
@@ -195,9 +195,9 @@ class ha_tokudb : public handler {
//
DBT mult_key_dbt[2*(MAX_KEY + 1)];
DBT mult_rec_dbt[MAX_KEY + 1];
u_int32_t mult_put_flags[MAX_KEY + 1];
u_int32_t mult_del_flags[MAX_KEY + 1];
u_int32_t mult_dbt_flags[MAX_KEY + 1];
uint32_t mult_put_flags[MAX_KEY + 1];
uint32_t mult_del_flags[MAX_KEY + 1];
uint32_t mult_dbt_flags[MAX_KEY + 1];
//
@@ -207,7 +207,7 @@ class ha_tokudb : public handler {
// query
//
uchar* blob_buff;
u_int32_t num_blob_bytes;
uint32_t num_blob_bytes;
bool unpack_entire_row;
@@ -215,10 +215,10 @@ class ha_tokudb : public handler {
// buffers (and their sizes) that will hold the indexes
// of fields that need to be read for a query
//
u_int32_t* fixed_cols_for_query;
u_int32_t num_fixed_cols_for_query;
u_int32_t* var_cols_for_query;
u_int32_t num_var_cols_for_query;
uint32_t* fixed_cols_for_query;
uint32_t num_fixed_cols_for_query;
uint32_t* var_cols_for_query;
uint32_t num_var_cols_for_query;
bool read_blobs;
bool read_key;
@@ -235,7 +235,7 @@ class ha_tokudb : public handler {
// instance of cursor being used for init_xxx and rnd_xxx functions
//
DBC *cursor;
u_int32_t cursor_flags; // flags for cursor
uint32_t cursor_flags; // flags for cursor
//
// flags that are returned in table_flags()
//
@@ -265,7 +265,7 @@ class ha_tokudb : public handler {
//
// For instances where we successfully prelock a range or a table,
// we set this to TRUE so that successive cursor calls can know
// we set this to true so that successive cursor calls can know
// know to limit the locking overhead in a call to the fractal tree
//
bool range_lock_grabbed;
@@ -291,7 +291,7 @@ class ha_tokudb : public handler {
int loader_error;
bool num_DBs_locked_in_bulk;
u_int32_t lock_count;
uint32_t lock_count;
bool fix_rec_buff_for_blob(ulong length);
bool fix_rec_update_buff_for_blob(ulong length);
@@ -314,9 +314,9 @@ class ha_tokudb : public handler {
const uchar* record,
uint index
);
u_int32_t place_key_into_mysql_buff(KEY* key_info, uchar * record, uchar* data);
uint32_t place_key_into_mysql_buff(KEY* key_info, uchar * record, uchar* data);
void unpack_key(uchar * record, DBT const *key, uint index);
u_int32_t place_key_into_dbt_buff(KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length);
uint32_t place_key_into_dbt_buff(KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length);
DBT* create_dbt_key_from_key(DBT * key, KEY* key_info, uchar * buff, const uchar * record, bool* has_null, bool dont_pack_pk, int key_length = MAX_KEY_LENGTH);
DBT *create_dbt_key_from_table(DBT * key, uint keynr, uchar * buff, const uchar * record, bool* has_null, int key_length = MAX_KEY_LENGTH);
DBT* create_dbt_key_for_lookup(DBT * key, KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length = MAX_KEY_LENGTH);
@@ -328,7 +328,7 @@ class ha_tokudb : public handler {
int open_main_dictionary(const char* name, bool is_read_only, DB_TXN* txn);
int open_secondary_dictionary(DB** ptr, KEY* key_info, const char* name, bool is_read_only, DB_TXN* txn);
int acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt);
int estimate_num_rows(DB* db, u_int64_t* num_rows, DB_TXN* txn);
int estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn);
bool has_auto_increment_flag(uint* index);
int write_frm_data(DB* db, DB_TXN* txn, const char* frm_name);
@@ -365,7 +365,7 @@ class ha_tokudb : public handler {
KEY* key_info,
DB_TXN* txn,
KEY_AND_COL_INFO* kc_info,
u_int32_t keynr,
uint32_t keynr,
bool is_hot_index,
enum row_type row_type
);
@@ -374,17 +374,17 @@ class ha_tokudb : public handler {
int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info);
int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn);
int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd);
void set_main_dict_put_flags(THD* thd, bool opt_eligible, u_int32_t* put_flags);
void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags);
int insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn);
int insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN* txn, THD* thd);
void test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val);
u_int32_t fill_row_mutator(
uint32_t fill_row_mutator(
uchar* buf,
u_int32_t* dropped_columns,
u_int32_t num_dropped_columns,
uint32_t* dropped_columns,
uint32_t num_dropped_columns,
TABLE* altered_table,
KEY_AND_COL_INFO* altered_kc_info,
u_int32_t keynr,
uint32_t keynr,
bool is_add
);
@@ -490,7 +490,7 @@ class ha_tokudb : public handler {
ha_rows records_in_range(uint inx, key_range * min_key, key_range * max_key);
u_int32_t get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd);
uint32_t get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd);
THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type);
int get_status(DB_TXN* trans);
@@ -609,7 +609,7 @@ class ha_tokudb : public handler {
int unpack_blobs(
uchar* record,
const uchar* from_tokudb_blob,
u_int32_t num_blob_bytes,
uint32_t num_blob_bytes,
bool check_bitmap
);
int unpack_row(
@@ -306,8 +306,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table,
goto cleanup;
}
if (has_added_columns && !has_non_added_changes) {
u_int32_t added_columns[altered_table->s->fields];
u_int32_t num_added_columns = 0;
uint32_t added_columns[altered_table->s->fields];
uint32_t num_added_columns = 0;
int r = find_changed_columns(
added_columns,
&num_added_columns,
@@ -319,8 +319,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table,
goto cleanup;
}
if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) {
for (u_int32_t i = 0; i < num_added_columns; i++) {
u_int32_t curr_added_index = added_columns[i];
for (uint32_t i = 0; i < num_added_columns; i++) {
uint32_t curr_added_index = added_columns[i];
Field* curr_added_field = altered_table->field[curr_added_index];
printf(
"Added column: index %d, name %s\n",
@@ -331,8 +331,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table,
}
}
if (has_dropped_columns && !has_non_dropped_changes) {
u_int32_t dropped_columns[table->s->fields];
u_int32_t num_dropped_columns = 0;
uint32_t dropped_columns[table->s->fields];
uint32_t num_dropped_columns = 0;
int r = find_changed_columns(
dropped_columns,
&num_dropped_columns,
@@ -344,8 +344,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table,
goto cleanup;
}
if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) {
for (u_int32_t i = 0; i < num_dropped_columns; i++) {
u_int32_t curr_dropped_index = dropped_columns[i];
for (uint32_t i = 0; i < num_dropped_columns; i++) {
uint32_t curr_dropped_index = dropped_columns[i];
Field* curr_dropped_field = table->field[curr_dropped_index];
printf(
"Dropped column: index %d, name %s\n",
@@ -420,7 +420,7 @@ ha_tokudb::alter_table_phase2(
bool has_row_format_changes = alter_flags->is_set(HA_ALTER_ROW_FORMAT);
KEY_AND_COL_INFO altered_kc_info;
memset(&altered_kc_info, 0, sizeof(altered_kc_info));
u_int32_t max_new_desc_size = 0;
uint32_t max_new_desc_size = 0;
uchar* row_desc_buff = NULL;
uchar* column_extra = NULL;
bool dropping_indexes = alter_info->index_drop_count > 0 && !tables_have_same_keys(table,altered_table,false, false);
@@ -504,11 +504,11 @@ ha_tokudb::alter_table_phase2(
if (has_dropped_columns || has_added_columns) {
DBT column_dbt;
memset(&column_dbt, 0, sizeof(DBT));
u_int32_t max_column_extra_size;
u_int32_t num_column_extra;
u_int32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases
u_int32_t num_columns = 0;
u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
uint32_t max_column_extra_size;
uint32_t num_column_extra;
uint32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases
uint32_t num_columns = 0;
uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
memset(columns, 0, sizeof(columns));
if (has_added_columns && has_dropped_columns) {
@@ -554,7 +554,7 @@ ha_tokudb::alter_table_phase2(
column_extra = (uchar *)my_malloc(max_column_extra_size, MYF(MY_WME));
if (column_extra == NULL) { error = ENOMEM; goto cleanup; }
for (u_int32_t i = 0; i < curr_num_DBs; i++) {
for (uint32_t i = 0; i < curr_num_DBs; i++) {
DBT row_descriptor;
memset(&row_descriptor, 0, sizeof(row_descriptor));
KEY* prim_key = (hidden_primary_key) ? NULL : &altered_table->s->key_info[primary_key];
@@ -624,8 +624,8 @@ ha_tokudb::alter_table_phase2(
method = row_type_to_compression_method(create_info->row_type);
// Set the new type.
u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
for (u_int32_t i = 0; i < curr_num_DBs; ++i) {
uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; ++i) {
DB *db = share->key_file[i];
error = db->change_compression_method(db, method);
if (error) {
@@ -158,13 +158,13 @@ ha_tokudb::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_
} else
// add column
if (only_flags(handler_flags, Alter_inplace_info::ADD_COLUMN + Alter_inplace_info::ALTER_COLUMN_ORDER)) {
u_int32_t added_columns[altered_table->s->fields];
u_int32_t num_added_columns = 0;
uint32_t added_columns[altered_table->s->fields];
uint32_t num_added_columns = 0;
int r = find_changed_columns(added_columns, &num_added_columns, table, altered_table);
if (r == 0) {
if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) {
for (u_int32_t i = 0; i < num_added_columns; i++) {
u_int32_t curr_added_index = added_columns[i];
for (uint32_t i = 0; i < num_added_columns; i++) {
uint32_t curr_added_index = added_columns[i];
Field* curr_added_field = altered_table->field[curr_added_index];
printf("Added column: index %d, name %s\n", curr_added_index, curr_added_field->field_name);
}
@@ -174,13 +174,13 @@ ha_tokudb::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_
} else
// drop column
if (only_flags(handler_flags, Alter_inplace_info::DROP_COLUMN + Alter_inplace_info::ALTER_COLUMN_ORDER)) {
u_int32_t dropped_columns[table->s->fields];
u_int32_t num_dropped_columns = 0;
uint32_t dropped_columns[table->s->fields];
uint32_t num_dropped_columns = 0;
int r = find_changed_columns(dropped_columns, &num_dropped_columns, altered_table, table);
if (r == 0) {
if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) {
for (u_int32_t i = 0; i < num_dropped_columns; i++) {
u_int32_t curr_dropped_index = dropped_columns[i];
for (uint32_t i = 0; i < num_dropped_columns; i++) {
uint32_t curr_dropped_index = dropped_columns[i];
Field* curr_dropped_field = table->field[curr_dropped_index];
printf("Dropped column: index %d, name %s\n", curr_dropped_index, curr_dropped_field->field_name);
}
@@ -248,8 +248,8 @@ ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alte
assert(error == 0);
ctx->compression_changed = true;
// Set the new type.
u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
for (u_int32_t i = 0; i < curr_num_DBs; i++) {
uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
db = share->key_file[i];
error = db->change_compression_method(db, method);
if (error)
@@ -315,13 +315,13 @@ ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplace_in
int error;
uchar *column_extra = NULL;
uchar *row_desc_buff = NULL;
u_int32_t max_new_desc_size = 0;
u_int32_t max_column_extra_size;
u_int32_t num_column_extra;
u_int32_t num_columns = 0;
u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
uint32_t max_new_desc_size = 0;
uint32_t max_column_extra_size;
uint32_t num_column_extra;
uint32_t num_columns = 0;
uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
u_int32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases
uint32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases
memset(columns, 0, sizeof(columns));
KEY_AND_COL_INFO altered_kc_info;
@@ -369,7 +369,7 @@ ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplace_in
column_extra = (uchar *)my_malloc(max_column_extra_size, MYF(MY_WME));
if (column_extra == NULL) { error = ENOMEM; goto cleanup; }
for (u_int32_t i = 0; i < curr_num_DBs; i++) {
for (uint32_t i = 0; i < curr_num_DBs; i++) {
DBT row_descriptor;
memset(&row_descriptor, 0, sizeof(row_descriptor));
KEY* prim_key = (hidden_primary_key) ? NULL : &altered_table->s->key_info[primary_key];
@@ -495,8 +495,8 @@ ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *
restore_drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count);
}
if (ctx->compression_changed) {
u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
for (u_int32_t i = 0; i < curr_num_DBs; i++) {
uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
DB *db = share->key_file[i];
int error = db->change_compression_method(db, ctx->orig_compression_method);
assert(error == 0);
@@ -47,7 +47,7 @@
// used for queries
typedef struct st_col_pack_info {
u_int32_t col_pack_val; //offset if fixed, pack_index if var
uint32_t col_pack_val; //offset if fixed, pack_index if var
} COL_PACK_INFO;
//
@@ -62,8 +62,8 @@ typedef struct st_col_pack_info {
// To figure out where the blobs start, find the last offset listed (if offsets exist)
//
typedef struct st_multi_col_pack_info {
u_int32_t fixed_field_size; //where the fixed length stuff ends and the offsets for var stuff begins
u_int32_t len_of_offsets; //length of the offset bytes in a packed row
uint32_t fixed_field_size; //where the fixed length stuff ends and the offsets for var stuff begins
uint32_t len_of_offsets; //length of the offset bytes in a packed row
} MULTI_COL_PACK_INFO;
@@ -91,10 +91,10 @@ typedef struct st_key_and_col_info {
// length_bytes[i] is 0
// 'i' shows up in blob_fields
//
u_int16_t* field_lengths; //stores the field lengths of fixed size fields (1<<16 - 1 max),
uint16_t* field_lengths; //stores the field lengths of fixed size fields (1<<16 - 1 max),
uchar* length_bytes; // stores the length of lengths of varchars and varbinaries
u_int32_t* blob_fields; // list of indexes of blob fields,
u_int32_t num_blobs; // number of blobs in the table
uint32_t* blob_fields; // list of indexes of blob fields,
uint32_t num_blobs; // number of blobs in the table
//
// val packing info for all dictionaries. i'th one represents info for i'th dictionary
//
@@ -105,33 +105,33 @@ typedef struct st_key_and_col_info {
// The number of var fields in a val for dictionary i can be evaluated by
// mcp_info[i].len_of_offsets/num_offset_bytes.
//
u_int32_t num_offset_bytes; //number of bytes needed to encode the offset
uint32_t num_offset_bytes; //number of bytes needed to encode the offset
} KEY_AND_COL_INFO;
void get_var_field_info(
u_int32_t* field_len,
u_int32_t* start_offset,
u_int32_t var_field_index,
uint32_t* field_len,
uint32_t* start_offset,
uint32_t var_field_index,
const uchar* var_field_offset_ptr,
u_int32_t num_offset_bytes
uint32_t num_offset_bytes
);
void get_blob_field_info(
u_int32_t* start_offset,
u_int32_t len_of_offsets,
uint32_t* start_offset,
uint32_t len_of_offsets,
const uchar* var_field_data_ptr,
u_int32_t num_offset_bytes
uint32_t num_offset_bytes
);
static inline u_int32_t get_blob_field_len(
static inline uint32_t get_blob_field_len(
const uchar* from_tokudb,
u_int32_t len_bytes
uint32_t len_bytes
)
{
u_int32_t length = 0;
uint32_t length = 0;
switch (len_bytes) {
case (1):
length = (u_int32_t)(*from_tokudb);
length = (uint32_t)(*from_tokudb);
break;
case (2):
length = uint2korr(from_tokudb);
@@ -152,11 +152,11 @@ static inline u_int32_t get_blob_field_len(
static inline const uchar* unpack_toku_field_blob(
uchar *to_mysql,
const uchar* from_tokudb,
u_int32_t len_bytes,
uint32_t len_bytes,
bool skip
)
{
u_int32_t length = 0;
uint32_t length = 0;
const uchar* data_ptr = NULL;
if (!skip) {
memcpy(to_mysql, from_tokudb, len_bytes);
@@ -194,16 +194,16 @@ TOKU_TYPE mysql_to_toku_type (Field* field);
uchar* pack_toku_varbinary_from_desc(
uchar* to_tokudb,
const uchar* from_desc,
u_int32_t key_part_length, //number of bytes to use to encode the length in to_tokudb
u_int32_t field_length //length of field
uint32_t key_part_length, //number of bytes to use to encode the length in to_tokudb
uint32_t field_length //length of field
);
uchar* pack_toku_varstring_from_desc(
uchar* to_tokudb,
const uchar* from_desc,
u_int32_t key_part_length, //number of bytes to use to encode the length in to_tokudb
u_int32_t field_length,
u_int32_t charset_num//length of field
uint32_t key_part_length, //number of bytes to use to encode the length in to_tokudb
uint32_t field_length,
uint32_t charset_num//length of field
);
@@ -211,21 +211,21 @@ uchar* pack_toku_key_field(
uchar* to_tokudb,
uchar* from_mysql,
Field* field,
u_int32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff
uint32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff
);
uchar* pack_key_toku_key_field(
uchar* to_tokudb,
uchar* from_mysql,
Field* field,
u_int32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff
uint32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff
);
uchar* unpack_toku_key_field(
uchar* to_mysql,
uchar* from_tokudb,
Field* field,
u_int32_t key_part_length
uint32_t key_part_length
);
@@ -263,11 +263,11 @@ static inline ulonglong hpk_char_to_num(uchar* val) {
int tokudb_compare_two_keys(
const void* new_key_data,
const u_int32_t new_key_size,
const uint32_t new_key_size,
const void* saved_key_data,
const u_int32_t saved_key_size,
const uint32_t saved_key_size,
const void* row_desc,
const u_int32_t row_desc_size,
const uint32_t row_desc_size,
bool cmp_prefix
);
@@ -286,43 +286,43 @@ int create_toku_key_descriptor(
);
u_int32_t create_toku_main_key_pack_descriptor (
uint32_t create_toku_main_key_pack_descriptor (
uchar* buf
);
u_int32_t get_max_clustering_val_pack_desc_size(
uint32_t get_max_clustering_val_pack_desc_size(
TABLE_SHARE* table_share
);
u_int32_t create_toku_clustering_val_pack_descriptor (
uint32_t create_toku_clustering_val_pack_descriptor (
uchar* buf,
uint pk_index,
TABLE_SHARE* table_share,
KEY_AND_COL_INFO* kc_info,
u_int32_t keynr,
uint32_t keynr,
bool is_clustering
);
static inline bool is_key_clustering(
void* row_desc,
u_int32_t row_desc_size
uint32_t row_desc_size
)
{
return (row_desc_size > 0);
}
u_int32_t pack_clustering_val_from_desc(
uint32_t pack_clustering_val_from_desc(
uchar* buf,
void* row_desc,
u_int32_t row_desc_size,
uint32_t row_desc_size,
const DBT* pk_val
);
u_int32_t get_max_secondary_key_pack_desc_size(
uint32_t get_max_secondary_key_pack_desc_size(
KEY_AND_COL_INFO* kc_info
);
u_int32_t create_toku_secondary_key_pack_descriptor (
uint32_t create_toku_secondary_key_pack_descriptor (
uchar* buf,
bool has_hpk,
uint pk_index,
@@ -335,23 +335,23 @@ u_int32_t create_toku_secondary_key_pack_descriptor (
static inline bool is_key_pk(
void* row_desc,
u_int32_t row_desc_size
uint32_t row_desc_size
)
{
uchar* buf = (uchar *)row_desc;
return buf[0];
}
u_int32_t max_key_size_from_desc(
uint32_t max_key_size_from_desc(
void* row_desc,
u_int32_t row_desc_size
uint32_t row_desc_size
);
u_int32_t pack_key_from_desc(
uint32_t pack_key_from_desc(
uchar* buf,
void* row_desc,
u_int32_t row_desc_size,
uint32_t row_desc_size,
const DBT* pk_key,
const DBT* pk_val
);
@@ -209,7 +209,7 @@ static inline void make_name(char *newname, const char *tablename, const char *d
nn += sprintf(nn, "-%s", dictname);
}
static inline void commit_txn(DB_TXN* txn, u_int32_t flags) {
static inline void commit_txn(DB_TXN* txn, uint32_t flags) {
int r;
r = txn->commit(txn, flags);
if (r != 0) {
@@ -46,7 +46,7 @@ static MYSQL_THDVAR_BOOL(commit_sync,
"sync on txn commit",
/* check */ NULL,
/* update */ NULL,
/* default*/ TRUE
/* default*/ true
);
static MYSQL_THDVAR_UINT(pk_insert_mode,
@@ -64,49 +64,49 @@ static MYSQL_THDVAR_BOOL(load_save_space,
"if on, intial loads are slower but take less space",
NULL,
NULL,
FALSE
false
);
static MYSQL_THDVAR_BOOL(disable_slow_alter,
0,
"if on, alter tables that require copy are disabled",
NULL,
NULL,
FALSE
false
);
static MYSQL_THDVAR_BOOL(disable_hot_alter,
0,
"if on, hot alter table is disabled",
NULL,
NULL,
FALSE
false
);
static MYSQL_THDVAR_BOOL(create_index_online,
0,
"if on, create index done online",
NULL,
NULL,
TRUE
true
);
static MYSQL_THDVAR_BOOL(disable_prefetching,
0,
"if on, prefetching disabled",
NULL,
NULL,
FALSE
false
);
static MYSQL_THDVAR_BOOL(prelock_empty,
0,
"Tokudb Prelock Empty Table",
NULL,
NULL,
TRUE
true
);
static MYSQL_THDVAR_BOOL(log_client_errors,
0,
"Tokudb Log Client Errors",
NULL,
NULL,
FALSE
false
);
static MYSQL_THDVAR_UINT(block_size,
0,
@@ -150,7 +150,7 @@ tokudb_checkpoint_lock_update(
const void* save)
{
my_bool* val = (my_bool *) var_ptr;
*val= *(my_bool *) save ? TRUE : FALSE;
*val= *(my_bool *) save ? true : false;
if (*val) {
tokudb_checkpoint_lock(thd);
}
@@ -164,7 +164,7 @@ static MYSQL_THDVAR_BOOL(checkpoint_lock,
"Tokudb Checkpoint Lock",
NULL,
tokudb_checkpoint_lock_update,
FALSE
false
);
static const char *tokudb_row_format_names[] = {
@@ -255,16 +255,16 @@ void toku_hton_assert_fail(const char* expr_as_string, const char * fun, const c
//my_bool tokudb_shared_data = FALSE;
static u_int32_t tokudb_init_flags =
//my_bool tokudb_shared_data = false;
static uint32_t tokudb_init_flags =
DB_CREATE | DB_THREAD | DB_PRIVATE |
DB_INIT_LOCK |
DB_INIT_MPOOL |
DB_INIT_TXN |
DB_INIT_LOG |
DB_RECOVER;
static u_int32_t tokudb_env_flags = 0;
// static u_int32_t tokudb_lock_type = DB_LOCK_DEFAULT;
static uint32_t tokudb_env_flags = 0;
// static uint32_t tokudb_lock_type = DB_LOCK_DEFAULT;
// static ulong tokudb_log_buffer_size = 0;
// static ulong tokudb_log_file_size = 0;
static ulonglong tokudb_cache_size = 0;
@@ -276,9 +276,9 @@ static char *tokudb_log_dir;
// static ulong tokudb_region_size = 0;
// static ulong tokudb_cache_parts = 1;
const char *tokudb_hton_name = "TokuDB";
static u_int32_t tokudb_checkpointing_period;
u_int32_t tokudb_write_status_frequency;
u_int32_t tokudb_read_status_frequency;
static uint32_t tokudb_checkpointing_period;
uint32_t tokudb_write_status_frequency;
uint32_t tokudb_read_status_frequency;
#ifdef TOKUDB_VERSION
char *tokudb_version = (char*) TOKUDB_VERSION;
#else
@@ -444,7 +444,7 @@ static int tokudb_init_func(void *p) {
}
if (tokudb_cache_size) {
DBUG_PRINT("info", ("tokudb_cache_size: %lld\n", tokudb_cache_size));
r = db_env->set_cachesize(db_env, (u_int32_t)(tokudb_cache_size >> 30), (u_int32_t)(tokudb_cache_size % (1024L * 1024L * 1024L)), 1);
r = db_env->set_cachesize(db_env, (uint32_t)(tokudb_cache_size >> 30), (uint32_t)(tokudb_cache_size % (1024L * 1024L * 1024L)), 1);
if (r) {
DBUG_PRINT("info", ("set_cachesize %d\n", r));
goto error;
@@ -462,7 +462,7 @@ static int tokudb_init_func(void *p) {
}
}
u_int32_t gbytes, bytes; int parts;
uint32_t gbytes, bytes; int parts;
r = db_env->get_cachesize(db_env, &gbytes, &bytes, &parts);
if (r == 0)
if (tokudb_debug & TOKUDB_DEBUG_INIT)
@@ -551,7 +551,7 @@ static int tokudb_init_func(void *p) {
//3938: succeeded, set the init status flag and unlock
tokudb_hton_initialized = 1;
rw_unlock(&tokudb_hton_initialized_lock);
DBUG_RETURN(FALSE);
DBUG_RETURN(false);
error:
if (metadata_db) {
@@ -567,7 +567,7 @@ static int tokudb_init_func(void *p) {
// 3938: failed to initialized, drop the flag and lock
tokudb_hton_initialized = 0;
rw_unlock(&tokudb_hton_initialized_lock);
DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}
static int tokudb_done_func(void *p) {
@@ -637,7 +637,7 @@ bool tokudb_flush_logs(handlerton * hton) {
TOKUDB_DBUG_ENTER("tokudb_flush_logs");
int error;
bool result = 0;
u_int32_t curr_tokudb_checkpointing_period = 0;
uint32_t curr_tokudb_checkpointing_period = 0;
//
// get the current checkpointing period
@@ -780,7 +780,7 @@ void txn_progress_func(TOKU_TXN_PROGRESS progress, void* extra) {
}
static void commit_txn_with_progress(DB_TXN* txn, u_int32_t flags, THD* thd) {
static void commit_txn_with_progress(DB_TXN* txn, uint32_t flags, THD* thd) {
int r;
struct txn_progress_info info;
info.thd = thd;
@@ -805,7 +805,7 @@ static void abort_txn_with_progress(DB_TXN* txn, THD* thd) {
static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
TOKUDB_DBUG_ENTER("tokudb_commit");
DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt"));
u_int32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC;
uint32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC;
tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
DB_TXN **txn = all ? &trx->all : &trx->stmt;
if (*txn) {
@@ -1059,7 +1059,7 @@ static int tokudb_discover(handlerton *hton, THD* thd, const char *db,
TOKUDB_DBUG_RETURN(error);
}
static int store_dbname_tablename_size(TABLE *table, char *name, u_int64_t size, THD *thd) {
static int store_dbname_tablename_size(TABLE *table, char *name, uint64_t size, THD *thd) {
char *tp = strrchr(name, '/');
assert(tp);
char *tablename = tp + 1;
@@ -1181,7 +1181,7 @@ static int tokudb_get_user_data_size(TABLE *table, THD *thd, bool exact) {
if (!error) {
char* name = (char *)curr_key.data;
char* newname;
u_int64_t curr_num_bytes = 0;
uint64_t curr_num_bytes = 0;
DB_BTREE_STAT64 dict_stats;
error = db_create(&curr_db, db_env, 0);
@@ -1241,14 +1241,14 @@ static int tokudb_get_user_data_size(TABLE *table, THD *thd, bool exact) {
// in this case, we have a hidden primary key, do not
// want to report space taken up by the hidden primary key to the user
//
u_int64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata;
uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata;
curr_num_bytes = (hpk_space > curr_num_bytes) ? 0 : curr_num_bytes - hpk_space;
}
else {
//
// one infinity byte per key needs to be subtracted
//
u_int64_t inf_byte_space = dict_stats.bt_ndata;
uint64_t inf_byte_space = dict_stats.bt_ndata;
curr_num_bytes = (inf_byte_space > curr_num_bytes) ? 0 : curr_num_bytes - inf_byte_space;
}
@@ -1456,7 +1456,7 @@ static bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * sta
default:
break;
}
return FALSE;
return false;
}
static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer) {
@@ -37,7 +37,7 @@ srv_row_format_t get_row_format(THD *thd);
extern HASH tokudb_open_tables;
extern pthread_mutex_t tokudb_mutex;
extern pthread_mutex_t tokudb_meta_mutex;
extern u_int32_t tokudb_write_status_frequency;
extern u_int32_t tokudb_read_status_frequency;
extern uint32_t tokudb_write_status_frequency;
extern uint32_t tokudb_read_status_frequency;
#endif //#ifdef _HATOKU_HTON