Commit 7a703328 authored by monty@mashka.mysql.fi

Portability fix when using -DBIG_TABLES

parent 12c11c34
@@ -42,7 +42,7 @@ single_host:
 # discovers a problem which requires local intervention. Please make the
 # contact information accurate so we can support you.
 #
-contact: Sasha Pachev <sasha@mysql.com>
+contact: sys@mysql.com
 #
 # It is very important that this email address is filled out and accurate.
 # If someone converts your repository to open logging (which you may not
@@ -51,7 +51,7 @@ contact: Sasha Pachev <sasha@mysql.com>
 # response from anyone else at your location after 90 days, then open logging
 # will be implicitly approved.
 #
-email: sasha@mysql.com
+email: sys@mysql.com
 #
 # Add your street address if you like, it is optional.
 #
......
@@ -210,7 +210,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
     print_version();
     exit(0);
   case '?':
-  default:
     usage();
     exit(0);
   }
......
@@ -300,8 +300,10 @@ enum data_file_type {
 /* For number of records */
 #ifdef BIG_TABLES
+#define rows2double(A) ulonglong2double(A)
 typedef my_off_t ha_rows;
 #else
+#define rows2double(A) (double) (A)
 typedef ulong ha_rows;
 #endif
......
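Note: the hunk above is the core of the fix. With -DBIG_TABLES, ha_rows is a 64-bit type, and some older compilers cannot convert an unsigned 64-bit integer to double implicitly, so rows2double() gives the callers below a portable conversion. The following is a minimal standalone sketch of the idea, not the real headers: the typedefs are simplified stand-ins (the actual BIG_TABLES typedef uses my_off_t), and ulonglong2double() is assumed here to reduce to a plain cast.

  #include <stdio.h>

  typedef unsigned long ulong;
  typedef unsigned long long ulonglong;

  /* Stand-in for MySQL's helper; the macro exists for compilers that
     cannot cast an unsigned 64-bit value straight to double. */
  #define ulonglong2double(A) ((double) (A))

  #ifdef BIG_TABLES
  typedef ulonglong ha_rows;              /* 64-bit row counts */
  #define rows2double(A) ulonglong2double(A)
  #else
  typedef ulong ha_rows;                  /* 32-bit row counts */
  #define rows2double(A) (double) (A)
  #endif

  int main(void)
  {
    ha_rows records= 1000000;
    /* Same pattern as ha_berkeley::scan_time() below: turn a row
       count into a double cost figure without an implicit conversion. */
    double cost= rows2double(records) / 3;
    printf("scan cost: %f\n", cost);
    return 0;
  }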
@@ -230,7 +230,7 @@ ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length,
 if (error)
   my_error(ER_FILSORT_ABORT,MYF(ME_ERROR+ME_WAITTANG));
 else
-  statistic_add(filesort_rows, records, &LOCK_status);
+  statistic_add(filesort_rows, (ulong) records, &LOCK_status);
 *examined_rows= param.examined_rows;
 #ifdef SKIP_DBUG_IN_FILESORT
 DBUG_POP();                     /* Ok to DBUG */
......
@@ -1929,7 +1929,7 @@ int ha_berkeley::delete_table(const char *name)
 double ha_berkeley::scan_time()
 {
-  return records/3;
+  return rows2double(records/3);
 }
 ha_rows ha_berkeley::records_in_range(int keynr,
@@ -2204,7 +2204,7 @@ static BDB_SHARE *get_share(const char *table_name, TABLE *table)
 if (!(share=(BDB_SHARE*) hash_search(&bdb_open_tables, (byte*) table_name,
                                      length)))
 {
-  ha_rows *rec_per_key;
+  ulong *rec_per_key;
   char *tmp_name;
   DB **key_file;
   u_int32_t *key_type;
......
@@ -27,7 +27,8 @@
 typedef struct st_berkeley_share {
   ulonglong auto_ident;
-  ha_rows rows, org_rows, *rec_per_key;
+  ha_rows rows, org_rows;
+  ulong *rec_per_key;
   THR_LOCK lock;
   pthread_mutex_t mutex;
   char *table_name;
......
@@ -88,9 +88,9 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
 file=heap_open(name,mode,
                table->keys,keydef,
                table->reclength,
-               ((table->max_rows < max_rows && table->max_rows) ?
+               (ulong) ((table->max_rows < max_rows && table->max_rows) ?
                 table->max_rows : max_rows),
-               table->min_rows);
+               (ulong) table->min_rows);
 my_free((gptr) keydef,MYF(0));
 if (file)
   info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
......
@@ -3421,7 +3421,7 @@ ha_innobase::info(
 row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
 dict_table_t* ib_table;
 dict_index_t* index;
-ulong rec_per_key;
+ha_rows rec_per_key;
 ulong j;
 ulong i;
@@ -3482,7 +3482,7 @@ ha_innobase::info(
   rec_per_key = records;
 } else {
-  rec_per_key = (ulong)(records /
+  rec_per_key = (ha_rows)(records /
                 index->stat_n_diff_key_vals[j + 1]);
 }
@@ -3497,8 +3497,9 @@ ha_innobase::info(
   rec_per_key = 1;
 }
-table->key_info[i].rec_per_key[j]
-  = rec_per_key;
+table->key_info[i].rec_per_key[j]=
+  rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
+  rec_per_key;
 }
 index = dict_table_get_next_index_noninline(index);
......
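Note: in the last hunk above, rec_per_key is now computed as a 64-bit ha_rows value but still has to be stored in the ulong rec_per_key[] statistics array, so it is saturated at the largest ulong rather than silently truncated. A standalone sketch of that saturation pattern follows; the typedefs are simplified stand-ins for the MySQL ones (assuming a BIG_TABLES build), and clamp_rec_per_key() is a hypothetical helper name used only for illustration.

  #include <stdio.h>

  typedef unsigned long ulong;
  typedef unsigned long long ha_rows;   /* 64-bit row count, as with -DBIG_TABLES */

  /* Saturate a 64-bit row estimate into a (possibly narrower) ulong slot:
     anything that does not fit becomes ~(ulong) 0, i.e. ULONG_MAX. */
  ulong clamp_rec_per_key(ha_rows rec_per_key)
  {
    return (rec_per_key >= (ha_rows) ~(ulong) 0) ? ~(ulong) 0 : (ulong) rec_per_key;
  }

  int main(void)
  {
    printf("%lu\n", clamp_rec_per_key((ha_rows) 1000));   /* fits: prints 1000 */
    printf("%lu\n", clamp_rec_per_key(~(ha_rows) 0));     /* clamped to ULONG_MAX */
    return 0;
  }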
@@ -380,7 +380,8 @@ int ha_isam::create(const char *name, register TABLE *form,
 }
 recinfo_pos->base.type= (int) FIELD_LAST;      /* End of fieldinfo */
 error=nisam_create(fn_format(buff,name,"","",2+4+16),form->keys,keydef,
-                   recinfo,form->max_rows,form->min_rows,0,0,0L);
+                   recinfo,(ulong) form->max_rows, (ulong) form->min_rows,
+                   0, 0, 0L);
 my_free((gptr) recinfo,MYF(0));
 DBUG_RETURN(error);
......
@@ -228,7 +228,7 @@ class handler :public Sql_alloc
 void change_table_ptr(TABLE *table_arg) { table=table_arg; }
 virtual double scan_time()
   { return ulonglong2double(data_file_length) / IO_SIZE + 1; }
-virtual double read_time(ha_rows rows) { return rows; }
+virtual double read_time(ha_rows rows) { return rows2double(rows); }
 virtual bool fast_key_read() { return 0;}
 virtual key_map keys_to_use_for_scanning() { return 0; }
 virtual bool has_transactions(){ return 0;}
......
@@ -192,7 +192,7 @@ longlong Item_func_week::val_int()
 TIME ltime;
 if (get_arg0_date(&ltime,0))
   return 0;
-week_format= args[1]->val_int();
+week_format= (uint) args[1]->val_int();
 return (longlong) calc_week(&ltime,
                             (week_format & 2) != 0,
                             (week_format & 1) == 0,
......
@@ -1969,8 +1969,8 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli)
 int Rand_log_event::exec_event(struct st_relay_log_info* rli)
 {
-  thd->rand.seed1 = seed1;
-  thd->rand.seed2 = seed2;
+  thd->rand.seed1 = (ulong) seed1;
+  thd->rand.seed2 = (ulong) seed2;
   rli->inc_pending(get_event_len());
   return 0;
 }
......
@@ -1049,13 +1049,13 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var)
 bool sys_var_rand_seed1::update(THD *thd, set_var *var)
 {
-  thd->rand.seed1=var->value->val_int();
+  thd->rand.seed1= (ulong) var->value->val_int();
   return 0;
 }
 bool sys_var_rand_seed2::update(THD *thd, set_var *var)
 {
-  thd->rand.seed2=var->value->val_int();
+  thd->rand.seed2= (ulong) var->value->val_int();
   return 0;
 }
......
@@ -940,7 +940,7 @@ int show_binlog_events(THD* thd)
 if (mysql_bin_log.is_open())
 {
   LEX_MASTER_INFO *lex_mi = &thd->lex.mi;
-  uint event_count, limit_start, limit_end;
+  ha_rows event_count, limit_start, limit_end;
   my_off_t pos = lex_mi->pos;
   char search_file_name[FN_REFLEN], *name;
   const char *log_file_name = lex_mi->log_file_name;
......
@@ -1751,7 +1751,7 @@ static void
 find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
           double read_time)
 {
-  ulong rec;
+  ha_rows rec;
   double tmp;
   THD *thd= current_thd;
@@ -2013,7 +2013,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
 {                                       // Check full join
   if (s->on_expr)
   {
-    tmp=s->found_records;               // Can't use read cache
+    tmp=rows2double(s->found_records);  // Can't use read cache
   }
   else
   {
@@ -2032,11 +2032,11 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
       will ensure that this will be used
     */
     best=tmp;
-    records=s->found_records;
+    records= rows2double(s->found_records);
     best_key=0;
   }
 }
-join->positions[idx].records_read=(double) records;
+join->positions[idx].records_read= records;
 join->positions[idx].key=best_key;
 join->positions[idx].table= s;
 if (!best_key && idx == join->const_tables &&
@@ -2373,7 +2373,7 @@ bool
 store_val_in_field(Field *field,Item *item)
 {
   THD *thd=current_thd;
-  ulong cuted_fields=thd->cuted_fields;
+  ha_rows cuted_fields=thd->cuted_fields;
   thd->count_cuted_fields=1;
   item->save_in_field(field);
   thd->count_cuted_fields=0;
@@ -2461,7 +2461,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
   use_quick_range=1;
   tab->use_quick=1;
   tab->ref.key_parts=0;                 // Don't use ref key.
-  join->best_positions[i].records_read=tab->quick->records;
+  join->best_positions[i].records_read= rows2double(tab->quick->records);
 }
 COND *tmp=make_cond_for_table(cond,used_tables,current_map);
......
@@ -659,7 +659,7 @@ mysqld_show_keys(THD *thd, TABLE_LIST *table_list)
 field_list.push_back(new Item_empty_string("Column_name",NAME_LEN));
 field_list.push_back(item=new Item_empty_string("Collation",1));
 item->maybe_null=1;
-field_list.push_back(item=new Item_int("Cardinality",0,11));
+field_list.push_back(item=new Item_int("Cardinality",0,21));
 item->maybe_null=1;
 field_list.push_back(item=new Item_int("Sub_part",0,3));
 item->maybe_null=1;
@@ -700,8 +700,8 @@ mysqld_show_keys(THD *thd, TABLE_LIST *table_list)
 KEY *key=table->key_info+i;
 if (key->rec_per_key[j])
 {
-  ulong records=(table->file->records / key->rec_per_key[j]);
-  end=int10_to_str((long) records, buff, 10);
+  ha_rows records=(table->file->records / key->rec_per_key[j]);
+  end=longlong10_to_str((longlong) records, buff, 10);
   net_store_data(packet,convert,buff,(uint) (end-buff));
 }
 else
......
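Note: the SHOW KEYS hunk widens the Cardinality column from 11 to 21 characters and switches to a longlong-capable string conversion, since a 64-bit ha_rows value can need up to 20 decimal digits. A rough standalone illustration of that width requirement, using snprintf in place of the server's longlong10_to_str() helper:

  #include <stdio.h>

  int main(void)
  {
    unsigned long long records= 18446744073709551615ULL;  /* largest 64-bit value */
    char buff[22];                                         /* 20 digits + room to spare */
    int len= snprintf(buff, sizeof(buff), "%llu", records);
    printf("%s needs %d characters\n", buff, len);         /* prints 20 */
    return 0;
  }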