Commit 49568aef authored by Sergey Petrunya

MWL#90: Code cleanup: Unification of merged and non-merged SJM nests processing

- Make the join buffering code take into account that JOIN_TABs are no longer
  a linear array.
parent dad93f2c
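To make the change concrete, here is a minimal, self-contained sketch (the JoinTab struct and walk_plan function are simplified stand-ins invented for illustration, not the server's real JOIN_TAB or next_linear_tab): with SJM-Materialization, the query plan becomes a "bush" in which a nest root owns child JOIN_TABs, so loops that used to step through a flat array with tab++ must instead walk the plan linearly, descending into each nest, which is what the next_linear_tab() calls in the diff below do.

#include <cstdio>
#include <vector>

struct JoinTab
{
  const char *alias;
  JoinTab *bush_root_tab;              /* non-NULL for tables inside an SJM nest */
  std::vector<JoinTab*> bush_children; /* non-empty for the nest's "root" tab */
};

/*
  Visit every table in execution order, descending into SJM nests right
  after their root (the include_bush_roots=TRUE flavour of the walk).
*/
static void walk_plan(const std::vector<JoinTab*> &top_level)
{
  for (JoinTab *tab : top_level)
  {
    std::printf("%s\n", tab->alias);
    for (JoinTab *child : tab->bush_children)
      std::printf("  %s\n", child->alias);
  }
}

int main()
{
  JoinTab t2=  {"t2", NULL, {}};
  JoinTab t1=  {"t1", NULL, {}};
  JoinTab t3=  {"t3", NULL, {}};
  JoinTab sjm= {"<subquery>", NULL, {&t1, &t3}};  /* SJM nest root owning t1, t3 */
  t1.bush_root_tab= &sjm;
  t3.bush_root_tab= &sjm;

  std::vector<JoinTab*> plan= {&t2, &sjm};
  walk_plan(plan);   /* prints: t2, <subquery>, t1, t3 */
  return 0;
}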
......@@ -1360,7 +1360,7 @@ explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using index
1 PRIMARY t3 index a a 5 NULL 3 100.00 Using index
1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 100.61 Using index; FirstMatch(t2)
1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 100.00 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1` join `test`.`t3`) where ((`test`.`t1`.`a` = `test`.`t2`.`a`) and (`test`.`t1`.`b` = `test`.`t3`.`a`))
insert into t1 values (3,31);
......
......@@ -3983,6 +3983,7 @@ bool join_tab_execution_startup(JOIN_TAB *tab)
JOIN *join= tab->join;
SJ_MATERIALIZATION_INFO *sjm= tab->bush_children->start->emb_sj_nest->sj_mat_info;
JOIN_TAB *join_tab= tab->bush_children->start;
JOIN_TAB *save_return_tab= join->return_tab;
if (!sjm->materialized)
{
......@@ -3995,29 +3996,14 @@ bool join_tab_execution_startup(JOIN_TAB *tab)
(rc= sub_select(join, join_tab, TRUE/* now EOF */)) < 0)
{
//psergey3-todo: set sjm->materialized=TRUE here, too??
join->return_tab= save_return_tab;
DBUG_RETURN(rc); /* it's NESTED_LOOP_(ERROR|KILLED)*/
}
/*
Ok, materialization finished. Initialize the access to the temptable
*/
join->return_tab= save_return_tab;
sjm->materialized= TRUE;
#if 0
psergey3: already done at setup:
if (sjm->is_sj_scan)
{
/* Initialize full scan */
JOIN_TAB *last_tab= join_tab + (sjm->tables - 1);
init_read_record(&last_tab->read_record, join->thd,
sjm->table, NULL, TRUE, TRUE, FALSE);
DBUG_ASSERT(last_tab->read_record.read_record == rr_sequential);
last_tab->read_first_record= join_read_record_no_init;
last_tab->read_record.copy_field= sjm->copy_field;
last_tab->read_record.copy_field_end= sjm->copy_field +
sjm->sjm_table_cols.elements;
last_tab->read_record.read_record= rr_sequential_and_unpack;
}
#endif
}
}
......
......@@ -137,6 +137,7 @@ uint add_table_data_fields_to_join_cache(JOIN_TAB *tab,
return len;
}
JOIN_TAB *next_linear_tab(JOIN* join, JOIN_TAB* tab, bool include_bush_roots);
/*
Determine different counters of fields associated with a record in the cache
......@@ -157,10 +158,61 @@ uint add_table_data_fields_to_join_cache(JOIN_TAB *tab,
void JOIN_CACHE::calc_record_fields()
{
JOIN_TAB *tab = prev_cache ? prev_cache->join_tab :
/*
  Determine the first table whose fields may go into this cache:
  - if there is a previous cache, continue from its join_tab;
  - for tables inside an SJM-Materialization nest, start from the first
    table of that nest;
  - for top-level tables, start from the first non-const table.
*/
JOIN_TAB *tab;
if (prev_cache)
tab= prev_cache->join_tab;
else
{
if (join_tab->bush_root_tab)
{
// inside an SJM-Mat nest: start from the nest's first table
tab= join_tab->bush_root_tab->bush_children->start;
}
else
{
// outside SJM-Mat nest: start from first non-const table
tab= join->join_tab + join->const_tables;
}
}
start_tab= tab;
//tables= join_tab-tab;
//tables= 0;
fields= 0;
blobs= 0;
flag_fields= 0;
......@@ -168,7 +220,7 @@ void JOIN_CACHE::calc_record_fields()
data_field_ptr_count= 0;
referenced_fields= 0;
for ( ; tab < join_tab ; tab++)
for ( ; tab != join_tab ; tab= next_linear_tab(join, tab, TRUE))
{
calc_used_field_length(join->thd, tab);
flag_fields+= test(tab->used_null_fields || tab->used_uneven_bit_fields);
......@@ -177,6 +229,7 @@ void JOIN_CACHE::calc_record_fields()
blobs+= tab->used_blobs;
fields+= tab->check_rowid_field();
//tables++;
}
if ((with_match_flag= join_tab->use_match_flag()))
flag_fields++;
......@@ -271,7 +324,8 @@ void JOIN_CACHE::create_flag_fields()
&copy);
/* Create fields for all null bitmaps and null row flags that are needed */
for (tab= join_tab-tables; tab < join_tab; tab++)
//for (tab= join_tab-tables; tab < join_tab; tab++)
for (tab= start_tab; tab != join_tab; tab= next_linear_tab(join, tab, TRUE))
{
TABLE *table= tab->table;
......@@ -336,7 +390,8 @@ void JOIN_CACHE:: create_remaining_fields(bool all_read_fields)
CACHE_FIELD *copy= field_descr+flag_fields+data_field_count;
CACHE_FIELD **copy_ptr= blob_ptr+data_field_ptr_count;
for (tab= join_tab-tables; tab < join_tab; tab++)
for (tab= start_tab; tab != join_tab; tab= next_linear_tab(join, tab, TRUE))
//for (tab= join_tab-tables; tab < join_tab; tab++)
{
MY_BITMAP *rem_field_set;
TABLE *table= tab->table;
......@@ -557,7 +612,9 @@ int JOIN_CACHE_BKA::init()
of the counting 'in local_key_arg_fields' and 'external_key_arg_fields'
respectively.
*/
for (tab= cache->join_tab-cache->tables; tab < cache->join_tab ; tab++)
// for (tab= cache->join_tab-cache->tables; tab < cache->join_tab ; tab++)
for (tab= cache->start_tab; tab != cache->join_tab; tab=
next_linear_tab(cache->join, tab, TRUE))
{
uint key_args;
bitmap_clear_all(&tab->table->tmp_set);
......@@ -597,7 +654,9 @@ int JOIN_CACHE_BKA::init()
while (ext_key_arg_cnt)
{
cache= cache->prev_cache;
for (tab= cache->join_tab-cache->tables; tab < cache->join_tab ; tab++)
for (tab= cache->start_tab; tab != cache->join_tab; tab=
next_linear_tab(cache->join, tab, TRUE))
//for (tab= cache->join_tab-cache->tables; tab < cache->join_tab ; tab++)
{
CACHE_FIELD *copy_end;
MY_BITMAP *key_read_set= &tab->table->tmp_set;
......@@ -640,7 +699,8 @@ int JOIN_CACHE_BKA::init()
/* Now create local fields that are used to build ref for this key access */
copy= field_descr+flag_fields;
for (tab= join_tab-tables; tab < join_tab ; tab++)
//for (tab= join_tab-tables; tab < join_tab ; tab++)
for (tab= start_tab; tab != join_tab; tab= next_linear_tab(join, tab, TRUE))
{
length+= add_table_data_fields_to_join_cache(tab, &tab->table->tmp_set,
&data_field_count, &copy,
......
......@@ -5883,7 +5883,7 @@ JOIN_TAB *first_linear_tab(JOIN *join, bool after_const_tables)
Depending on include_bush_roots parameter, JOIN_TABS that represent
SJM-scan/lookups are produced or omitted.
SJM Bush children are returned right after (or in place of) their container
SJM-Bush children are returned right after (or in place of) their container
join tab (TODO: does anybody depend on this? A: make_join_readinfo() seems
to.)
*/
......@@ -6095,8 +6095,8 @@ get_best_combination(JOIN *join)
}
else if (create_ref_for_key(join, j, keyuse, used_tables))
DBUG_RETURN(TRUE); // Something went wrong
j->records_read= join->best_positions[tablenr].records_read;
loop_end:
j->records_read= join->best_positions[tablenr].records_read;
join->map2table[j->table->tablenr]= j;
// If we've reached the end of sjm nest, switch back to main sequence
......
......@@ -500,12 +500,14 @@ protected:
context can be accessed.
*/
JOIN *join;
#if 0
/*
Cardinality of the range of join tables whose fields can be put into the
cache. (A table from the range not necessarily contributes to the cache.)
*/
uint tables;
#endif
JOIN_TAB *start_tab;
/*
The total number of flag and data fields that can appear in a record
......