mirror of https://github.com/MariaDB/server.git
Remove garbage comments
@@ -4495,26 +4495,6 @@ double get_post_group_estimate(JOIN* join, double join_op_rows)
}


/*
  Optimize the underlying subselect's join

  @param out_rows OUT How many records we expect to get in the
                      materialized table
  @param cost OUT Cost to materialize the subquery

  @return
    0 OK
    1 Fatal error
*/
#if 0
int subselect_hash_sj_engine::optimize(double *out_rows, double *cost)
{
  DBUG_ENTER("subselect_hash_sj_engine::optimize");
  DBUG_ASSERT(0);
  DBUG_RETURN(0);
}
#endif

/**
  Execute a subquery IN predicate via materialization.
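For context, the removed engine method only ever documented a contract: report how many rows the materialized temp table is expected to hold and what it costs to build it. A minimal sketch of that contract, outside the server code, with every name and the write-cost constant chosen purely for illustration:

// Minimal sketch (not MariaDB code) of the contract the removed optimize()
// documented: estimate the row count and build cost of a materialized
// subquery. All names and the write_cost_per_row constant are assumptions
// made for illustration only.
#include <algorithm>
#include <cstdio>

struct InnerJoinEstimate
{
  double output_rows;   // rows the subquery's join is expected to produce
  double read_time;     // cost to execute that join once
};

// out_rows: expected rows in the materialized temp table (duplicates removed)
// cost:     cost to execute the subquery and fill the temp table
static void estimate_materialization(const InnerJoinEstimate &join_est,
                                     double distinct_rows_estimate,
                                     double *out_rows, double *cost)
{
  const double write_cost_per_row= 0.01;                    // assumed constant
  *out_rows= std::min(join_est.output_rows, distinct_rows_estimate);
  *cost= join_est.read_time + join_est.output_rows * write_cost_per_row;
}

int main()
{
  InnerJoinEstimate est= {1000.0, 250.0};
  double rows, cost;
  estimate_materialization(est, 400.0, &rows, &cost);
  printf("out_rows=%g cost=%g\n", rows, cost);
  return 0;
}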
@@ -160,7 +160,6 @@ public:
  bool mark_as_dependent(THD *thd, st_select_lex *select, Item *item);
  void fix_after_pullout(st_select_lex *new_parent, Item **ref);
  void recalc_used_tables(st_select_lex *new_parent, bool after_pullout);
  //virtual int optimize(double *out_rows, double *cost);
  virtual bool exec();
  /*
    If subquery optimization or execution determines that the subquery has
@@ -593,7 +592,6 @@ public:
  THD * get_thd() { return thd; }
  virtual int prepare()= 0;
  virtual void fix_length_and_dec(Item_cache** row)= 0;
  //virtual int optimize(double *out_rows, double *cost) { DBUG_ASSERT(0); return 0; }
  /*
    Execute the engine
@@ -870,7 +868,6 @@ public:
  bool init(List<Item> *tmp_columns, uint subquery_id);
  void cleanup();
  int prepare();
  //int optimize(double *out_rows, double *cost);
  int exec();
  virtual void print(String *str, enum_query_type query_type);
  uint cols()
@@ -5928,6 +5928,8 @@ greedy_search(JOIN *join,
  @param record_count_arg[out] store record count here

  @note
  end_tab==NULL means get full join cost and fanout

  When used by semi-join materialization code the idea is that we
  detect sj-materialization after we've put all sj-inner tables into
  the join prefix.
@@ -5943,39 +5945,6 @@ greedy_search(JOIN *join,
  @returns
    read_time_arg and record_count_arg contain the computed cost.
*/
#if 0
void JOIN::get_partial_join_cost(uint n_tables,
                                 double *read_time_arg, double *record_count_arg)
{
#if 0
  double record_count= 1;
  double read_time= 0.0;

  DBUG_ASSERT(n_tables <= tables);

  for (uint i= const_tables; i < n_tables; i++)
  {
    if (best_positions[i].records_read)
    {
      record_count *= best_positions[i].records_read;
      read_time += best_positions[i].read_time;
    }
  }
  *read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
  *record_count_arg= record_count;
#endif
  DBUG_ASSERT(0);
}
#endif

/*
  Get partial join cost and fanout

  end_tab==NULL means get full join cost and fanout

  if end_tab belongs to a semi-join nest, get fanout within the scope of that
  nest.
*/

void JOIN::get_partial_cost_and_fanout(uint end_tab_idx,
                                       table_map filter_map,
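The dead get_partial_join_cost() body above still shows the underlying arithmetic: the fanout of a join prefix is the product of the per-table row estimates, and the prefix cost is the sum of the per-table read times. A standalone sketch of that accumulation, where PositionEstimate is an illustration stand-in for JOIN::best_positions[] rather than a server type:

// Minimal sketch (not MariaDB code) of the accumulation the removed
// get_partial_join_cost() performed: fanout = product of per-table row
// estimates, cost = sum of per-table read times over the join prefix.
#include <cstdio>
#include <vector>

struct PositionEstimate
{
  double records_read;  // expected rows fetched from this table per prefix row
  double read_time;     // cost of reading this table in the chosen way
};

static void partial_join_cost(const std::vector<PositionEstimate> &prefix,
                              double *read_time_arg, double *record_count_arg)
{
  double record_count= 1.0;
  double read_time= 0.0;
  for (const PositionEstimate &pos : prefix)
  {
    if (pos.records_read)            // skip entries with no estimate
    {
      record_count*= pos.records_read;
      read_time+= pos.read_time;
    }
  }
  *read_time_arg= read_time;
  *record_count_arg= record_count;
}

int main()
{
  std::vector<PositionEstimate> prefix= {{10.0, 2.0}, {5.0, 8.0}, {2.0, 3.0}};
  double cost, fanout;
  partial_join_cost(prefix, &cost, &fanout);
  printf("prefix cost=%g fanout=%g\n", cost, fanout);   // cost=13 fanout=100
  return 0;
}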
@@ -6521,14 +6490,6 @@ int JOIN_TAB::make_scan_filter()
  Item *cond= is_inner_table_of_outer_join() ?
               *get_first_inner_table()->on_expr_ref : join->conds;

  /*
    psergey4timour: passing MAX_TABLES here is not correct.

    The following make_cond_for_table call constructs a condition that will be
    applied when reading a table with join buffering. This means that we're
    nearly certain that this condition will be checked fewer than #{records in
    join output} times.
  */
  if (cond &&
      (tmp= make_cond_for_table(join->thd, cond,
                                join->const_table_map | table->map,
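The comment being removed in this hunk describes why the condition built here is cheap to apply: make_cond_for_table keeps only the part of the condition that can be checked with the tables read so far, so what survives can act as a scan filter under join buffering. A rough standalone sketch of that splitting idea, where Predicate and the bitmaps are illustration-only and not the server's representation:

// Minimal sketch (not MariaDB code) of the idea behind the make_cond_for_table
// call above: out of a conjunction of predicates, keep only those whose
// referenced tables are all covered by a given table bitmap, so the surviving
// part can be checked while scanning one table with join buffering.
#include <cstdio>
#include <vector>

typedef unsigned long long table_map;

struct Predicate
{
  const char *text;
  table_map   used_tables;   // bitmap of tables the predicate refers to
};

// Return the predicates that can be evaluated once the tables in
// available_tables have been read.
static std::vector<Predicate>
cond_for_tables(const std::vector<Predicate> &conjuncts, table_map available_tables)
{
  std::vector<Predicate> result;
  for (const Predicate &p : conjuncts)
  {
    if ((p.used_tables & ~available_tables) == 0)
      result.push_back(p);
  }
  return result;
}

int main()
{
  // t1 -> bit 0, t2 -> bit 1
  std::vector<Predicate> where_cond= {
    {"t1.a > 10",        1ULL << 0},
    {"t1.b = t2.b",      (1ULL << 0) | (1ULL << 1)},
    {"t2.c IS NOT NULL", 1ULL << 1},
  };
  // Only t1 has been read so far: just "t1.a > 10" survives as a scan filter.
  for (const Predicate &p : cond_for_tables(where_cond, 1ULL << 0))
    printf("pushed: %s\n", p.text);
  return 0;
}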