Manual merge
mysql-test/r/partition.result: Auto merged
sql/handler.h: Auto merged
sql/item.h: Auto merged
sql/sql_class.cc: Auto merged
sql/sql_lex.h: Auto merged
sql/sql_select.cc: Auto merged
@@ -271,3 +271,10 @@ t1 CREATE TABLE `t1` (
   `b` int(11) default NULL
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (a) (PARTITION x1 VALUES LESS THAN (6) ENGINE = MyISAM, PARTITION x3 VALUES LESS THAN (8) ENGINE = MyISAM, PARTITION x4 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION x5 VALUES LESS THAN (12) ENGINE = MyISAM, PARTITION x6 VALUES LESS THAN (14) ENGINE = MyISAM, PARTITION x7 VALUES LESS THAN (16) ENGINE = MyISAM, PARTITION x8 VALUES LESS THAN (18) ENGINE = MyISAM, PARTITION x9 VALUES LESS THAN (20) ENGINE = MyISAM)
 drop table t1;
+create table t1 (a int not null, b int not null) partition by LIST (a+b) (
+partition p0 values in (12),
+partition p1 values in (14)
+);
+insert into t1 values (10,1);
+ERROR HY000: Table has no partition for value 11
+drop table t1;
@@ -259,3 +259,48 @@ explain partitions select * from t1 where a is not null;
 id select_type table partitions type possible_keys key key_len ref rows Extra
 1 SIMPLE t1 p0,p1 ALL NULL NULL NULL NULL 2 Using where
 drop table t1;
+create table t1 (a int not null, b int not null, key(a), key(b))
+partition by hash(a) partitions 4;
+insert into t1 values (1,1),(2,2),(3,3),(4,4);
+explain partitions
+select * from t1 X, t1 Y
+where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE X p1,p2 ALL a,b NULL NULL NULL 4 Using where
+1 SIMPLE Y p2,p3 ref a,b b 4 test.X.b 2 Using where
+explain partitions
+select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE X p1,p2 ALL a NULL NULL NULL 4 Using where
+1 SIMPLE Y p1,p2 ref a a 4 test.X.a 2
+drop table t1;
+create table t1 (a int) partition by hash(a) partitions 20;
+insert into t1 values (1),(2),(3);
+explain partitions select * from t1 where a > 1 and a < 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t1 where a >= 1 and a < 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1,p2 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t1 where a > 1 and a <= 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2,p3 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t1 where a >= 1 and a <= 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1,p2,p3 ALL NULL NULL NULL NULL 3 Using where
+drop table t1;
+create table t1 (a int, b int)
+partition by list(a) subpartition by hash(b) subpartitions 20
+(
+partition p0 values in (0),
+partition p1 values in (1),
+partition p2 values in (2),
+partition p3 values in (3)
+);
+insert into t1 values (1,1),(2,2),(3,3);
+explain partitions select * from t1 where b > 1 and b < 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0_sp2,p1_sp2,p2_sp2,p3_sp2 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t1 where b > 1 and b < 3 and (a =1 or a =2);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1_sp2,p2_sp2 ALL NULL NULL NULL NULL 3 Using where
@@ -343,3 +343,13 @@ ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
 show create table t1;
 drop table t1;
+
+# Testcase for BUG#15819
+create table t1 (a int not null, b int not null) partition by LIST (a+b) (
+partition p0 values in (12),
+partition p1 values in (14)
+);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (10,1);
+
+drop table t1;
 
@@ -230,9 +230,45 @@ create table t1 (a int) partition by hash(a) partitions 2;
 insert into t1 values (1),(2);
 explain partitions select * from t1 where a is null;
 
-# this selects both
+# this uses both partitions
 explain partitions select * from t1 where a is not null;
 drop table t1;
 
+# Join tests
+create table t1 (a int not null, b int not null, key(a), key(b))
+partition by hash(a) partitions 4;
+insert into t1 values (1,1),(2,2),(3,3),(4,4);
+
+explain partitions
+select * from t1 X, t1 Y
+where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3);
+
+explain partitions
+select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2);
+
+drop table t1;
+
+# Tests for "short ranges"
+create table t1 (a int) partition by hash(a) partitions 20;
+insert into t1 values (1),(2),(3);
+explain partitions select * from t1 where a > 1 and a < 3;
+explain partitions select * from t1 where a >= 1 and a < 3;
+explain partitions select * from t1 where a > 1 and a <= 3;
+explain partitions select * from t1 where a >= 1 and a <= 3;
+drop table t1;
+
+create table t1 (a int, b int)
+partition by list(a) subpartition by hash(b) subpartitions 20
+(
+partition p0 values in (0),
+partition p1 values in (1),
+partition p2 values in (2),
+partition p3 values in (3)
+);
+insert into t1 values (1,1),(2,2),(3,3);
+
+explain partitions select * from t1 where b > 1 and b < 3;
+explain partitions select * from t1 where b > 1 and b < 3 and (a =1 or a =2);
+
 # No tests for NULLs in RANGE(monotonic_expr()) - they depend on BUG#15447
 # being fixed.
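The pruning seen in the "short ranges" tests follows from HASH partitioning placing a non-negative integer value v into partition number v MOD the number of partitions; with 20 partitions the only value satisfying "a > 1 and a < 3" is 2, which lands in p2. A minimal stand-alone sketch of that arithmetic (illustration only, not server code; the function names here are hypothetical):

    #include <cstdio>

    /* Which partitions can hold the integer values inside a bounded range,
       for PARTITION BY HASH(a) PARTITIONS n (value v maps to partition v % n). */
    static void list_partitions(long low, bool incl_low, long high, bool incl_high,
                                unsigned long n)
    {
      long first= incl_low  ? low  : low + 1;
      long last=  incl_high ? high : high - 1;
      for (long v= first; v <= last; v++)
        std::printf("value %ld -> p%lu\n", v, (unsigned long)v % n);
    }

    int main()
    {
      list_partitions(1, false, 3, false, 20); /* a > 1  and a < 3  -> p2       */
      list_partitions(1, true,  3, false, 20); /* a >= 1 and a < 3  -> p1,p2    */
      list_partitions(1, false, 3, true,  20); /* a > 1  and a <= 3 -> p2,p3    */
      list_partitions(1, true,  3, true,  20); /* a >= 1 and a <= 3 -> p1,p2,p3 */
      return 0;
    }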
169 sql/handler.h
@@ -620,6 +620,8 @@ typedef struct {
   uint32 end_part;
   bool use_bit_array;
 } part_id_range;
+
+
 /**
  * An enum and a struct to handle partitioning and subpartitioning.
 */
@@ -699,7 +701,109 @@ typedef int (*get_part_id_func)(partition_info *part_info,
                                 longlong *func_value);
 typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
 
-class partition_info :public Sql_alloc {
+struct st_partition_iter;
+#define NOT_A_PARTITION_ID ((uint32)-1)
+
+/*
+  A "Get next" function for partition iterator.
+
+  SYNOPSIS
+    partition_iter_func()
+      part_iter  Partition iterator, you call only "iter.get_next(&iter)"
+
+  RETURN
+    NOT_A_PARTITION_ID if there are no more partitions.
+    [sub]partition_id of the next partition
+*/
+
+typedef uint32 (*partition_iter_func)(st_partition_iter* part_iter);
+
+
+/*
+  Partition set iterator. Used to enumerate a set of [sub]partitions
+  obtained in partition interval analysis (see get_partitions_in_range_iter).
+
+  For the user, the only meaningful field is get_next, which may be used as
+  follows:
+    part_iterator.get_next(&part_iterator);
+
+  Initialization is done by any of the following calls:
+    - get_partitions_in_range_iter-type function call
+    - init_single_partition_iterator()
+    - init_all_partitions_iterator()
+  Cleanup is not needed.
+*/
+
+typedef struct st_partition_iter
+{
+  partition_iter_func get_next;
+
+  union {
+    struct {
+      uint32 start_part_num;
+      uint32 end_part_num;
+    };
+    struct {
+      longlong start_val;
+      longlong end_val;
+    };
+    bool null_returned;
+  };
+  partition_info *part_info;
+} PARTITION_ITERATOR;
+
+
+/*
+  Get an iterator for set of partitions that match given field-space interval
+
+  SYNOPSIS
+    get_partitions_in_range_iter()
+      part_info   Partitioning info
+      is_subpart
+      min_val     Left edge,  field value in opt_range_key format.
+      max_val     Right edge, field value in opt_range_key format.
+      flags       Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+                  NO_MAX_RANGE.
+      part_iter   Iterator structure to be initialized
+
+  DESCRIPTION
+    Functions with this signature are used to perform "Partitioning Interval
+    Analysis". This analysis is applicable for any type of [sub]partitioning
+    by some function of a single fieldX. The idea is as follows:
+    Given an interval "const1 <=? fieldX <=? const2", find a set of partitions
+    that may contain records with value of fieldX within the given interval.
+
+    The min_val, max_val and flags parameters specify the interval.
+    The set of partitions is returned by initializing an iterator in *part_iter
+
+  NOTES
+    There are currently two functions of this type:
+     - get_part_iter_for_interval_via_walking
+     - get_part_iter_for_interval_via_mapping
+
+  RETURN
+    0 - No matching partitions, iterator not initialized
+    1 - Some partitions would match, iterator initialized for traversing them
+   -1 - All partitions would match, iterator not initialized
+*/
+
+typedef int (*get_partitions_in_range_iter)(partition_info *part_info,
+                                            bool is_subpart,
+                                            byte *min_val, byte *max_val,
+                                            uint flags,
+                                            PARTITION_ITERATOR *part_iter);
+
+
+/* Initialize the iterator to return a single partition with given part_id */
+inline void init_single_partition_iterator(uint32 part_id,
+                                           PARTITION_ITERATOR *part_iter);
+
+/* Initialize the iterator to enumerate all partitions */
+inline void init_all_partitions_iterator(partition_info *part_info,
+                                         PARTITION_ITERATOR *part_iter);
+
+class partition_info : public Sql_alloc
+{
 public:
  /*
   * Here comes a set of definitions needed for partitioned table handlers.
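As a reading aid, here is a minimal sketch of how a caller might drive the iterator declared above, using only the names introduced by this hunk (the surrounding function itself is hypothetical):

    /* Enumerate every [sub]partition id an initialized iterator yields. */
    void walk_partitions(partition_info *part_info)
    {
      PARTITION_ITERATOR part_iter;
      /* or: a get_partitions_in_range_iter-type call that returned 1 */
      init_all_partitions_iterator(part_info, &part_iter);

      uint32 id;
      while ((id= part_iter.get_next(&part_iter)) != NOT_A_PARTITION_ID)
      {
        /* ... use partition number 'id', e.g. mark it as used ... */
      }
      /* No cleanup is needed, per the comment above. */
    }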
@@ -728,10 +832,10 @@ public:
     same in all subpartitions
   */
   get_subpart_id_func get_subpartition_id;
 
-  /* NULL-terminated list of fields used in partitioned expression */
+  /* NULL-terminated array of fields used in partitioned expression */
   Field **part_field_array;
-  /* NULL-terminated list of fields used in subpartitioned expression */
+  /* NULL-terminated array of fields used in subpartitioned expression */
   Field **subpart_field_array;
 
   /*
@@ -748,11 +852,10 @@ public:
   /*
     A bitmap of partitions used by the current query.
     Usage pattern:
-    * It is guaranteed that all partitions are set to be unused on query start.
+    * The handler->extra(HA_EXTRA_RESET) call at query start/end sets all
+      partitions to be unused.
     * Before index/rnd_init(), partition pruning code sets the bits for used
       partitions.
-    * The handler->extra(HA_EXTRA_RESET) call at query end sets all partitions
-      to be unused.
   */
   MY_BITMAP used_partitions;
 
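A hypothetical consumer of that bitmap, matching the lifecycle described above, could look like the sketch below: whatever scans the table only touches partitions whose bit the pruning code has set. bitmap_is_set() and no_parts are existing names; the loop itself is illustrative and is not the ha_partition implementation.

    for (uint32 part= 0; part < part_info->no_parts; part++)
    {
      if (!bitmap_is_set(&part_info->used_partitions, part))
        continue;                      /* pruned away for this query */
      /* ... open/scan partition 'part' ... */
    }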
@@ -760,6 +863,39 @@ public:
   longlong *range_int_array;
   LIST_PART_ENTRY *list_array;
   };
+
+  /********************************************
+   * INTERVAL ANALYSIS
+   ********************************************/
+  /*
+    Partitioning interval analysis function for partitioning, or NULL if
+    interval analysis is not supported for this kind of partitioning.
+  */
+  get_partitions_in_range_iter get_part_iter_for_interval;
+  /*
+    Partitioning interval analysis function for subpartitioning, or NULL if
+    interval analysis is not supported for this kind of partitioning.
+  */
+  get_partitions_in_range_iter get_subpart_iter_for_interval;
+
+  /*
+    Valid iff
+    get_part_iter_for_interval=get_part_iter_for_interval_via_walking:
+    controls how we'll process "field < C" and "field > C" intervals.
+    If the partitioning function F is strictly increasing, then for any x, y
+    "x < y" => "F(x) < F(y)" (*), i.e. when we get interval "field < C"
+    we can perform partition pruning on the equivalent "F(field) < F(C)".
+
+    If the partitioning function is not strictly increasing (it is simply
+    increasing), then instead of (*) we get "x < y" => "F(x) <= F(y)",
+    i.e. for interval "field < C" we can perform partition pruning for
+    "F(field) <= F(C)".
+  */
+  bool range_analysis_include_bounds;
+  /********************************************
+   * INTERVAL ANALYSIS ENDS
+   ********************************************/
+
   char* part_info_string;
 
   char *part_func_string;
@@ -863,6 +999,25 @@ public:
 
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
+uint32 get_next_partition_id_range(struct st_partition_iter* part_iter);
+
+inline void init_single_partition_iterator(uint32 part_id,
+                                           PARTITION_ITERATOR *part_iter)
+{
+  part_iter->start_part_num= part_id;
+  part_iter->end_part_num= part_id+1;
+  part_iter->get_next= get_next_partition_id_range;
+}
+
+inline
+void init_all_partitions_iterator(partition_info *part_info,
+                                  PARTITION_ITERATOR *part_iter)
+{
+  part_iter->start_part_num= 0;
+  part_iter->end_part_num= part_info->no_parts;
+  part_iter->get_next= get_next_partition_id_range;
+}
+
 /*
   Answers the question if subpartitioning is used for a certain table
   SYNOPSIS
11 sql/item.h
@@ -381,13 +381,20 @@ public:
       put values of field_i into table record buffer;
       return item->val_int();
     }
+
+  NOTE
+    At the moment function monotonicity is not well defined (and so may be
+    incorrect) for Item trees with parameters/return types that are different
+    from INT_RESULT, may be NULL, or are unsigned.
+    It will be possible to address this issue once the related partitioning bugs
+    (BUG#16002, BUG#15447, BUG#13436) are fixed.
 */
 
 typedef enum monotonicity_info
 {
    NON_MONOTONIC,              /* none of the below holds */
-   MONOTONIC_INCREASING,       /* F() is unary and "x < y" => "F(x) < F(y)" */
-   MONOTONIC_STRICT_INCREASING /* F() is unary and "x < y" => "F(x) <= F(y)" */
+   MONOTONIC_INCREASING,       /* F() is unary and (x < y) => (F(x) <= F(y)) */
+   MONOTONIC_STRICT_INCREASING /* F() is unary and (x < y) => (F(x) < F(y)) */
 } enum_monotonicity_info;
 
 /*************************************************************************/
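To make the two enum values concrete, here is a sketch, under the definitions above, of how a pruning step could decide whether a mapped bound stays strict. The helper is hypothetical; the rule restates the range_analysis_include_bounds comment added to sql/handler.h in this commit.

    /* For "field < C": may we prune on "F(field) < F(C)", or only on
       "F(field) <= F(C)"?  Example: F(x)=x+1 is strictly increasing, while
       F(x)=x/10 (integer division) is increasing but not strictly. */
    bool mapped_bound_stays_strict(enum_monotonicity_info m)
    {
      return m == MONOTONIC_STRICT_INCREASING;  /* x < y  =>  F(x) < F(y) */
    }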
@@ -885,6 +885,21 @@ longlong Item_func_to_days::val_int()
   return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
 }
 
+
+/*
+  Get information about this Item tree monotonicity
+
+  SYNOPSIS
+    Item_func_to_days::get_monotonicity_info()
+
+  DESCRIPTION
+    Get information about monotonicity of the function represented by this item
+    tree.
+
+  RETURN
+    See enum_monotonicity_info.
+*/
+
 enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const
 {
   if (args[0]->type() == Item::FIELD_ITEM)
@@ -1080,6 +1095,21 @@ longlong Item_func_year::val_int()
   return (longlong) ltime.year;
 }
 
+
+/*
+  Get information about this Item tree monotonicity
+
+  SYNOPSIS
+    Item_func_to_days::get_monotonicity_info()
+
+  DESCRIPTION
+    Get information about monotonicity of the function represented by this item
+    tree.
+
+  RETURN
+    See enum_monotonicity_info.
+*/
+
 enum_monotonicity_info Item_func_year::get_monotonicity_info() const
 {
   if (args[0]->type() == Item::FIELD_ITEM &&
520 sql/opt_range.cc
@@ -313,11 +313,46 @@ public:
   }
   SEL_ARG *clone_tree();
 
-  /* Return TRUE if this represents "keypartK = const" or "keypartK IS NULL" */
+  /*
+    Check if this SEL_ARG object represents a single-point interval
+
+    SYNOPSIS
+      is_singlepoint()
+
+    DESCRIPTION
+      Check if this SEL_ARG object (not tree) represents a single-point
+      interval, i.e. if it represents a "keypart = const" or
+      "keypart IS NULL".
+
+    RETURN
+      TRUE   This SEL_ARG object represents a singlepoint interval
+      FALSE  Otherwise
+  */
+
   bool is_singlepoint()
   {
-    return !min_flag && !max_flag &&
-           !field->key_cmp((byte*) min_value, (byte*)max_value);
+    /*
+      Check for NEAR_MIN ("strictly less") and NO_MIN_RANGE (-inf < field)
+      flags, and the same for right edge.
+    */
+    if (min_flag || max_flag)
+      return FALSE;
+    byte *min_val= min_value;
+    byte *max_val= max_value;
+
+    if (maybe_null)
+    {
+      /* First byte is a NULL value indicator */
+      if (*min_val != *max_val)
+        return FALSE;
+
+      if (*min_val)
+        return TRUE; /* This "x IS NULL" */
+      min_val++;
+      max_val++;
+    }
+    return !field->key_cmp(min_val, max_val);
   }
 };
 
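A stand-alone restatement of the check above over raw key images may help; it assumes the layout the patched code implies (an optional NULL indicator byte followed by the value bytes) and uses memcmp as a stand-in for Field::key_cmp(). The function name and parameters are illustrative only.

    #include <cstring>

    bool is_single_point(const unsigned char *min_key, const unsigned char *max_key,
                         size_t value_len, bool maybe_null, bool open_or_missing_bound)
    {
      if (open_or_missing_bound)            /* NEAR_MIN/NEAR_MAX/NO_*_RANGE set */
        return false;
      if (maybe_null)
      {
        if (*min_key != *max_key)           /* one bound is NULL, the other is not */
          return false;
        if (*min_key)                       /* both NULL: this is "field IS NULL" */
          return true;
        min_key++, max_key++;               /* skip the NULL indicator byte */
      }
      return std::memcmp(min_key, max_key, value_len) == 0;
    }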
@@ -2035,7 +2070,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
 }
 
 /****************************************************************************
- * Partition pruning starts
+ * Partition pruning module
 ****************************************************************************/
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 
@@ -2080,7 +2115,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
     The list of intervals we'll obtain will look like this:
       ((t1.a, t1.b) = (1,'foo')),
       ((t1.a, t1.b) = (2,'bar')),
-      ((t1,a, t1.b) > (10,'zz')) (**)
+      ((t1,a, t1.b) > (10,'zz'))
 
   2. for each interval I
      {
@@ -2110,7 +2145,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
   Putting it all together, partitioning module works as follows:
 
   prune_partitions() {
-    call create_partition_index_descrition();
+    call create_partition_index_description();
 
     call get_mm_tree(); // invoke the RangeAnalysisModule
 
@@ -2124,10 +2159,6 @@ struct st_part_prune_param;
 struct st_part_opt_info;
 
 typedef void (*mark_full_part_func)(partition_info*, uint32);
-typedef uint32 (*part_num_to_partition_id_func)(struct st_part_prune_param*,
-                                                uint32);
-typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
-                                    bool include_endpoint);
 
 /*
   Partition pruning operation context
@@ -2135,7 +2166,7 @@ typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
 typedef struct st_part_prune_param
 {
   RANGE_OPT_PARAM range_param; /* Range analyzer parameters */
 
   /***************************************************************
    Following fields are filled in based solely on partitioning
    definition and not modified after that:
@@ -2164,32 +2195,6 @@ typedef struct st_part_prune_param
   int last_part_partno;
   int last_subpart_partno; /* Same as above for supartitioning */
 
-  /*
-    Function to be used to analyze non-singlepoint intervals (Can be pointer
-    to one of two functions - for RANGE and for LIST types). NULL means
-    partitioning type and/or expression doesn't allow non-singlepoint interval
-    analysis.
-    See get_list_array_idx_for_endpoint (or get_range_...) for description of
-    what the function does.
-  */
-  get_endpoint_func get_endpoint;
-
-  /* Maximum possible value that can be returned by get_endpoint function */
-  uint32 max_endpoint_val;
-
-  /*
-    For RANGE partitioning, part_num_to_part_id_range, for LIST partitioning,
-    part_num_to_part_id_list. Just to avoid the if-else clutter.
-  */
-  part_num_to_partition_id_func endpoints_walk_func;
-
-  /*
-    If true, process "key < const" as "part_func(key) < part_func(const)",
-    otherwise as "part_func(key) <= part_func(const)". Same for '>' and '>='.
-    This is defined iff get_endpoint != NULL.
-  */
-  bool force_include_bounds;
-
   /*
     is_part_keypart[i] == test(keypart #i in partitioning index is a member
                                used in partitioning)
@@ -2208,28 +2213,15 @@ typedef struct st_part_prune_param
   uint cur_part_fields;
   /* Same as cur_part_fields, but for subpartitioning */
   uint cur_subpart_fields;
 
-  /***************************************************************
-   Following fields are used to store an 'iterator' that can be
-   used to obtain a set of used artitions.
-  **************************************************************/
-  /*
-    Start and end+1 partition "numbers". They can have two meanings depending
-    depending of the value of part_num_to_part_id:
-    part_num_to_part_id_range - numbers are partition ids
-    part_num_to_part_id_list  - numbers are indexes in part_info->list_array
-  */
-  uint32 start_part_num;
-  uint32 end_part_num;
-
-  /*
-    A function that should be used to convert two above "partition numbers"
-    to partition_ids.
-  */
-  part_num_to_partition_id_func part_num_to_part_id;
+  /* Iterator to be used to obtain the "current" set of used partitions */
+  PARTITION_ITERATOR part_iter;
+
+  /* Initialized bitmap of no_subparts size */
+  MY_BITMAP subparts_bitmap;
 } PART_PRUNE_PARAM;
 
-static bool create_partition_index_descrition(PART_PRUNE_PARAM *prune_par);
+static bool create_partition_index_description(PART_PRUNE_PARAM *prune_par);
 static int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree);
 static int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar,
                                        SEL_IMERGE *imerge);
@@ -2243,7 +2235,7 @@ static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par,
 static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end);
 static void dbug_print_field(Field *field);
 static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part);
-static void dbug_print_onepoint_range(SEL_ARG **start, uint num);
+static void dbug_print_singlepoint_range(SEL_ARG **start, uint num);
 #endif
 
 
@@ -2297,7 +2289,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
   range_par->mem_root= &alloc;
   range_par->old_root= thd->mem_root;
 
-  if (create_partition_index_descrition(&prune_param))
+  if (create_partition_index_description(&prune_param))
   {
     mark_all_partitions_as_used(part_info);
     free_root(&alloc,MYF(0));  // Return memory & allocator
@@ -2335,15 +2327,14 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
 
   if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER)
     goto all_used;
 
   if (tree->merges.is_empty())
   {
+    /* Range analysis has produced a single list of intervals. */
     prune_param.arg_stack_end= prune_param.arg_stack;
     prune_param.cur_part_fields= 0;
     prune_param.cur_subpart_fields= 0;
-    prune_param.part_num_to_part_id= part_num_to_part_id_range;
-    prune_param.start_part_num= 0;
-    prune_param.end_part_num= prune_param.part_info->no_parts;
+    init_all_partitions_iterator(part_info, &prune_param.part_iter);
     if (!tree->keys[0] || (-1 == (res= find_used_partitions(&prune_param,
                                                             tree->keys[0]))))
       goto all_used;
@@ -2352,14 +2343,30 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
   {
     if (tree->merges.elements == 1)
     {
-      if (-1 == (res |= find_used_partitions_imerge(&prune_param,
-                                                    tree->merges.head())))
+      /*
+        Range analysis has produced a "merge" of several intervals lists, a
+        SEL_TREE that represents an expression in form
+          sel_imerge = (tree1 OR tree2 OR ... OR treeN)
+        that cannot be reduced to one tree. This can only happen when
+        partitioning index has several keyparts and the condition is OR of
+        conditions that refer to different key parts. For example, we'll get
+        here for "partitioning_field=const1 OR subpartitioning_field=const2"
+      */
+      if (-1 == (res= find_used_partitions_imerge(&prune_param,
+                                                  tree->merges.head())))
         goto all_used;
     }
     else
     {
-      if (-1 == (res |= find_used_partitions_imerge_list(&prune_param,
-                                                         tree->merges)))
+      /*
+        Range analysis has produced a list of several imerges, i.e. a
+        structure that represents a condition in form
+          imerge_list= (sel_imerge1 AND sel_imerge2 AND ... AND sel_imergeN)
+        This is produced for complicated WHERE clauses that range analyzer
+        can't really analyze properly.
+      */
+      if (-1 == (res= find_used_partitions_imerge_list(&prune_param,
+                                                       tree->merges)))
         goto all_used;
     }
   }
@@ -2384,15 +2391,22 @@ end:
 
 
 /*
-  Store key image to table record
+  Store field key image to table record
 
   SYNOPSIS
-    field  Field which key image should be stored.
-    ptr    Field value in key format.
-    len    Length of the value, in bytes.
+    store_key_image_to_rec()
+      field  Field which key image should be stored
+      ptr    Field value in key format
+      len    Length of the value, in bytes
+
+  DESCRIPTION
+    Copy the field value from its key image to the table record. The source
+    is the value in key image format, occupying len bytes in buffer pointed
+    by ptr. The destination is table record, in "field value in table record"
+    format.
 */
 
-static void store_key_image_to_rec(Field *field, char *ptr, uint len)
+void store_key_image_to_rec(Field *field, char *ptr, uint len)
 {
   /* Do the same as print_key() does */
   if (field->real_maybe_null())
@@ -2414,8 +2428,12 @@ static void store_key_image_to_rec(Field *field, char *ptr, uint len)
   SYNOPSIS
     store_selargs_to_rec()
       ppar   Partition pruning context
-      start  Array SEL_ARG* for which the minimum values should be stored
+      start  Array of SEL_ARG* for which the minimum values should be stored
       num    Number of elements in the array
+
+  DESCRIPTION
+    For each SEL_ARG* interval in the specified array, store the left edge
+    field value (sel_arg->min, key image format) into the table record.
 */
 
 static void store_selargs_to_rec(PART_PRUNE_PARAM *ppar, SEL_ARG **start,
@@ -2449,19 +2467,6 @@ static void mark_full_partition_used_with_parts(partition_info *part_info,
     bitmap_set_bit(&part_info->used_partitions, start);
 }
 
-/* See comment in PART_PRUNE_PARAM::part_num_to_part_id about what this is */
-static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* ppar, uint32 num)
-{
-  return num;
-}
-
-/* See comment in PART_PRUNE_PARAM::part_num_to_part_id about what this is */
-static uint32 part_num_to_part_id_list(PART_PRUNE_PARAM* ppar, uint32 num)
-{
-  return ppar->part_info->list_array[num].partition_id;
-}
-
 
 /*
   Find the set of used partitions for List<SEL_IMERGE>
   SYNOPSIS
@@ -2473,7 +2478,7 @@ static uint32 part_num_to_part_id_list(PART_PRUNE_PARAM* ppar, uint32 num)
     List<SEL_IMERGE> represents "imerge1 AND imerge2 AND ...".
     The set of used partitions is an intersection of used partitions sets
     for imerge_{i}.
-    We accumulate this intersection a separate bitmap.
+    We accumulate this intersection in a separate bitmap.
 
   RETURN
     See find_used_partitions()
@@ -2491,7 +2496,7 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
                                        bitmap_bytes)))
   {
     /*
-      Fallback, process just first SEL_IMERGE. This can leave us with more
+      Fallback, process just the first SEL_IMERGE. This can leave us with more
       partitions marked as used then actually needed.
     */
     return find_used_partitions_imerge(ppar, merges.head());
@@ -2549,9 +2554,7 @@ int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, SEL_IMERGE *imerge)
     ppar->arg_stack_end= ppar->arg_stack;
     ppar->cur_part_fields= 0;
     ppar->cur_subpart_fields= 0;
-    ppar->part_num_to_part_id= part_num_to_part_id_range;
-    ppar->start_part_num= 0;
-    ppar->end_part_num= ppar->part_info->no_parts;
+    init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
     if (-1 == (res |= find_used_partitions(ppar, (*ptree)->keys[0])))
       return -1;
   }
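A hedged sketch of the intersection described in find_used_partitions_imerge_list()'s comment: collect each imerge's used-partition set into a scratch bitmap and AND it into the accumulated result. bitmap_clear_all, bitmap_union and bitmap_intersect are existing MY_BITMAP helpers; the driver function itself is hypothetical, and 'result' is assumed to start out cleared. The real function in this file works on part_info->used_partitions directly.

    void intersect_imerge_sets(MY_BITMAP *result, MY_BITMAP *scratch,
                               List<SEL_IMERGE> &merges)
    {
      bool first= true;
      List_iterator<SEL_IMERGE> it(merges);
      SEL_IMERGE *imerge;
      while ((imerge= it++))
      {
        bitmap_clear_all(scratch);
        /* ... mark partitions used by 'imerge' in 'scratch'
               (what find_used_partitions_imerge() does) ... */
        if (first)
        {
          bitmap_union(result, scratch);      /* seed with the first set */
          first= false;
        }
        else
          bitmap_intersect(result, scratch);  /* AND with what we have so far */
      }
    }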
@@ -2560,41 +2563,106 @@ int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, SEL_IMERGE *imerge)
 
 
 /*
-  Recursively walk the SEL_ARG tree, find/mark partitions that need to be used
+  Collect partitioning ranges for the SEL_ARG tree and mark partitions as used
 
   SYNOPSIS
     find_used_partitions()
       ppar      Partition pruning context.
-      key_tree  Intervals tree to perform pruning for.
+      key_tree  SEL_ARG range tree to perform pruning for
 
   DESCRIPTION
     This function
-      * recursively walks the SEL_ARG* tree, collecting partitioning
-        "intervals";
-      * finds the partitions one needs to use to get rows in these intervals;
+      * recursively walks the SEL_ARG* tree collecting partitioning "intervals"
+      * finds the partitions one needs to use to get rows in these intervals
       * marks these partitions as used.
+    The next section describes the process in greater detail.
 
-  WHAT IS CONSIDERED TO BE "INTERVALS"
-    A partition pruning "interval" is equivalent to condition in one of the
-    forms:
-
-    "partition_field1=const1 AND ... partition_fieldN=constN"         (1)
-    "subpartition_field1=const1 AND ... subpartition_fieldN=constN"   (2)
-    "(1) AND (2)"                                                     (3)
-
-    In (1) and (2) all [sub]partitioning fields must be used, and "x=const"
-    includes "x IS NULL".
-
-    If partitioning is performed using
-
-      PARTITION BY RANGE(unary_monotonic_func(single_partition_field)),
-
-    then the following is also an interval:
-
-      " const1 OP1 single_partition_field OR const2"                  (4)
-
-    where OP1 and OP2 are '<' OR '<=', and const_i can be +/- inf.
-    Everything else is not a partition pruning "interval".
+  IMPLEMENTATION
+    TYPES OF RESTRICTIONS THAT WE CAN OBTAIN PARTITIONS FOR
+    We can find out which [sub]partitions to use if we obtain restrictions on
+    [sub]partitioning fields in the following form:
+    1.  "partition_field1=const1 AND ... AND partition_fieldN=constN"
+    1.1 Same as (1) but for subpartition fields
+
+    If partitioning supports interval analysis (i.e. partitioning is a
+    function of a single table field, and partition_info::
+    get_part_iter_for_interval != NULL), then we can also use condition in
+    this form:
+    2.  "const1 <=? partition_field <=? const2"
+    2.1 Same as (2) but for subpartition_field
+
+    INFERRING THE RESTRICTIONS FROM SEL_ARG TREE
+
+    The below is an example of what SEL_ARG tree may represent:
+
+    (start)
+     |                           $
+     |   Partitioning keyparts   $  subpartitioning keyparts
+     |                           $
+     |     ...          ...      $
+     |      |            |       $
+     | +---------+  +---------+  $  +-----------+  +-----------+
+     \-| par1=c1 |--| par2=c2 |-----| subpar1=c3|--| subpar2=c5|
+       +---------+  +---------+  $  +-----------+  +-----------+
+        |                        $        |             |
+        |                        $        |        +-----------+
+        |                        $        |        | subpar2=c6|
+        |                        $        |        +-----------+
+        |                        $        |
+        |                        $  +-----------+  +-----------+
+        |                        $  | subpar1=c4|--| subpar2=c8|
+        |                        $  +-----------+  +-----------+
+        |                        $
+        |                        $
+       +---------+               $  +------------+  +------------+
+       | par1=c2 |------------------| subpar1=c10|--| subpar2=c12|
+       +---------+               $  +------------+  +------------+
+        |                        $
+       ...                       $
+
+    The up-down connections are connections via SEL_ARG::left and
+    SEL_ARG::right. A horizontal connection to the right is the
+    SEL_ARG::next_key_part connection.
+
+    find_used_partitions() traverses the entire tree via recursion on
+     * SEL_ARG::next_key_part (from left to right on the picture)
+     * SEL_ARG::left|right (up/down on the pic). Left-right recursion is
+       performed for each depth level.
+
+    Recursion descent on SEL_ARG::next_key_part is used to accumulate (in
+    ppar->arg_stack) constraints on partitioning and subpartitioning fields.
+    For the example in the above picture, one of stack states is:
+      in find_used_partitions(key_tree = "subpar2=c5") (***)
+        in find_used_partitions(key_tree = "subpar1=c3")
+          in find_used_partitions(key_tree = "par2=c2")   (**)
+            in find_used_partitions(key_tree = "par1=c1")
+              in prune_partitions(...)
+    We apply partitioning limits as soon as possible, e.g. when we reach the
+    depth (**), we find which partition(s) correspond to "par1=c1 AND par2=c2",
+    and save them in ppar->part_iter.
+    When we reach the depth (***), we find which subpartition(s) correspond to
+    "subpar1=c3 AND subpar2=c5", and then mark appropriate subpartitions in
+    appropriate partitions as used.
+
+    It is possible that constraints on some partitioning fields are missing.
+    For the above example, consider this stack state:
+      in find_used_partitions(key_tree = "subpar2=c12") (***)
+        in find_used_partitions(key_tree = "subpar1=c10")
+          in find_used_partitions(key_tree = "par1=c2")
+            in prune_partitions(...)
+    Here we don't have constraints for all partitioning fields. Since we've
+    never set the ppar->part_iter to contain used set of partitions, we use
+    its default "all partitions" value. We get subpartition id for
+    "subpar1=c3 AND subpar2=c5", and mark that subpartition as used in every
+    partition.
+
+    The inverse is also possible: we may get constraints on partitioning
+    fields, but not constraints on subpartitioning fields. In that case,
+    calls to find_used_partitions() with depth below (**) will return -1,
+    and we will mark entire partition as used.
+
+  TODO
+    Replace recursion on SEL_ARG::left and SEL_ARG::right with a loop
 
   RETURN
     1   OK, one or more [sub]partitions are marked as used.
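Condensed to its control flow, the traversal described above looks roughly like the sketch below. This is an illustration only; 'walk' is a hypothetical stand-in for find_used_partitions(), which handles many more cases.

    int walk(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
    {
      if (key_tree->left != &null_element)    /* smaller values of this keypart */
        walk(ppar, key_tree->left);

      /* push this keypart's constraint (e.g. "par2=c2" or "subpar1=c3");
         at depth (**): remember the matching partition(s) in ppar->part_iter;
         at depth (***): mark the matching subpartition inside each of them */

      if (key_tree->next_key_part)            /* descend to the next keypart */
        walk(ppar, key_tree->next_key_part);

      /* pop this keypart's constraint */

      if (key_tree->right != &null_element)   /* larger values of this keypart */
        walk(ppar, key_tree->right);
      return 1;
    }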
@@ -2620,58 +2688,29 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
 
   if (key_tree->type == SEL_ARG::KEY_RANGE)
   {
-    if (partno == 0 && (NULL != ppar->get_endpoint))
+    if (partno == 0 && (NULL != ppar->part_info->get_part_iter_for_interval))
     {
       /*
         Partitioning is done by RANGE|INTERVAL(monotonic_expr(fieldX)), and
-        we got "const1 < fieldX < const2" interval.
+        we got "const1 CMP fieldX CMP const2" interval <-- psergey-todo: change
       */
       DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
                                                     ppar->range_param.
                                                     key_parts););
-      /* Find minimum */
-      if (key_tree->min_flag & NO_MIN_RANGE)
-        ppar->start_part_num= 0;
-      else
+      res= ppar->part_info->
+           get_part_iter_for_interval(ppar->part_info,
+                                      FALSE,
+                                      key_tree->min_value,
+                                      key_tree->max_value,
+                                      key_tree->min_flag | key_tree->max_flag,
+                                      &ppar->part_iter);
+      if (!res)
+        goto go_right; /* res=0 --> no satisfying partitions */
+      if (res == -1)
       {
-        /*
-          Store the interval edge in the record buffer, and call the
-          function that maps the edge in table-field space to an edge
-          in ordered-set-of-partitions (for RANGE partitioning) or
-          indexes-in-ordered-array-of-list-constants (for LIST) space.
-        */
-        store_key_image_to_rec(key_tree->field, key_tree->min_value,
-                               ppar->range_param.key_parts[0].length);
-        bool include_endp= ppar->force_include_bounds ||
-                           !test(key_tree->min_flag & NEAR_MIN);
-        ppar->start_part_num= ppar->get_endpoint(ppar->part_info, 1,
-                                                 include_endp);
-        if (ppar->start_part_num == ppar->max_endpoint_val)
-        {
-          res= 0; /* No satisfying partitions */
-          goto pop_and_go_right;
-        }
+        //get a full range iterator
+        init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
       }
-
-      /* Find maximum, do the same as above but for right interval bound */
-      if (key_tree->max_flag & NO_MAX_RANGE)
-        ppar->end_part_num= ppar->max_endpoint_val;
-      else
-      {
-        store_key_image_to_rec(key_tree->field, key_tree->max_value,
-                               ppar->range_param.key_parts[0].length);
-        bool include_endp= ppar->force_include_bounds ||
-                           !test(key_tree->max_flag & NEAR_MAX);
-        ppar->end_part_num= ppar->get_endpoint(ppar->part_info, 0,
-                                               include_endp);
-        if (ppar->start_part_num == ppar->end_part_num)
-        {
-          res= 0; /* No satisfying partitions */
-          goto pop_and_go_right;
-        }
-      }
-      ppar->part_num_to_part_id= ppar->endpoints_walk_func;
-
       /*
         Save our intent to mark full partition as used if we will not be able
         to obtain further limits on subpartitions
@@ -2680,6 +2719,42 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
       goto process_next_key_part;
     }
 
+    if (partno == ppar->last_subpart_partno &&
+        (NULL != ppar->part_info->get_subpart_iter_for_interval))
+    {
+      PARTITION_ITERATOR subpart_iter;
+      DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
+                                                    ppar->range_param.
+                                                    key_parts););
+      res= ppar->part_info->
+           get_subpart_iter_for_interval(ppar->part_info,
+                                         TRUE,
+                                         key_tree->min_value,
+                                         key_tree->max_value,
+                                         key_tree->min_flag | key_tree->max_flag,
+                                         &subpart_iter);
+      DBUG_ASSERT(res); /* We can't get "no satisfying subpartitions" */
+      if (res == -1)
+        return -1; /* all subpartitions satisfy */
+
+      uint32 subpart_id;
+      bitmap_clear_all(&ppar->subparts_bitmap);
+      while ((subpart_id= subpart_iter.get_next(&subpart_iter)) != NOT_A_PARTITION_ID)
+        bitmap_set_bit(&ppar->subparts_bitmap, subpart_id);
+
+      /* Mark each partition as used in each subpartition. */
+      uint32 part_id;
+      while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+              NOT_A_PARTITION_ID)
+      {
+        for (uint i= 0; i < ppar->part_info->no_subparts; i++)
+          if (bitmap_is_set(&ppar->subparts_bitmap, i))
+            bitmap_set_bit(&ppar->part_info->used_partitions,
+                           part_id * ppar->part_info->no_subparts + i);
+      }
+      goto go_right;
+    }
+
     if (key_tree->is_singlepoint())
     {
       pushed= TRUE;
@@ -2695,11 +2770,11 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
         fields. Save all constN constants into table record buffer.
       */
       store_selargs_to_rec(ppar, ppar->arg_stack, ppar->part_fields);
-      DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack,
+      DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack,
                                                      ppar->part_fields););
       uint32 part_id;
       longlong func_value;
-      /* then find in which partition the {const1, ...,constN} tuple goes */
+      /* Find in which partition the {const1, ...,constN} tuple goes */
       if (ppar->get_top_partition_id_func(ppar->part_info, &part_id,
                                           &func_value))
       {
@@ -2707,9 +2782,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
         goto pop_and_go_right;
       }
       /* Rembember the limit we got - single partition #part_id */
-      ppar->part_num_to_part_id= part_num_to_part_id_range;
-      ppar->start_part_num= part_id;
-      ppar->end_part_num= part_id + 1;
+      init_single_partition_iterator(part_id, &ppar->part_iter);
 
       /*
         If there are no subpartitions/we fail to get any limit for them,
@@ -2719,7 +2792,8 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
       goto process_next_key_part;
     }
 
-    if (partno == ppar->last_subpart_partno)
+    if (partno == ppar->last_subpart_partno &&
+        ppar->cur_subpart_fields == ppar->subpart_fields)
     {
       /*
         Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all subpartitioning
@@ -2727,7 +2801,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
       */
       store_selargs_to_rec(ppar, ppar->arg_stack_end - ppar->subpart_fields,
                            ppar->subpart_fields);
-      DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack_end -
+      DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack_end-
                                                      ppar->subpart_fields,
                                                      ppar->subpart_fields););
       /* Find the subpartition (it's HASH/KEY so we always have one) */
@@ -2735,12 +2809,12 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
       uint32 subpart_id= part_info->get_subpartition_id(part_info);
 
       /* Mark this partition as used in each subpartition. */
-      for (uint32 num= ppar->start_part_num; num != ppar->end_part_num;
-           num++)
+      uint32 part_id;
+      while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+              NOT_A_PARTITION_ID)
       {
         bitmap_set_bit(&part_info->used_partitions,
-                       ppar->part_num_to_part_id(ppar, num) *
-                       part_info->no_subparts + subpart_id);
+                       part_id * part_info->no_subparts + subpart_id);
       }
       res= 1; /* Some partitions were marked as used */
       goto pop_and_go_right;
@@ -2761,31 +2835,28 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
 process_next_key_part:
   if (key_tree->next_key_part)
     res= find_used_partitions(ppar, key_tree->next_key_part);
   else
     res= -1;
 
-  if (res == -1) /* Got "full range" for key_tree->next_key_part call */
-  {
-    if (set_full_part_if_bad_ret)
-    {
-      for (uint32 num= ppar->start_part_num; num != ppar->end_part_num;
-           num++)
-      {
-        ppar->mark_full_partition_used(ppar->part_info,
-                                       ppar->part_num_to_part_id(ppar, num));
-      }
-      res= 1;
-    }
-    else
-      return -1;
-  }
-
   if (set_full_part_if_bad_ret)
   {
-    /* Restore the "used partition iterator" to its default */
-    ppar->part_num_to_part_id= part_num_to_part_id_range;
-    ppar->start_part_num= 0;
-    ppar->end_part_num= ppar->part_info->no_parts;
+    if (res == -1)
+    {
+      /* Got "full range" for subpartitioning fields */
+      uint32 part_id;
+      bool found= FALSE;
+      while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) != NOT_A_PARTITION_ID)
+      {
+        ppar->mark_full_partition_used(ppar->part_info, part_id);
+        found= TRUE;
+      }
+      res= test(found);
+    }
+    /*
+      Restore the "used partitions iterator" to the default setting that
+      specifies iteration over all partitions.
+    */
+    init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
   }
 
   if (pushed)
@@ -2796,7 +2867,10 @@ pop_and_go_right:
     ppar->cur_part_fields-= ppar->is_part_keypart[partno];
     ppar->cur_subpart_fields-= ppar->is_subpart_keypart[partno];
   }
 
+  if (res == -1)
+    return -1;
+go_right:
   if (key_tree->right != &null_element)
   {
     if (-1 == (right_res= find_used_partitions(ppar,key_tree->right)))
@@ -2854,7 +2928,7 @@ static bool fields_ok_for_partition_index(Field **pfield)
   struct
 
   SYNOPSIS
-    create_partition_index_descrition()
+    create_partition_index_description()
       prune_par  INOUT Partition pruning context
 
   DESCRIPTION
@@ -2871,7 +2945,7 @@ static bool fields_ok_for_partition_index(Field **pfield)
     FALSE  OK
 */
 
-static bool create_partition_index_descrition(PART_PRUNE_PARAM *ppar)
+static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
 {
   RANGE_OPT_PARAM *range_par= &(ppar->range_param);
   partition_info *part_info= ppar->part_info;
@@ -2903,38 +2977,6 @@ static bool create_partition_index_descrition(PART_PRUNE_PARAM *ppar)
     ppar->get_top_partition_id_func= part_info->get_partition_id;
   }
 
-  enum_monotonicity_info minfo;
-  ppar->get_endpoint= NULL;
-  if (part_info->part_expr &&
-      (minfo= part_info->part_expr->get_monotonicity_info()) != NON_MONOTONIC)
-  {
-    /*
-      ppar->force_include_bounds controls how we'll process "field < C" and
-      "field > C" intervals.
-      If the partitioning function F is strictly increasing, then for any x, y
-      "x < y" => "F(x) < F(y)" (*), i.e. when we get interval "field < C"
-      we can perform partition pruning on the equivalent "F(field) < F(C)".
-
-      If the partitioning function not strictly increasing (it is simply
-      increasing), then instead of (*) we get "x < y" => "F(x) <= F(y)"
-      i.e. for interval "field < C" we can perform partition pruning for
-      "F(field) <= F(C)".
-    */
-    ppar->force_include_bounds= test(minfo == MONOTONIC_INCREASING);
-    if (part_info->part_type == RANGE_PARTITION)
-    {
-      ppar->get_endpoint=        get_partition_id_range_for_endpoint;
-      ppar->endpoints_walk_func= part_num_to_part_id_range;
-      ppar->max_endpoint_val=    part_info->no_parts;
-    }
-    else if (part_info->part_type == LIST_PARTITION)
-    {
-      ppar->get_endpoint=        get_list_array_idx_for_endpoint;
-      ppar->endpoints_walk_func= part_num_to_part_id_list;
-      ppar->max_endpoint_val=    part_info->no_list_values;
-    }
-  }
-
   KEY_PART  *key_part;
   MEM_ROOT *alloc= range_par->mem_root;
   if (!total_parts ||
@@ -2947,11 +2989,19 @@ static bool create_partition_index_descrition(PART_PRUNE_PARAM *ppar)
       !(ppar->is_subpart_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
                                                               total_parts)))
     return TRUE;
 
+  if (ppar->subpart_fields)
+  {
+    uint32 *buf;
+    uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts);
+    if (!(buf= (uint32*)alloc_root(alloc, bufsize)))
+      return TRUE;
+    bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts, FALSE);
+  }
   range_par->key_parts= key_part;
   Field **field= (ppar->part_fields)? part_info->part_field_array :
                                       part_info->subpart_field_array;
-  bool subpart_fields= FALSE;
+  bool in_subpart_fields= FALSE;
   for (uint part= 0; part < total_parts; part++, key_part++)
   {
     key_part->key= 0;
@@ -2972,13 +3022,13 @@ static bool create_partition_index_descrition(PART_PRUNE_PARAM *ppar)
     key_part->image_type = Field::itRAW;
     /* We don't set key_parts->null_bit as it will not be used */
 
-    ppar->is_part_keypart[part]= !subpart_fields;
-    ppar->is_subpart_keypart[part]= subpart_fields;
+    ppar->is_part_keypart[part]= !in_subpart_fields;
+    ppar->is_subpart_keypart[part]= in_subpart_fields;
 
     if (!*(++field))
     {
       field= part_info->subpart_field_array;
-      subpart_fields= TRUE;
+      in_subpart_fields= TRUE;
     }
   }
   range_par->key_parts_end= key_part;
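The subparts_bitmap allocated in the hunk above gives the pruning code a scratch bitmap with one bit per subpartition, sized by bitmap_buffer_size() and initialized with bitmap_init(). A rough standalone illustration of the same idea follows; the word-rounding rule and the SubpartBitmapSketch type are assumptions made only for this sketch, not the server's MY_BITMAP implementation.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    /* Assumed sizing rule: round the bit count up to whole 32-bit words. */
    static size_t words_for_bits(size_t n_bits) { return (n_bits + 31) / 32; }

    struct SubpartBitmapSketch
    {
      std::vector<uint32_t> buf;
      size_t n_bits;
      explicit SubpartBitmapSketch(size_t bits)
        : buf(words_for_bits(bits), 0), n_bits(bits) {}
      void set(size_t i)          { buf[i / 32] |= uint32_t(1) << (i % 32); }
      bool is_set(size_t i) const { return (buf[i / 32] >> (i % 32)) & 1; }
    };

    int main()
    {
      SubpartBitmapSketch bm(6);   /* e.g. no_subparts == 6 */
      bm.set(1);
      bm.set(4);
      for (size_t i= 0; i < bm.n_bits; i++)
        printf("subpart %zu: %s\n", i, bm.is_set(i) ? "used" : "unused");
      return 0;
    }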
@@ -3058,7 +3108,7 @@ static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part)
   Print a singlepoint multi-keypart range interval to debug trace
 
   SYNOPSIS
-    dbug_print_onepoint_range()
+    dbug_print_singlepoint_range()
       start  Array of SEL_ARG* ptrs representing conditions on key parts
       num    Number of elements in the array.
 
@@ -3067,9 +3117,9 @@ static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part)
   interval to debug trace.
 */
 
-static void dbug_print_onepoint_range(SEL_ARG **start, uint num)
+static void dbug_print_singlepoint_range(SEL_ARG **start, uint num)
 {
-  DBUG_ENTER("dbug_print_onepoint_range");
+  DBUG_ENTER("dbug_print_singlepoint_range");
   DBUG_LOCK_FILE;
   SEL_ARG **end= start + num;
 

@@ -721,6 +721,7 @@ uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit);
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
+void store_key_image_to_rec(Field *field, char *ptr, uint len);
 #endif
 
 #endif

@@ -763,7 +763,10 @@ int THD::send_explain_fields(select_result *result)
 #ifdef WITH_PARTITION_STORAGE_ENGINE
   if (lex->describe & DESCRIBE_PARTITIONS)
   {
-    field_list.push_back(item= new Item_empty_string("partitions", 10, cs));
+    /* Maximum length of string that make_used_partitions_str() can produce */
+    item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
+                                cs);
+    field_list.push_back(item);
     item->maybe_null= 1;
   }
 #endif

@@ -110,7 +110,7 @@ enum enum_sql_command {
 #define DESCRIBE_NORMAL 1
 #define DESCRIBE_EXTENDED 2
 /*
-  This is not #ifdef'ed because we want "EXPLAIN PARTITIONS ..." to produce
+  This is not within #ifdef because we want "EXPLAIN PARTITIONS ..." to produce
   additional "partitions" column even if partitioning is not compiled in.
 */
 #define DESCRIBE_PARTITIONS 4

@@ -110,6 +110,21 @@ uint32 get_partition_id_linear_hash_sub(partition_info *part_info);
 uint32 get_partition_id_linear_key_sub(partition_info *part_info);
 #endif
 
+static uint32 get_next_partition_via_walking(PARTITION_ITERATOR*);
+static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR*);
+uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter);
+uint32 get_next_partition_id_list(PARTITION_ITERATOR* part_iter);
+int get_part_iter_for_interval_via_mapping(partition_info *part_info,
+                                           bool is_subpart,
+                                           byte *min_value, byte *max_value,
+                                           uint flags,
+                                           PARTITION_ITERATOR *part_iter);
+int get_part_iter_for_interval_via_walking(partition_info *part_info,
+                                           bool is_subpart,
+                                           byte *min_value, byte *max_value,
+                                           uint flags,
+                                           PARTITION_ITERATOR *part_iter);
+static void set_up_range_analysis_info(partition_info *part_info);
 
 /*
   A routine used by the parser to decide whether we are specifying a full
@@ -1866,8 +1881,8 @@ static void set_up_partition_func_pointers(partition_info *part_info)
   }
   DBUG_VOID_RETURN;
 }
 
 
 /*
   For linear hashing we need a mask which is on the form 2**n - 1 where
   2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
@@ -2101,6 +2116,7 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table,
   set_up_partition_key_maps(table, part_info);
   set_up_partition_func_pointers(part_info);
   part_info->fixed= TRUE;
+  set_up_range_analysis_info(part_info);
   result= FALSE;
 end:
   thd->set_query_id= save_set_query_id;
@@ -5494,13 +5510,21 @@ void mem_alloc_error(size_t size)
   my_error(ER_OUTOFMEMORY, MYF(0), size);
 }
 
+#ifdef WITH_PARTITION_STORAGE_ENGINE
 /*
-  Fill the string comma-separated line of used partitions names
+  Return a comma-separated list of used partitions in the provided string
 
   SYNOPSIS
     make_used_partitions_str()
       part_info  IN  Partitioning info
       parts_str  OUT The string to fill
+
+  DESCRIPTION
+    Generate a list of used partitions (from bits in part_info->used_partitions
+    bitmap), and store it into the provided String object.
+
+  NOTE
+    The produced string must not be longer than MAX_PARTITIONS * (1 + FN_LEN).
 */
 
 void make_used_partitions_str(partition_info *part_info, String *parts_str)
@@ -5510,7 +5534,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
   uint partition_id= 0;
   List_iterator<partition_element> it(part_info->partitions);
 
-  if (part_info->subpart_type != NOT_A_PARTITION)
+  if (is_sub_partitioned(part_info))
   {
     partition_element *head_pe;
     while ((head_pe= it++))
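make_used_partitions_str() walks the partition list and appends the name of every partition whose bit is set in used_partitions, separating names with commas; the NOTE above bounds the result at MAX_PARTITIONS * (1 + FN_LEN) characters. A simplified standalone sketch of that string building follows; the STL containers used for names and the bitmap are stand-ins for the server's structures.

    #include <cstdio>
    #include <string>
    #include <vector>

    /* Build "p0,p2,..." from a used-partition bitmap; sketch of the real routine. */
    static std::string used_partitions_str(const std::vector<std::string> &names,
                                           const std::vector<bool> &used)
    {
      std::string result;
      for (size_t id= 0; id < names.size(); id++)
      {
        if (!used[id])
          continue;
        if (!result.empty())
          result += ',';              /* comma between names, none at the end */
        result += names[id];
      }
      return result;
    }

    int main()
    {
      std::vector<std::string> names= {"p0", "p1", "p2", "p3"};
      std::vector<bool> used=         {true, false, true, false};
      printf("partitions: %s\n", used_partitions_str(names, used).c_str());  /* p0,p2 */
      return 0;
    }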
@@ -5549,4 +5573,443 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
       }
     }
 }
+#endif
 
+/****************************************************************************
+ * Partition interval analysis support
+ ***************************************************************************/
+
+/*
+  Setup partition_info::* members related to partitioning range analysis
+
+  SYNOPSIS
+    set_up_range_analysis_info()
+      part_info  Partitioning info structure
+
+  DESCRIPTION
+    Assuming that passed partition_info structure already has correct values
+    for members that specify [sub]partitioning type, table fields, and
+    functions, set up partition_info::* members that are related to
+    Partitioning Interval Analysis (see get_partitions_in_range_iter for its
+    definition)
+
+  IMPLEMENTATION
+    There are two available interval analyzer functions:
+    (1) get_part_iter_for_interval_via_mapping
+    (2) get_part_iter_for_interval_via_walking
+
+    They both have limited applicability:
+    (1) is applicable for "PARTITION BY <RANGE|LIST>(func(t.field))", where
+        func is a monotonic function.
+
+    (2) is applicable for
+        "[SUB]PARTITION BY <any-partitioning-type>(any_func(t.integer_field))"
+
+    If both are applicable, (1) is preferred over (2).
+
+    This function sets part_info::get_part_iter_for_interval according to
+    these criteria, and also sets some auxiliary fields that the function
+    uses.
+*/
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+static void set_up_range_analysis_info(partition_info *part_info)
+{
+  enum_monotonicity_info minfo;
+
+  /* Set the catch-all default */
+  part_info->get_part_iter_for_interval= NULL;
+  part_info->get_subpart_iter_for_interval= NULL;
+
+  /*
+    Check if get_part_iter_for_interval_via_mapping() can be used for
+    partitioning
+  */
+  switch (part_info->part_type) {
+  case RANGE_PARTITION:
+  case LIST_PARTITION:
+    minfo= part_info->part_expr->get_monotonicity_info();
+    if (minfo != NON_MONOTONIC)
+    {
+      part_info->range_analysis_include_bounds=
+        test(minfo == MONOTONIC_INCREASING);
+      part_info->get_part_iter_for_interval=
+        get_part_iter_for_interval_via_mapping;
+      goto setup_subparts;
+    }
+  default:
+    ;
+  }
+
+  /*
+    Check if get_part_iter_for_interval_via_walking() can be used for
+    partitioning
+  */
+  if (part_info->no_part_fields == 1)
+  {
+    Field *field= part_info->part_field_array[0];
+    switch (field->type()) {
+    case MYSQL_TYPE_TINY:
+    case MYSQL_TYPE_SHORT:
+    case MYSQL_TYPE_LONG:
+    case MYSQL_TYPE_LONGLONG:
+      part_info->get_part_iter_for_interval=
+        get_part_iter_for_interval_via_walking;
+      break;
+    default:
+      ;
+    }
+  }
+
+setup_subparts:
+  /*
+    Check if get_part_iter_for_interval_via_walking() can be used for
+    subpartitioning
+  */
+  if (part_info->no_subpart_fields == 1)
+  {
+    Field *field= part_info->subpart_field_array[0];
+    switch (field->type()) {
+    case MYSQL_TYPE_TINY:
+    case MYSQL_TYPE_SHORT:
+    case MYSQL_TYPE_LONG:
+    case MYSQL_TYPE_LONGLONG:
+      part_info->get_subpart_iter_for_interval=
+        get_part_iter_for_interval_via_walking;
+      break;
+    default:
+      ;
+    }
+  }
+}
+
+
+typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
+                                    bool include_endpoint);
+
+/*
+  Partitioning Interval Analysis: Initialize the iterator for "mapping" case
+
+  SYNOPSIS
+    get_part_iter_for_interval_via_mapping()
+      part_info   Partition info
+      is_subpart  TRUE  - act for subpartitioning
+                  FALSE - act for partitioning
+      min_value   minimum field value, in opt_range key format.
+      max_value   maximum field value, in opt_range key format.
+      flags       Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+                  NO_MAX_RANGE.
+      part_iter   Iterator structure to be initialized
+
+  DESCRIPTION
+    Initialize partition set iterator to walk over the interval in
+    ordered-array-of-partitions (for RANGE partitioning) or
+    ordered-array-of-list-constants (for LIST partitioning) space.
+
+  IMPLEMENTATION
+    This function is used when partitioning is done by
+    <RANGE|LIST>(ascending_func(t.field)), and we can map an interval in
+    t.field space into a sub-array of partition_info::range_int_array or
+    partition_info::list_array (see get_partition_id_range_for_endpoint,
+    get_list_array_idx_for_endpoint for details).
+
+    The function performs this interval mapping, and sets the iterator to
+    traverse the sub-array and return appropriate partitions.
+
+  RETURN
+    0 - No matching partitions (iterator not initialized)
+    1 - Ok, iterator initialized for traversal of matching partitions.
+   -1 - All partitions would match (iterator not initialized)
+*/
+
+int get_part_iter_for_interval_via_mapping(partition_info *part_info,
+                                           bool is_subpart,
+                                           byte *min_value, byte *max_value,
+                                           uint flags,
+                                           PARTITION_ITERATOR *part_iter)
+{
+  DBUG_ASSERT(!is_subpart);
+  Field *field= part_info->part_field_array[0];
+  uint32 max_endpoint_val;
+  get_endpoint_func get_endpoint;
+  uint field_len= field->pack_length_in_rec();
+
+  if (part_info->part_type == RANGE_PARTITION)
+  {
+    get_endpoint=        get_partition_id_range_for_endpoint;
+    max_endpoint_val=    part_info->no_parts;
+    part_iter->get_next= get_next_partition_id_range;
+  }
+  else if (part_info->part_type == LIST_PARTITION)
+  {
+    get_endpoint=        get_list_array_idx_for_endpoint;
+    max_endpoint_val=    part_info->no_list_values;
+    part_iter->get_next= get_next_partition_id_list;
+    part_iter->part_info= part_info;
+  }
+  else
+    DBUG_ASSERT(0);
+
+  /* Find minimum */
+  if (flags & NO_MIN_RANGE)
+    part_iter->start_part_num= 0;
+  else
+  {
+    /*
+      Store the interval edge in the record buffer, and call the
+      function that maps the edge in table-field space to an edge
+      in ordered-set-of-partitions (for RANGE partitioning) or
+      index-in-ordered-array-of-list-constants (for LIST) space.
+    */
+    store_key_image_to_rec(field, min_value, field_len);
+    bool include_endp= part_info->range_analysis_include_bounds ||
+                       !test(flags & NEAR_MIN);
+    part_iter->start_part_num= get_endpoint(part_info, 1, include_endp);
+    if (part_iter->start_part_num == max_endpoint_val)
+      return 0; /* No partitions */
+  }
+
+  /* Find maximum, do the same as above but for right interval bound */
+  if (flags & NO_MAX_RANGE)
+    part_iter->end_part_num= max_endpoint_val;
+  else
+  {
+    store_key_image_to_rec(field, max_value, field_len);
+    bool include_endp= part_info->range_analysis_include_bounds ||
+                       !test(flags & NEAR_MAX);
+    part_iter->end_part_num= get_endpoint(part_info, 0, include_endp);
+    if (part_iter->start_part_num == part_iter->end_part_num)
+      return 0; /* No partitions */
+  }
+  return 1; /* Ok, iterator initialized */
+}
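The 0/1/-1 contract documented above is what the callers in the range analyzer rely on: -1 means "treat every partition as used", 0 means the interval selects nothing, and 1 means the iterator can be drained with get_next() until the sentinel. A hedged sketch of that calling pattern follows; the trivial analyze_interval() returning a fixed [1, 3) slice and the IterSketch/END_MARKER names are local stand-ins, and only the return-code handling mirrors the code above.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const uint32_t END_MARKER= 0xFFFFFFFF;   /* stand-in for NOT_A_PARTITION_ID */

    struct IterSketch
    {
      uint32_t start, end;
      uint32_t get_next() { return start == end ? END_MARKER : start++; }
    };

    /* Stand-in analyzer: pretends the interval maps to partitions [1, 3). */
    static int analyze_interval(IterSketch *it)
    {
      it->start= 1;
      it->end= 3;
      return 1;      /* 0: nothing matches, 1: iterate, -1: everything matches */
    }

    int main()
    {
      std::vector<bool> used(5, false);
      IterSketch it= {0, 0};
      int res= analyze_interval(&it);

      if (res == -1)
        used.assign(used.size(), true);        /* no pruning possible */
      else if (res == 1)
      {
        uint32_t id;
        while ((id= it.get_next()) != END_MARKER)
          used[id]= true;                      /* only matching partitions */
      }                                        /* res == 0: leave all bits clear */

      for (size_t i= 0; i < used.size(); i++)
        printf("p%zu:%d ", i, used[i] ? 1 : 0);
      printf("\n");
      return 0;
    }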
+
+
+/* See get_part_iter_for_interval_via_walking for definition of what this is */
+#define MAX_RANGE_TO_WALK 10
+
+
+/*
+  Partitioning Interval Analysis: Initialize iterator to walk field interval
+
+  SYNOPSIS
+    get_part_iter_for_interval_via_walking()
+      part_info   Partition info
+      is_subpart  TRUE  - act for subpartitioning
+                  FALSE - act for partitioning
+      min_value   minimum field value, in opt_range key format.
+      max_value   maximum field value, in opt_range key format.
+      flags       Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+                  NO_MAX_RANGE.
+      part_iter   Iterator structure to be initialized
+
+  DESCRIPTION
+    Initialize partition set iterator to walk over interval in integer field
+    space. That is, for "const1 <=? t.field <=? const2" interval, initialize
+    the iterator to return a set of [sub]partitions obtained with the
+    following procedure:
+      get partition id for t.field = const1,   return it
+      get partition id for t.field = const1+1, return it
+      ...             t.field = const1+2, ...
+      ...                  ...            ...
+      ...             t.field = const2    ...
+
+  IMPLEMENTATION
+    See get_partitions_in_range_iter for general description of interval
+    analysis. We support walking over the following intervals:
+      "t.field IS NULL"
+      "c1 <=? t.field <=? c2", where c1 and c2 are finite.
+    Intervals with +inf/-inf, and [NULL, c1] interval can be processed but
+    that is more tricky and I don't have time to do it right now.
+
+    Additionally we have these requirements:
+    * number of values in the interval must be less than number of
+      [sub]partitions, and
+    * number of values in the interval must be less than MAX_RANGE_TO_WALK.
+
+    The rationale behind these requirements is that if they are not met
+    we're likely to hit most of the partitions and traversing the interval
+    will only add overhead. So it's better to return "all partitions used"
+    in that case.
+
+  RETURN
+    0 - No matching partitions, iterator not initialized
+    1 - Some partitions would match, iterator initialized for traversing them
+   -1 - All partitions would match, iterator not initialized
+*/
+
+int get_part_iter_for_interval_via_walking(partition_info *part_info,
+                                           bool is_subpart,
+                                           byte *min_value, byte *max_value,
+                                           uint flags,
+                                           PARTITION_ITERATOR *part_iter)
+{
+  Field *field;
+  uint total_parts;
+  partition_iter_func get_next_func;
+  if (is_subpart)
+  {
+    field= part_info->subpart_field_array[0];
+    total_parts= part_info->no_subparts;
+    get_next_func= get_next_subpartition_via_walking;
+  }
+  else
+  {
+    field= part_info->part_field_array[0];
+    total_parts= part_info->no_parts;
+    get_next_func= get_next_partition_via_walking;
+  }
+
+  /* Handle the "t.field IS NULL" interval, it is a special case */
+  if (field->real_maybe_null() && !(flags & (NO_MIN_RANGE | NO_MAX_RANGE)) &&
+      *min_value && *max_value)
+  {
+    /*
+      We don't have a part_iter->get_next() function that would find which
+      partition "t.field IS NULL" belongs to, so find partition that contains
+      NULL right here, and return an iterator over singleton set.
+    */
+    uint32 part_id;
+    field->set_null();
+    if (is_subpart)
+    {
+      part_id= part_info->get_subpartition_id(part_info);
+      init_single_partition_iterator(part_id, part_iter);
+      return 1; /* Ok, iterator initialized */
+    }
+    else
+    {
+      if (!part_info->get_partition_id(part_info, &part_id))
+      {
+        init_single_partition_iterator(part_id, part_iter);
+        return 1; /* Ok, iterator initialized */
+      }
+    }
+    return 0; /* No partitions match */
+  }
+
+  if (flags & (NO_MIN_RANGE | NO_MAX_RANGE))
+    return -1; /* Can't handle this interval, have to use all partitions */
+
+  /* Get integers for left and right interval bound */
+  longlong a, b;
+  uint len= field->pack_length_in_rec();
+  store_key_image_to_rec(field, min_value, len);
+  a= field->val_int();
+
+  store_key_image_to_rec(field, max_value, len);
+  b= field->val_int();
+
+  a += test(flags & NEAR_MIN);
+  b += test(!(flags & NEAR_MAX));
+  uint n_values= b - a;
+
+  if (n_values > total_parts || n_values > MAX_RANGE_TO_WALK)
+    return -1;
+
+  part_iter->start_val= a;
+  part_iter->end_val=   b;
+  part_iter->part_info= part_info;
+  part_iter->get_next=  get_next_func;
+  return 1;
+}
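The open/closed bound handling above is compact: NEAR_MIN means the left bound is strict, so a is bumped by one; a non-strict right bound bumps b so that the half-open range [a, b) covers it. For example, for 2 < t.field <= 5 the walk visits 3, 4 and 5. A small standalone check of that arithmetic follows; the flag constants are local stand-ins, not the server's opt_range defines.

    #include <cstdio>

    /* Local stand-ins for the opt_range flags used above. */
    static const unsigned SK_NEAR_MIN= 1;   /* left bound is strict  (min < field) */
    static const unsigned SK_NEAR_MAX= 2;   /* right bound is strict (field < max) */

    int main()
    {
      long long min_val= 2, max_val= 5;
      unsigned flags= SK_NEAR_MIN;          /* interval: 2 < t.field <= 5 */

      long long a= min_val + ((flags & SK_NEAR_MIN) ? 1 : 0);
      long long b= max_val + (!(flags & SK_NEAR_MAX) ? 1 : 0);
      unsigned n_values= (unsigned)(b - a); /* number of integer points to walk */

      printf("walk %u values:", n_values);
      for (long long v= a; v != b; v++)     /* mirrors start_val/end_val walking */
        printf(" %lld", v);
      printf("\n");                         /* prints: walk 3 values: 3 4 5 */
      return 0;
    }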
+
+
+/*
+  PARTITION_ITERATOR::get_next implementation: enumerate partitions in range
+
+  SYNOPSIS
+    get_next_partition_id_range()
+      part_iter  Partition set iterator structure
+
+  DESCRIPTION
+    This is implementation of PARTITION_ITERATOR::get_next() that returns
+    [sub]partition ids in [min_partition_id, max_partition_id] range.
+
+  RETURN
+    partition id
+    NOT_A_PARTITION_ID if there are no more partitions
+*/
+
+uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
+{
+  if (part_iter->start_part_num == part_iter->end_part_num)
+    return NOT_A_PARTITION_ID;
+  else
+    return part_iter->start_part_num++;
+}
+
+
+/*
+  PARTITION_ITERATOR::get_next implementation for LIST partitioning
+
+  SYNOPSIS
+    get_next_partition_id_list()
+      part_iter  Partition set iterator structure
+
+  DESCRIPTION
+    This implementation of PARTITION_ITERATOR::get_next() is special for
+    LIST partitioning: it enumerates partition ids in
+    part_info->list_array[i] where i runs over [min_idx, max_idx] interval.
+
+  RETURN
+    partition id
+    NOT_A_PARTITION_ID if there are no more partitions
+*/
+
+uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
+{
+  if (part_iter->start_part_num == part_iter->end_part_num)
+    return NOT_A_PARTITION_ID;
+  else
+    return part_iter->part_info->list_array[part_iter->
+                                            start_part_num++].partition_id;
+}
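For LIST partitioning the iterator does not return array indexes directly: start_part_num/end_part_num select a slice of the sorted list-values array, and each element of that slice carries the id of the partition that owns the value. A standalone sketch of that indirection follows; the array of {value, partition_id} pairs is a simplified stand-in for partition_info::list_array, and the names are assumptions made for the sketch.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const uint32_t END_MARKER= 0xFFFFFFFF;   /* stand-in for NOT_A_PARTITION_ID */

    struct ListEntrySketch { long long value; uint32_t partition_id; };

    struct ListIterSketch
    {
      const std::vector<ListEntrySketch> *list_array;
      uint32_t start_idx, end_idx;                  /* slice picked by interval mapping */
      uint32_t get_next()
      {
        if (start_idx == end_idx)
          return END_MARKER;
        return (*list_array)[start_idx++].partition_id;  /* index -> owning partition */
      }
    };

    int main()
    {
      /* Sorted list values; the owning partitions need not be contiguous. */
      std::vector<ListEntrySketch> list_array= {{10, 2}, {12, 0}, {14, 1}, {20, 2}};
      ListIterSketch it= {&list_array, 1, 3};       /* values 12 and 14 match the interval */

      uint32_t id;
      while ((id= it.get_next()) != END_MARKER)
        printf("partition %u\n", id);               /* prints partitions 0 and 1 */
      return 0;
    }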
+
+
+/*
+  PARTITION_ITERATOR::get_next implementation: walk over field-space interval
+
+  SYNOPSIS
+    get_next_partition_via_walking()
+      part_iter  Partitioning iterator
+
+  DESCRIPTION
+    This implementation of PARTITION_ITERATOR::get_next() returns ids of
+    partitions that contain records with partitioning field value within
+    [start_val, end_val] interval.
+
+  RETURN
+    partition id
+    NOT_A_PARTITION_ID if there are no more partitions.
+*/
+
+static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
+{
+  uint32 part_id;
+  Field *field= part_iter->part_info->part_field_array[0];
+  while (part_iter->start_val != part_iter->end_val)
+  {
+    field->store(part_iter->start_val, FALSE);
+    part_iter->start_val++;
+    if (!part_iter->part_info->get_partition_id(part_iter->part_info,
+                                                &part_id))
+      return part_id;
+  }
+  return NOT_A_PARTITION_ID;
+}
+
+
+/* Same as get_next_partition_via_walking, but for subpartitions */
+
+static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
+{
+  uint32 part_id;
+  Field *field= part_iter->part_info->subpart_field_array[0];
+  if (part_iter->start_val == part_iter->end_val)
+    return NOT_A_PARTITION_ID;
+  field->store(part_iter->start_val, FALSE);
+  part_iter->start_val++;
+  return part_iter->part_info->get_subpartition_id(part_iter->part_info);
+}
+#endif
+
@@ -639,6 +639,11 @@ JOIN::optimize()
     TABLE_LIST *tbl;
     for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
     {
+      /*
+        If tbl->embedding!=NULL that means that this table is in the inner
+        part of the nested outer join, and we can't do partition pruning
+        (TODO: check if this limitation can be lifted)
+      */
       if (!tbl->embedding)
      {
         Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;

@@ -1415,7 +1415,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
     memcpy((char*) tmp_table->field[0]->ptr,
            (char*) table->file->ref, table->file->ref_length);
     /* Write row, ignoring duplicated updates to a row */
-    if (error= tmp_table->file->ha_write_row(tmp_table->record[0]))
+    if ((error= tmp_table->file->ha_write_row(tmp_table->record[0])))
     {
       if (error != HA_ERR_FOUND_DUPP_KEY &&
           error != HA_ERR_FOUND_DUPP_UNIQUE &&
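The extra parentheses in the changed line matter because "if (error= ...)" assigns and then tests the result, which compilers commonly flag as a probable "==" typo; wrapping the assignment in its own parentheses keeps the behaviour and states the intent explicitly. A tiny standalone illustration follows; write_row_stub is a made-up stand-in returning an error code, not a server API.

    #include <cstdio>

    /* Hypothetical stand-in for a handler call that returns 0 on success. */
    static int write_row_stub(bool fail) { return fail ? 121 : 0; }

    int main()
    {
      int error;
      /* Double parentheses: assign first, then test the assigned value. */
      if ((error= write_row_stub(true)))
        printf("write failed, error=%d\n", error);

      if ((error= write_row_stub(false)))
        printf("unexpected failure\n");
      else
        printf("write ok, error=%d\n", error);
      return 0;
    }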