Commit: Igor Babaev, 2010-12-10 23:23:34 -08:00
115 changed files with 8761 additions and 3775 deletions


@@ -160,7 +160,9 @@ typedef struct st_join_table {
TABLE *table;
KEYUSE *keyuse; /**< pointer to first used key */
SQL_SELECT *select;
COND *select_cond;
COND *on_precond; /**< part of on condition to check before
accessing the first inner table */
QUICK_SELECT_I *quick;
/*
The value of select_cond before we've attempted to do Index Condition
@@ -216,11 +218,16 @@ typedef struct st_join_table {
E(#records) is in found_records.
*/
ha_rows read_time;
double partial_join_cardinality;
table_map dependent,key_dependent;
uint use_quick,index;
uint status; ///< Save status for cache
uint used_fields,used_fieldlength,used_blobs;
uint used_fields;
ulong used_fieldlength;
ulong max_used_fieldlength;
uint used_blobs;
uint used_null_fields;
uint used_rowid_fields;
uint used_uneven_bit_fields;
@@ -235,6 +242,7 @@ typedef struct st_join_table {
ha_rows limit;
TABLE_REF ref;
bool use_join_cache;
ulong join_buffer_size_limit;
JOIN_CACHE *cache;
/*
Index condition for BKA access join
@@ -298,6 +306,8 @@ typedef struct st_join_table {
*/
uint sj_strategy;
struct st_join_table *first_sjm_sibling;
void cleanup();
inline bool is_using_loose_index_scan()
{
@@ -349,6 +359,19 @@ typedef struct st_join_table {
return (first_inner && first_inner->last_inner == this) ||
last_sj_inner_tab == this;
}
/*
Check whether the table belongs to a nest of inner tables of an
outer join or to a nest of inner tables of a semi-join
*/
bool is_nested_inner()
{
if (first_inner &&
(first_inner != first_inner->last_inner || first_inner->first_upper))
return TRUE;
if (first_sj_inner_tab && first_sj_inner_tab != last_sj_inner_tab)
return TRUE;
return FALSE;
}
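/*
  Two hedged examples of the condition above (table names t1..t3 are
  hypothetical):
    SELECT * FROM t1 LEFT JOIN (t2, t3) ON ...
      -- the outer join nest has two inner tables, so t2 and t3 are
         nested inner;
    SELECT * FROM t1 LEFT JOIN (t2 LEFT JOIN t3 ON ...) ON ...
      -- the nest of t3 has an embedding (upper) nest, so t3 is
         nested inner.
*/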
struct st_join_table *get_first_inner_table()
{
if (first_inner)
@@ -369,850 +392,26 @@ typedef struct st_join_table {
select->cond= new_cond;
return tmp_select_cond;
}
void calc_used_field_length(bool max_fl);
ulong get_used_fieldlength()
{
if (!used_fieldlength)
calc_used_field_length(FALSE);
return used_fieldlength;
}
ulong get_max_used_fieldlength()
{
if (!max_used_fieldlength)
calc_used_field_length(TRUE);
return max_used_fieldlength;
}
double get_partial_join_cardinality() { return partial_join_cardinality; }
bool hash_join_is_possible();
int make_scan_filter();
} JOIN_TAB;
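/*
  A hedged note on get_used_fieldlength()/get_max_used_fieldlength() above:
  used_fieldlength and max_used_fieldlength act as compute-on-first-use
  caches, so a zero value means "not yet calculated" and triggers
  calc_used_field_length(). A minimal standalone sketch of the pattern
  (Stats, compute_len and row_len are hypothetical names):
*/
#if 0  /* illustration only */
struct Stats
{
  ulong cached_len;                     /* 0 means "not computed yet" */
  ulong compute_len() { return 42; }    /* stand-in for the real calculation */
  ulong row_len()
  {
    if (!cached_len)
      cached_len= compute_len();        /* computed at most once */
    return cached_len;
  }
};
#endif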
/*
Categories of data fields of variable length written into join cache buffers.
The value of any of these fields is written into cache together with the
prepended length of the value.
*/
#define CACHE_BLOB 1 /* blob field */
#define CACHE_STRIPPED 2 /* field stripped of trailing spaces */
#define CACHE_VARSTR1 3 /* short string value (length takes 1 byte) */
#define CACHE_VARSTR2 4 /* long string value (length takes 2 bytes) */
/*
The CACHE_FIELD structure used to describe fields of records that
are written into a join cache buffer from record buffers and backward.
*/
typedef struct st_cache_field {
uchar *str; /**< buffer from/to where the field is to be copied */
uint length; /**< maximal number of bytes to be copied from/to str */
/*
Field object for the moved field
(0 - for a flag field, see JOIN_CACHE::create_flag_fields).
*/
Field *field;
uint type; /**< category of the copied field (CACHE_BLOB et al.) */
/*
The number of the record offset value for the field in the sequence
of offsets placed after the last field of the record. These
offset values are used to access fields referred to from other caches.
If the value is 0 then no offset for the field is saved in the
trailing sequence of offsets.
*/
uint referenced_field_no;
/* The remaining structure fields are used as containers for temp values */
uint blob_length; /**< length of the blob to be copied */
uint offset; /**< field offset to be saved in cache buffer */
} CACHE_FIELD;
/*
JOIN_CACHE is the base class to support the implementations of both
Blocked-Based Nested Loops (BNL) Join Algorithm and Batched Key Access (BKA)
Join Algorithm. The first algorithm is supported by the derived class
JOIN_CACHE_BNL, while the second algorithm is supported by the derived
class JOIN_CACHE_BKA.
These two algorithms have a lot in common. Both algorithms first
accumulate the records of the left join operand in a join buffer and
then search for matching rows of the second operand for all accumulated
records.
For the first algorithm this strategy saves on logical I/O operations:
matching the entire set of records from the join buffer requires only one
pass through the records provided by the second operand.
For the second algorithm the accumulation of records makes it possible to
optimize fetching rows of the second operand from disk for some engines
(MyISAM, InnoDB), or to minimize the number of round-trips between the
Server and the engine nodes (NDB Cluster).
*/
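/*
  A minimal, hedged sketch of the buffering idea behind BNL in plain C++
  (Row, scan_right, match, emit and bnl_join are hypothetical names, not
  part of this class hierarchy): rows of the left operand are accumulated,
  and each time the buffer fills the right operand is scanned once against
  all buffered rows, so the right side is read once per buffer refill
  instead of once per left row.
*/
#if 0  /* illustration only */
#include <cstddef>
#include <vector>

template <class Row, class ScanRight, class Match, class Emit>
void bnl_join(const std::vector<Row> &left, ScanRight scan_right,
              Match match, Emit emit, std::size_t buffer_rows)
{
  std::vector<Row> buf;
  auto flush= [&]()
  {
    if (buf.empty())
      return;
    scan_right([&](const Row &r)        /* one pass over the right operand */
    {
      for (const Row &l : buf)
        if (match(l, r))
          emit(l, r);                   /* produce a joined row */
    });
    buf.clear();
  };
  for (const Row &l : left)
  {
    buf.push_back(l);
    if (buf.size() >= buffer_rows)
      flush();                          /* buffer full: scan right once */
  }
  flush();                              /* last, partially filled buffer */
}
#endif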
class JOIN_CACHE :public Sql_alloc
{
private:
/* Size of the offset of a record from the cache */
uint size_of_rec_ofs;
/* Size of the length of a record in the cache */
uint size_of_rec_len;
/* Size of the offset of a field within a record in the cache */
uint size_of_fld_ofs;
protected:
/* The three functions below do not actually use the hidden parameter 'this' */
/* Calculate the number of bytes used to store an offset value */
uint offset_size(uint len)
{ return (len < 256 ? 1 : len < 256*256 ? 2 : 4); }
/* Get the offset value that takes ofs_sz bytes at the position ptr */
ulong get_offset(uint ofs_sz, uchar *ptr)
{
switch (ofs_sz) {
case 1: return uint(*ptr);
case 2: return uint2korr(ptr);
case 4: return uint4korr(ptr);
}
return 0;
}
/* Set the offset value ofs that takes ofs_sz bytes at the position ptr */
void store_offset(uint ofs_sz, uchar *ptr, ulong ofs)
{
switch (ofs_sz) {
case 1: *ptr= (uchar) ofs; return;
case 2: int2store(ptr, (uint16) ofs); return;
case 4: int4store(ptr, (uint32) ofs); return;
}
}
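/*
  Hedged, standalone illustration of the variable-width little-endian
  encoding implemented by offset_size()/get_offset()/store_offset() above
  (demo_store/demo_load/demo are hypothetical stand-ins for the
  int2store/uint2korr macro family):
*/
#if 0  /* illustration only */
#include <cstdio>

static void demo_store(uint ofs_sz, uchar *ptr, ulong ofs)
{
  for (uint i= 0; i < ofs_sz; i++)
    ptr[i]= (uchar) (ofs >> (8*i));     /* little-endian byte order */
}

static ulong demo_load(uint ofs_sz, const uchar *ptr)
{
  ulong ofs= 0;
  for (uint i= 0; i < ofs_sz; i++)
    ofs|= (ulong) ptr[i] << (8*i);
  return ofs;
}

static void demo()
{
  uchar buf[4];
  ulong ofs= 70000;                                /* >= 256*256 */
  uint sz= ofs < 256 ? 1 : ofs < 256*256 ? 2 : 4;  /* == offset_size(ofs) */
  demo_store(sz, buf, ofs);
  printf("%u bytes, round-trip %lu\n", sz, demo_load(sz, buf)); /* 4, 70000 */
}
#endif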
/*
The total maximal length of the fields stored for a record in the cache.
For blob fields only the sizes of the blob lengths are taken into account.
*/
uint length;
/*
Representation of the executed multi-way join through which all needed
context can be accessed.
*/
JOIN *join;
/*
Cardinality of the range of join tables whose fields can be put into the
cache. (A table from the range does not necessarily contribute to the cache.)
*/
uint tables;
/*
The total number of flag and data fields that can appear in a record
written into the cache. Fields with null values are always skipped
to save space.
*/
uint fields;
/*
The total number of flag fields in a record put into the cache. They are
used for table null bitmaps, table null row flags, and an optional match
flag. Flag fields go before other fields in a cache record, with the match
flag field always placed at the very beginning of the record.
*/
uint flag_fields;
/* The total number of blob fields that are written into the cache */
uint blobs;
/*
The total number of fields referenced from field descriptors for other join
caches. These fields are used to construct key values to access matching
rows with index lookups. Currently the fields can be referenced only from
descriptors for BKA caches. However, they may belong to a cache of any type.
*/
uint referenced_fields;
/*
The current number of already created data field descriptors.
This number can be useful for implementations of the init methods.
*/
uint data_field_count;
/*
The current number of already created pointers to the data field
descriptors. This number can be useful for implementations of
the init methods.
*/
uint data_field_ptr_count;
/*
Array of the descriptors of fields containing 'fields' elements.
These are all fields that are stored for a record in the cache.
*/
CACHE_FIELD *field_descr;
/*
Array of pointers to the blob descriptors that contains 'blobs' elements.
*/
CACHE_FIELD **blob_ptr;
/*
This flag indicates that records written into the join buffer contain
a match flag field. The flag must be set by the init method.
*/
bool with_match_flag;
/*
This flag indicates that every record is prepended with its length, which
allows us to skip the record or part of it without reading.
*/
bool with_length;
/*
The maximal number of bytes used for a record representation in
the cache excluding the space for blob data.
For future derived classes this representation may contain some
redundant info such as a key value associated with the record.
*/
uint pack_length;
/*
The value of pack_length incremented by the total size of all
pointers of a record in the cache to the blob data.
*/
uint pack_length_with_blob_ptrs;
/* Pointer to the beginning of the join buffer */
uchar *buff;
/*
Size of the entire memory allocated for the join buffer.
Part of this memory may be reserved for the auxiliary buffer.
*/
ulong buff_size;
/* Size of the auxiliary buffer. */
ulong aux_buff_size;
/* The number of records put into the join buffer */
uint records;
/*
Pointer to the current position in the join buffer.
This member is used both when writing to buffer and
when reading from it.
*/
uchar *pos;
/*
Pointer to the first free position in the join buffer,
right after the last record in it.
*/
uchar *end_pos;
/*
Pointer to the beginning of first field of the current read/write record
from the join buffer. The value is adjusted by the get_record/put_record
functions.
*/
uchar *curr_rec_pos;
/*
Pointer to the beginning of first field of the last record
from the join buffer.
*/
uchar *last_rec_pos;
/*
Flag is set if the blob data for the last record in the join buffer
is in record buffers rather than in the join cache.
*/
bool last_rec_blob_data_is_in_rec_buff;
/*
Pointer to the position of the current record link.
Record links are used only with linked caches. Record links make it
possible to connect parts of one join record that are stored in
different join buffers.
In the simplest case a record link is just a pointer to the beginning of
the record stored in the buffer.
In a more general case a link could be a reference to an array of
pointers to records in the buffer. */
uchar *curr_rec_link;
void calc_record_fields();
int alloc_fields(uint external_fields);
void create_flag_fields();
void create_remaining_fields(bool all_read_fields);
void set_constants();
int alloc_buffer();
uint get_size_of_rec_offset() { return size_of_rec_ofs; }
uint get_size_of_rec_length() { return size_of_rec_len; }
uint get_size_of_fld_offset() { return size_of_fld_ofs; }
uchar *get_rec_ref(uchar *ptr)
{
return buff+get_offset(size_of_rec_ofs, ptr-size_of_rec_ofs);
}
ulong get_rec_length(uchar *ptr)
{
return (ulong) get_offset(size_of_rec_len, ptr);
}
ulong get_fld_offset(uchar *ptr)
{
return (ulong) get_offset(size_of_fld_ofs, ptr);
}
void store_rec_ref(uchar *ptr, uchar* ref)
{
store_offset(size_of_rec_ofs, ptr-size_of_rec_ofs, (ulong) (ref-buff));
}
void store_rec_length(uchar *ptr, ulong len)
{
store_offset(size_of_rec_len, ptr, len);
}
void store_fld_offset(uchar *ptr, ulong ofs)
{
store_offset(size_of_fld_ofs, ptr, ofs);
}
/* Write record fields and their required offsets into the join buffer */
uint write_record_data(uchar *link, bool *is_full);
/*
This method must determine by how much the auxiliary buffer should be
incremented when a new record is added to the join buffer.
If no auxiliary buffer is needed the function should return 0.
*/
virtual uint aux_buffer_incr() { return 0; }
/* Shall calculate how much space is remaining in the join buffer */
virtual ulong rem_space()
{
return max(buff_size-(end_pos-buff)-aux_buff_size,0);
}
/* Shall skip record from the join buffer if its match flag is on */
virtual bool skip_record_if_match();
/* Read all flag and data fields of a record from the join buffer */
uint read_all_record_fields();
/* Read all flag fields of a record from the join buffer */
uint read_flag_fields();
/* Read a data record field from the join buffer */
uint read_record_field(CACHE_FIELD *copy, bool last_record);
/* Read a referenced field from the join buffer */
bool read_referenced_field(CACHE_FIELD *copy, uchar *rec_ptr, uint *len);
/*
True if rec_ptr points to the record whose blob data stays in
record buffers
*/
bool blob_data_is_in_rec_buff(uchar *rec_ptr)
{
return rec_ptr == last_rec_pos && last_rec_blob_data_is_in_rec_buff;
}
/* Find matches from the next table for records from the join buffer */
virtual enum_nested_loop_state join_matching_records(bool skip_last)=0;
/* Add null complements for unmatched outer records from buffer */
virtual enum_nested_loop_state join_null_complements(bool skip_last);
/* Restore the fields of the last record from the join buffer */
virtual void restore_last_record();
/* Set match flag for a record in join buffer if it has not been set yet */
bool set_match_flag_if_none(JOIN_TAB *first_inner, uchar *rec_ptr);
enum_nested_loop_state generate_full_extensions(uchar *rec_ptr);
/* Check matching to a partial join record from the join buffer */
bool check_match(uchar *rec_ptr);
public:
/* Table to be joined with the partial join records from the cache */
JOIN_TAB *join_tab;
/* Pointer to the previous join cache if there is any */
JOIN_CACHE *prev_cache;
/* Pointer to the next join cache if there is any */
JOIN_CACHE *next_cache;
/* Shall initialize the join cache structure */
virtual int init()=0;
/* The function shall return TRUE only for BKA caches */
virtual bool is_key_access() { return FALSE; }
/* Shall reset the join buffer for reading/writing */
virtual void reset(bool for_writing);
/*
This function shall add a record into the join buffer and return TRUE
if it has been decided that it should be the last record in the buffer.
*/
virtual bool put_record();
/*
This function shall read the next record into the join buffer and return
TRUE if there are no more records to read.
*/
virtual bool get_record();
/*
This function shall read the record at the position rec_ptr
in the join buffer
*/
virtual void get_record_by_pos(uchar *rec_ptr);
/* Shall return the value of the match flag for the positioned record */
virtual bool get_match_flag_by_pos(uchar *rec_ptr);
/* Shall return the position of the current record */
virtual uchar *get_curr_rec() { return curr_rec_pos; }
/* Shall set the current record link */
virtual void set_curr_rec_link(uchar *link) { curr_rec_link= link; }
/* Shall return the current record link */
virtual uchar *get_curr_rec_link()
{
return (curr_rec_link ? curr_rec_link : get_curr_rec());
}
/* Join records from the join buffer with records from the next join table */
enum_nested_loop_state join_records(bool skip_last);
virtual ~JOIN_CACHE() {}
void reset_join(JOIN *j) { join= j; }
void free()
{
x_free(buff);
buff= 0;
}
friend class JOIN_CACHE_BNL;
friend class JOIN_CACHE_BKA;
friend class JOIN_CACHE_BKA_UNIQUE;
};
class JOIN_CACHE_BNL :public JOIN_CACHE
{
protected:
/* Using BNL find matches from the next table for records from join buffer */
enum_nested_loop_state join_matching_records(bool skip_last);
public:
/*
This constructor creates an unlinked BNL join cache. The cache is to be
used to join table 'tab' to the result of joining the previous tables
specified by the 'j' parameter.
*/
JOIN_CACHE_BNL(JOIN *j, JOIN_TAB *tab)
{
join= j;
join_tab= tab;
prev_cache= next_cache= 0;
}
/*
This constructor creates a linked BNL join cache. The cache is to be
used to join table 'tab' to the result of joining the previous tables
specified by the 'j' parameter. The parameter 'prev' specifies the previous
cache object to which this cache is linked.
*/
JOIN_CACHE_BNL(JOIN *j, JOIN_TAB *tab, JOIN_CACHE *prev)
{
join= j;
join_tab= tab;
prev_cache= prev;
next_cache= 0;
if (prev)
prev->next_cache= this;
}
/* Initialize the BNL cache */
int init();
};
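/*
  A hedged usage sketch (join, tab1 and tab2 are hypothetical): chaining
  two BNL caches with the constructors above. The second constructor
  links the new cache after 'prev' and sets prev->next_cache accordingly.

    JOIN_CACHE_BNL *c1= new JOIN_CACHE_BNL(join, tab1);      // unlinked
    JOIN_CACHE_BNL *c2= new JOIN_CACHE_BNL(join, tab2, c1);  // now c1->next_cache == c2
*/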
class JOIN_CACHE_BKA :public JOIN_CACHE
{
protected:
/* Flag to be passed to the MRR interface */
uint mrr_mode;
/* MRR buffer associated with this join cache */
HANDLER_BUFFER mrr_buff;
/* Shall initialize the MRR buffer */
virtual void init_mrr_buff()
{
mrr_buff.buffer= end_pos;
mrr_buff.buffer_end= buff+buff_size;
}
/*
The number of the cache fields that are used in building keys to access
the table join_tab
*/
uint local_key_arg_fields;
/*
The total number of the fields in the previous caches that are used
in building keys to access the table join_tab
*/
uint external_key_arg_fields;
/*
This flag indicates that the key values will be read directly from the join
buffer, saving us from building key values in the key buffer.
*/
bool use_emb_key;
/* The length of an embedded key value */
uint emb_key_length;
/* Check whether the access keys can be read directly from the join buffer */
bool check_emb_key_usage();
/* Calculate the increment of the MRR buffer for a record write */
uint aux_buffer_incr();
/* Using BKA find matches from the next table for records from join buffer */
enum_nested_loop_state join_matching_records(bool skip_last);
/* Prepare to search for records that match records from the join buffer */
enum_nested_loop_state init_join_matching_records(RANGE_SEQ_IF *seq_funcs,
uint ranges);
/* Finish searching for records that match records from the join buffer */
enum_nested_loop_state end_join_matching_records(enum_nested_loop_state rc);
public:
/*
This constructor creates an unlinked BKA join cache. The cache is to be
used to join table 'tab' to the result of joining the previous tables
specified by the 'j' parameter.
The MRR mode initially is set to 'flags'.
*/
JOIN_CACHE_BKA(JOIN *j, JOIN_TAB *tab, uint flags)
{
join= j;
join_tab= tab;
prev_cache= next_cache= 0;
mrr_mode= flags;
}
/*
This constructor creates a linked BKA join cache. The cache is to be
used to join table 'tab' to the result of joining the previous tables
specified by the 'j' parameter. The parameter 'prev' specifies the cache
object to which this cache is linked.
The MRR mode initially is set to 'flags'.
*/
JOIN_CACHE_BKA(JOIN *j, JOIN_TAB *tab, uint flags, JOIN_CACHE* prev)
{
join= j;
join_tab= tab;
prev_cache= prev;
next_cache= 0;
if (prev)
prev->next_cache= this;
mrr_mode= flags;
}
/* Initialize the BKA cache */
int init();
bool is_key_access() { return TRUE; }
/* Shall get the key built over the next record from the join buffer */
virtual uint get_next_key(uchar **key);
/* Check if the record combination matches the index condition */
bool skip_index_tuple(range_seq_t rseq, char *range_info);
};
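/*
  A minimal, hedged sketch of the batching idea behind BKA (Key, Fetch,
  bka_batch and fetch_by_key are hypothetical names, not the MRR API):
  instead of one random index lookup per buffered record, the keys of all
  buffered records are handed over in one batch and reordered so that the
  engine touches data pages roughly sequentially.
*/
#if 0  /* illustration only */
#include <algorithm>
#include <vector>

template <class Key, class Fetch>
void bka_batch(std::vector<Key> keys, Fetch fetch_by_key)
{
  std::sort(keys.begin(), keys.end());  /* engine-friendly access order */
  for (const Key &k : keys)
    fetch_by_key(k);                    /* batched, roughly sequential I/O */
}
#endif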
/*
The class JOIN_CACHE_BKA_UNIQUE supports the variant of the BKA join algorithm
that submits only distinct keys to the MRR interface. The records in the join
buffer of a cache of this class that have the same access key are linked into
a chain attached to a key entry structure that either itself contains the key
value, or, in the case when the keys are embedded, refers to its occurrence in
one of the records from the chain.
To build the chains of records with the same keys a hash table is employed.
It is placed at the very end of the join buffer. The array of hash entries is
allocated first, at the very bottom of the join buffer; the key entries follow
it. A hash entry contains a header of the list of the key entries with the
same hash value.
Each key entry is a structure of the following type:
struct st_join_cache_key_entry {
union {
uchar[] value;
cache_ref *value_ref; // offset from the beginning of the buffer
} hash_table_key;
key_ref next_key; // offset backward from the beginning of hash table
cache_ref *last_rec; // offset from the beginning of the buffer
}
The references linking the records in a chain are always placed at the very
beginning of the record info stored in the join buffer. The records are
linked in a circular list. A new record is always added to the end of this
list. When a key is passed to the MRR interface it can be passed either with
an association link containing a reference to the header of the record chain
attached to the corresponding key entry in the hash table, or without any
association link. When the next record is returned by a call to the MRR
function multi_range_read_next without any association (because it was not
passed together with the key), the key value is extracted from the returned
record and looked up in the hash table. If there are any records with this
key, their chain is yielded as the result of this search.
The following picture represents a typical layout for the info stored in the
join buffer of a join cache object of the JOIN_CACHE_BKA_UNIQUE class.
buff
V
+----------------------------------------------------------------------------+
| |[*]record_1_1| |
| ^ | |
| | +--------------------------------------------------+ |
| | |[*]record_2_1| | |
| | ^ | V |
| | | +------------------+ |[*]record_1_2| |
| | +--------------------+-+ | |
|+--+ +---------------------+ | | +-------------+ |
|| | | V | | |
|||[*]record_3_1| |[*]record_1_3| |[*]record_2_2| | |
||^ ^ ^ | |
||+----------+ | | | |
||^ | |<---------------------------+-------------------+ |
|++ | | ... mrr | buffer ... ... | | |
| | | | |
| +-----+--------+ | +-----|-------+ |
| V | | | V | | |
||key_3|[/]|[*]| | | |key_2|[/]|[*]| | |
| +-+---|-----------------------+ | |
| V | | | | |
| |key_1|[*]|[*]| | | ... |[*]| ... |[*]| ... | |
+----------------------------------------------------------------------------+
^ ^ ^
| i-th entry j-th entry
hash table
i-th hash entry:
circular record chain for key_1:
record_1_1
record_1_2
record_1_3 (points to record_1_1)
circular record chain for key_3:
record_3_1 (points to itself)
j-th hash entry:
circular record chain for key_2:
record_2_1
record_2_2 (points to record_2_1)
*/
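/*
  A hedged illustration of the deduplication effect described above, with
  std::unordered_map standing in for the in-buffer hash table (the real
  cache links the chains in place inside the join buffer; all names below
  are hypothetical):
*/
#if 0  /* illustration only */
#include <cstddef>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

static void demo_unique_keys()
{
  /* offsets of buffered records keyed by their (duplicated) access key */
  std::vector<std::pair<std::string, ulong> > buffered=
    { {"key_1", 0}, {"key_2", 40}, {"key_1", 80},
      {"key_3", 120}, {"key_1", 160} };

  std::unordered_map<std::string, std::vector<ulong> > chains;
  for (std::size_t i= 0; i < buffered.size(); i++)
    chains[buffered[i].first].push_back(buffered[i].second);

  /*
    chains.size() == 3: only key_1, key_2 and key_3 would be submitted to
    the MRR interface, and a row returned for key_1 yields the whole
    {0, 80, 160} record chain.
  */
}
#endif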
class JOIN_CACHE_BKA_UNIQUE :public JOIN_CACHE_BKA
{
private:
/* Size of the offset of a key entry in the hash table */
uint size_of_key_ofs;
/*
Length of a key value.
It is assumed that all key values have the same length.
*/
uint key_length;
/*
Length of the key entry in the hash table.
A key entry either contains the key value, or it contains a reference
to the key value if use_emb_key flag is set for the cache.
*/
uint key_entry_length;
/* The beginning of the hash table in the join buffer */
uchar *hash_table;
/* Number of hash entries in the hash table */
uint hash_entries;
/* Number of key entries in the hash table (number of distinct keys) */
uint key_entries;
/* The position of the last key entry in the hash table */
uchar *last_key_entry;
/* The position of the currently retrieved key entry in the hash table */
uchar *curr_key_entry;
/*
The offset of the record fields from the beginning of the record
representation. The record representation starts with a reference to
the next record in the key record chain followed by the length of
the trailing record data followed by a reference to the record segment
in the previous cache, if any, followed by the record fields.
*/
uint rec_fields_offset;
/* The offset of the data fields from the beginning of the record fields */
uint data_fields_offset;
uint get_hash_idx(uchar* key, uint key_len);
void cleanup_hash_table();
protected:
uint get_size_of_key_offset() { return size_of_key_ofs; }
/*
Get the position of the next_key_ptr field pointed to by
a linking reference stored at the position key_ref_ptr.
This reference is actually the offset backward from the
beginning of the hash table.
*/
uchar *get_next_key_ref(uchar *key_ref_ptr)
{
return hash_table-get_offset(size_of_key_ofs, key_ref_ptr);
}
/*
Store the linking reference to the next_key_ptr field at
the position key_ref_ptr. The position of the next_key_ptr
field is pointed to by ref. The stored reference is actually
the offset backward from the beginning of the hash table.
*/
void store_next_key_ref(uchar *key_ref_ptr, uchar *ref)
{
store_offset(size_of_key_ofs, key_ref_ptr, (ulong) (hash_table-ref));
}
/*
Check whether the reference to the next_key_ptr field at the position
key_ref_ptr contains a nil value.
*/
bool is_null_key_ref(uchar *key_ref_ptr)
{
ulong nil= 0;
return memcmp(key_ref_ptr, &nil, size_of_key_ofs) == 0;
}
/*
Set the reference to the next_key_ptr field at the position
key_ref_ptr equal to nil.
*/
void store_null_key_ref(uchar *key_ref_ptr)
{
ulong nil= 0;
store_offset(size_of_key_ofs, key_ref_ptr, nil);
}
uchar *get_next_rec_ref(uchar *ref_ptr)
{
return buff+get_offset(get_size_of_rec_offset(), ref_ptr);
}
void store_next_rec_ref(uchar *ref_ptr, uchar *ref)
{
store_offset(get_size_of_rec_offset(), ref_ptr, (ulong) (ref-buff));
}
/*
Get the position of the embedded key value for the current
record pointed to by get_curr_rec().
*/
uchar *get_curr_emb_key()
{
return get_curr_rec()+data_fields_offset;
}
/*
Get the position of the embedded key value pointed to by a reference
stored at ref_ptr. The stored reference is actually the offset from
the beginning of the join buffer.
*/
uchar *get_emb_key(uchar *ref_ptr)
{
return buff+get_offset(get_size_of_rec_offset(), ref_ptr);
}
/*
Store the reference to an embedded key at the position key_ref_ptr.
The position of the embedded key is pointed to by ref. The stored
reference is actually the offset from the beginning of the join buffer.
*/
void store_emb_key_ref(uchar *ref_ptr, uchar *ref)
{
store_offset(get_size_of_rec_offset(), ref_ptr, (ulong) (ref-buff));
}
/*
Calculate how much space in the buffer would not be occupied by
records, key entries and additional memory for the MRR buffer.
*/
ulong rem_space()
{
return max(last_key_entry-end_pos-aux_buff_size,0);
}
/*
Initialize the MRR buffer allocating some space within the join buffer.
The entire space between the last record put into the join buffer and the
last key entry added to the hash table is used for the MRR buffer.
*/
void init_mrr_buff()
{
mrr_buff.buffer= end_pos;
mrr_buff.buffer_end= last_key_entry;
}
/* Skip record from JOIN_CACHE_BKA_UNIQUE buffer if its match flag is on */
bool skip_record_if_match();
/* Using BKA_UNIQUE find matches for records from join buffer */
enum_nested_loop_state join_matching_records(bool skip_last);
/* Search for a key in the hash table of the join buffer */
bool key_search(uchar *key, uint key_len, uchar **key_ref_ptr);
public:
/*
This constructor creates an unlinked BKA_UNIQUE join cache. The cache is
to be used to join table 'tab' to the result of joining the previous tables
specified by the 'j' parameter.
The MRR mode initially is set to 'flags'.
*/
JOIN_CACHE_BKA_UNIQUE(JOIN *j, JOIN_TAB *tab, uint flags)
:JOIN_CACHE_BKA(j, tab, flags) {}
/*
This constructor creates a linked BKA_UNIQUE join cache. The cache is
to be used to join table 'tab' to the result of joining the previous tables
specified by the 'j' parameter. The parameter 'prev' specifies the cache
object to which this cache is linked.
The MRR mode initially is set to 'flags'.
*/
JOIN_CACHE_BKA_UNIQUE(JOIN *j, JOIN_TAB *tab, uint flags, JOIN_CACHE* prev)
:JOIN_CACHE_BKA(j, tab, flags, prev) {}
/* Initialize the BKA_UNIQUE cache */
int init();
/* Reset the JOIN_CACHE_BKA_UNIQUE buffer for reading/writing */
void reset(bool for_writing);
/* Add a record into the JOIN_CACHE_BKA_UNIQUE buffer */
bool put_record();
/* Read the next record from the JOIN_CACHE_BKA_UNIQUE buffer */
bool get_record();
/*
Shall check whether all records in a key chain have
their match flags set on
*/
virtual bool check_all_match_flags_for_key(uchar *key_chain_ptr);
uint get_next_key(uchar **key);
/* Get the head of the record chain attached to the current key entry */
uchar *get_curr_key_chain()
{
return get_next_rec_ref(curr_key_entry+key_entry_length-
get_size_of_rec_offset());
}
/* Check if the record combination matches the index condition */
bool skip_index_tuple(range_seq_t rseq, char *range_info);
};
#include "sql_join_cache.h"
enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool
end_of_records);
@@ -1745,6 +944,10 @@ public:
NULL : join_tab+const_tables;
}
bool setup_subquery_caches();
bool shrink_join_buffers(JOIN_TAB *jt,
ulonglong curr_space,
ulonglong needed_space);
private:
/**
TRUE if the query contains an aggregate function but has no GROUP
@@ -1874,6 +1077,15 @@ class store_key_field: public store_key
TABLE *table= copy_field.to_field->table;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
table->write_set);
/*
It looks like the next statement is needed only for the simplified
hash function over key values currently used in the BNLH join.
When the implementation of this function is replaced with a proper
full version, this statement should probably be removed.
*/
bzero(copy_field.to_ptr,copy_field.to_length);
copy_field.do_copy(&copy_field);
dbug_tmp_restore_column_map(table->write_set, old_map);
null_key= to_field->is_null();
@@ -1907,6 +1119,15 @@ public:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
table->write_set);
int res= FALSE;
/*
It looks like the next statement is needed only for the simplified
hash function over key values currently used in the BNLH join.
When the implementation of this function is replaced with a proper
full version, this statement should probably be removed.
*/
to_field->reset();
if (use_value)
item->save_val(to_field);
else
@@ -1969,7 +1190,6 @@ int report_error(TABLE *table, int error);
int safe_index_read(JOIN_TAB *tab);
COND *remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value);
int test_if_item_cache_changed(List<Cached_item> &list);
void calc_used_field_length(THD *thd, JOIN_TAB *join_tab);
int join_init_read_record(JOIN_TAB *tab);
void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key);
inline Item * and_items(Item* cond, Item *item)
@@ -1997,7 +1217,8 @@ inline bool optimizer_flag(THD *thd, uint flag)
void eliminate_tables(JOIN *join);
/* Index Condition Pushdown entry point function */
void push_index_cond(JOIN_TAB *tab, uint keyno, bool other_tbls_ok);
void push_index_cond(JOIN_TAB *tab, uint keyno, bool other_tbls_ok,
bool factor_out);
/****************************************************************************
Temporary table support for SQL Runtime