
MDEV-16188 Use in-memory PK filters built from range index scans

This patch contains a full implementation of the optimization that
allows using in-memory rowid / primary key filters built for range
conditions over indexes. In many cases the use of such filters reduces
the number of disk seeks spent on fetching table rows.
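
The core idea can be pictured with the minimal, self-contained sketch
below. The class RowidFilter and the rowid_t alias are illustrative
stand-ins only, not the classes introduced by this patch: rowids are
collected from a cheap range scan over one index, sorted once, and each
candidate row found through another access path is probed against the
container before the expensive row fetch is done.

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  // Illustrative stand-in for an engine rowid; the real patch works with
  // engine-specific formats (e.g. InnoDB primary keys, MyISAM positions).
  typedef std::uint64_t rowid_t;

  class RowidFilter                  // hypothetical, for illustration only
  {
  public:
    void add(rowid_t id)      { rowids.push_back(id); }
    void done_building()      { std::sort(rowids.begin(), rowids.end()); }

    // Probe before fetching the table row: if the rowid is not in the
    // filter, the (usually random) disk seek for that row is skipped.
    bool check(rowid_t id) const
    { return std::binary_search(rowids.begin(), rowids.end(), id); }

  private:
    std::vector<rowid_t> rowids;
  };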

In this implementation the choice of which filter to apply (if any)
is made purely on cost-based considerations.
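
Roughly, a candidate filter pays off only when the cost of building it
plus the cheap per-row probes is smaller than the row fetches it saves.
The helper below is a back-of-the-envelope model of that trade-off; the
names and cost terms are assumptions for illustration, not the
optimizer's actual cost functions.

  // Assumed, simplified model of the filter decision.
  struct Filter_candidate
  {
    double build_cost;   // cost of the range scan that fills the filter
    double selectivity;  // fraction of probed rows that pass the filter
  };

  static bool filter_is_worth_using(const Filter_candidate &f,
                                    double candidate_rows,
                                    double row_fetch_cost,
                                    double probe_cost)
  {
    // Without a filter every candidate row pays a full row fetch; with a
    // filter only the surviving fraction does, but the filter must be
    // built first and every candidate pays a probe.
    double cost_without= candidate_rows * row_fetch_cost;
    double cost_with=    f.build_cost +
                         candidate_rows * probe_cost +
                         candidate_rows * f.selectivity * row_fetch_cost;
    return cost_with < cost_without;
  }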

This implementation re-architected the partial implementation of
the feature pushed by Galina Shalygina in commit
8d5a11122c.

Besides that, this patch contains a better implementation of the
generic handler function handler::multi_range_read_info_const() that
takes into account the gaps between ranges when calculating the cost
of range index scans. It also contains some corrections to the
implementation of the handler function records_in_range() for MyISAM.
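
The gap handling can be pictured with the simplified costing loop
below. It is only a sketch under assumed cost constants; the actual
logic lives in handler::multi_range_read_info_const() and uses the
handler's own cost model.

  #include <cstddef>
  #include <vector>

  // Assumed model: a range is described by the positions of its first and
  // last keys within the index, expressed as fractions of the index (0..1).
  struct Index_range
  {
    double first_key_pos;
    double last_key_pos;
  };

  static double cost_of_range_scans(const std::vector<Index_range> &ranges,
                                    double index_pages,
                                    double seek_cost,
                                    double page_read_cost)
  {
    double cost= 0.0;
    for (size_t i= 0; i < ranges.size(); i++)
    {
      // Pages covered by the range itself are read more or less
      // sequentially.
      cost+= (ranges[i].last_key_pos - ranges[i].first_key_pos) *
             index_pages * page_read_cost;

      // The gap to the previous range decides whether reaching this range
      // costs a fresh seek or just a bit of extra forward reading.
      double gap_pages= (i == 0) ? index_pages
                                 : (ranges[i].first_key_pos -
                                    ranges[i - 1].last_key_pos) * index_pages;
      cost+= (gap_pages > 1.0) ? seek_cost : gap_pages * page_read_cost;
    }
    return cost;
  }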

This patch supports the feature for InnoDB and MyISAM.
Author: Igor Babaev
Date:   2019-02-03 14:56:12 -08:00
Commit: 658128af43
Parent: 5f46670bd0
190 changed files with 5685 additions and 2017 deletions


@@ -380,11 +380,13 @@ int print_explain_row(select_result_sink *result,
   item_list.push_back(item, mem_root);

   /* 'rows' */
+  StringBuffer<64> rows_str;
   if (rows)
   {
+    rows_str.append_ulonglong((ulonglong)(*rows));
     item_list.push_back(new (mem_root)
-                        Item_int(thd, *rows, MY_INT64_NUM_DECIMAL_DIGITS),
-                        mem_root);
+                        Item_string_sys(thd, rows_str.ptr(),
+                                        rows_str.length()), mem_root);
   }
   else
     item_list.push_back(item_null, mem_root);
@@ -1113,12 +1115,14 @@ void Explain_table_access::fill_key_str(String *key_str, bool is_json) const
     - this is just used key length for ref/range
     - for index_merge, it is a comma-separated list of lengths.
     - for hash join, it is key_len:pseudo_key_len
+    - [tabular form only] rowid filter length is added after "|".

-  The column looks identical in tabular and json forms. In JSON, we consider
-  the column legacy, it is superceded by used_key_parts.
+  In JSON, we consider this column to be legacy, it is superceded by
+  used_key_parts.
 */

-void Explain_table_access::fill_key_len_str(String *key_len_str) const
+void Explain_table_access::fill_key_len_str(String *key_len_str,
+                                            bool is_json) const
 {
   bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
                type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
@@ -1147,13 +1151,12 @@ void Explain_table_access::fill_key_len_str(String *key_len_str) const
     key_len_str->append(buf, length);
   }

-  if (key.get_filter_key_length() != (uint)-1)
+  if (!is_json && rowid_filter)
   {
-    char buf[64];
-    size_t length;
-    key_len_str->append(',');
-    length= longlong10_to_str(key.get_filter_key_length(), buf, 10) - buf;
-    key_len_str->append(buf, length);
+    key_len_str->append('|');
+    StringBuffer<64> filter_key_len;
+    rowid_filter->quick->print_key_len(&filter_key_len);
+    key_len_str->append(filter_key_len);
   }
 }
@@ -1240,7 +1243,18 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
   }

   /* `type` column */
-  push_str(thd, &item_list, join_type_str[type]);
+  StringBuffer<64> join_type_buf;
+  if (rowid_filter == NULL)
+    push_str(thd, &item_list, join_type_str[type]);
+  else
+  {
+    join_type_buf.append(join_type_str[type]);
+    join_type_buf.append("|filter");
+    item_list.push_back(new (mem_root)
+                        Item_string_sys(thd, join_type_buf.ptr(),
+                                        join_type_buf.length()),
+                        mem_root);
+  }

   /* `possible_keys` column */
   StringBuffer<64> possible_keys_buf;
@@ -1252,6 +1266,14 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
   /* `key` */
   StringBuffer<64> key_str;
   fill_key_str(&key_str, false);
+
+  if (rowid_filter)
+  {
+    key_str.append("|");
+    StringBuffer<64> rowid_key_str;
+    rowid_filter->quick->print_key(&rowid_key_str);
+    key_str.append(rowid_key_str);
+  }

   if (key_str.length() > 0)
     push_string(thd, &item_list, &key_str);
@@ -1260,7 +1282,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
   /* `key_len` */
   StringBuffer<64> key_len_str;
-  fill_key_len_str(&key_len_str);
+  fill_key_len_str(&key_len_str, false);

   if (key_len_str.length() > 0)
     push_string(thd, &item_list, &key_len_str);
@@ -1288,10 +1310,10 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
   {
     rows_str.append_ulonglong((ulonglong)rows);

-    if (is_filter_set())
+    if (rowid_filter)
     {
       rows_str.append(" (");
-      rows_str.append_ulonglong(filter_perc);
+      rows_str.append_ulonglong(round(rowid_filter->selectivity * 100.0));
       rows_str.append("%)");
     }
     item_list.push_back(new (mem_root)
@@ -1376,7 +1398,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
     extra_buf.append(STRING_WITH_LEN("Using filesort"));
   }

-  if (is_filter_set())
+  if (rowid_filter)
   {
     if (first)
       first= false;
@@ -1573,6 +1595,19 @@ void add_json_keyset(Json_writer *writer, const char *elem_name,
 }


+void Explain_rowid_filter::print_explain_json(Explain_query *query,
+                                              Json_writer *writer,
+                                              bool is_analyze)
+{
+  Json_writer_nesting_guard guard(writer);
+  writer->add_member("rowid_filter").start_object();
+  quick->print_json(writer);
+  writer->add_member("rows").add_ll(rows);
+  writer->add_member("selectivity_pct").add_double(selectivity * 100.0);
+  writer->end_object(); // rowid_filter
+}
+
+
 void Explain_table_access::print_explain_json(Explain_query *query,
                                               Json_writer *writer,
                                               bool is_analyze)
@@ -1645,7 +1680,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
   /* `key_length` */
   StringBuffer<64> key_len_str;
-  fill_key_len_str(&key_len_str);
+  fill_key_len_str(&key_len_str, true);

   if (key_len_str.length())
     writer->add_member("key_length").add_str(key_len_str);
@@ -1670,6 +1705,11 @@ void Explain_table_access::print_explain_json(Explain_query *query,
   if (!ref_list.is_empty())
     print_json_array(writer, "ref", ref_list);

+  if (rowid_filter)
+  {
+    rowid_filter->print_explain_json(query, writer, is_analyze);
+  }
+
   /* r_loops (not present in tabular output) */
   if (is_analyze)
   {