This is based on the userstatv2 patch from Percona and OurDelta.
The original code comes, as far as I know, from Google (Mark Callaghan's team) with additional work from Percona, OurDelta and Weldon Whipple. This code provides the same functionality, but with a lot of changes to make it faster and to better fit the MariaDB infrastructure.

Added new status variables:
- Com_show_client_statistics, Com_show_index_statistics, Com_show_table_statistics, Com_show_user_statistics
- Access_denied_errors, Busy_time (clock time), Binlog_bytes_written, Cpu_time, Empty_queries, Rows_sent, Rows_read

Added new variable / startup option 'userstat' to control whether user statistics are enabled.
Added my_getcputime(); returns the CPU time used by this thread.

New FLUSH commands:
- FLUSH SLOW QUERY LOG
- FLUSH TABLE_STATISTICS
- FLUSH INDEX_STATISTICS
- FLUSH USER_STATISTICS
- FLUSH CLIENT_STATISTICS

New SHOW commands:
- SHOW CLIENT_STATISTICS
- SHOW USER_STATISTICS
- SHOW TABLE_STATISTICS
- SHOW INDEX_STATISTICS

New INFORMATION_SCHEMA tables:
- CLIENT_STATISTICS
- USER_STATISTICS
- INDEX_STATISTICS
- TABLE_STATISTICS

Added support for all new FLUSH commands to mysqladmin.
Added handler::ha_... wrappers for all handler read calls to do statistics counting:
- Changed all code to use the new ha_... calls
- Count number of rows read, rows changed and rows read through an index
Added counting of the number of bytes sent to the binary log (status variable Binlog_bytes_written).
Added counting of access denied errors (status variable Access_denied_errors).

Bugs fixed:
- Fixed bug in add_to_status() and add_diff_to_status() where longlong variables were treated as long
- CLOCK_GETTIME was not properly working on Linux

client/mysqladmin.cc:
  Added support for all new flush commands and some common combinations:
  flush-slow-log, flush-table-statistics, flush-index-statistics,
  flush-user-statistics, flush-client-statistics, flush-all-status,
  flush-all-statistics
configure.in:
  Added check for whether clock_gettime needs librt.
  (Fixes Bug #37639 clock_gettime is never used/enabled in Linux/Unix)
include/my_sys.h:
  Added my_getcputime()
include/mysql_com.h:
  Added LIST_PROCESS_HOST_LEN & new REFRESH target defines
mysql-test/r/information_schema.result:
  New information schema tables added
mysql-test/r/information_schema_all_engines.result:
  New information schema tables added
mysql-test/r/information_schema_db.result:
  New information schema tables added
mysql-test/r/log_slow.result:
  Added test that FLUSH SLOW QUERY LOGS is accepted
mysql-test/r/status_user.result:
  Basic testing of user, client, table and index statistics
mysql-test/t/log_slow.test:
  Added test that FLUSH SLOW QUERY LOGS is accepted
mysql-test/t/status_user-master.opt:
  Ensure that we get a fresh restart before running status_user.test
mysql-test/t/status_user.test:
  Basic testing of user, client, table and index statistics
mysys/my_getsystime.c:
  Added my_getcputime(); returns the CPU time used by this thread.
sql/authors.h:
  Updated authors to list core and original MySQL developers first.
sql/event_data_objects.cc:
  Updated call to mysql_reset_thd_for_next_command()
sql/event_db_repository.cc:
  Changed to use new ha_... calls
sql/filesort.cc:
  Changed to use new ha_... calls
sql/ha_partition.cc:
  Changed to use new ha_... calls
  Fixed comment syntax
sql/handler.cc:
  Changed to use new ha_... calls
  Reset table statistics
  Added code to update global table and index status
  Added counting of rows changed
sql/handler.h:
  Added table and index statistics variables
  Added function reset_statistics()
  Added handler::ha_... wrappers for all handler read calls to do statistics counting
  Protected all normal read calls to ensure the new calls are used in the server
  Made ha_partition a friend class so that partition code can call the old read functions
sql/item_subselect.cc:
  Changed to use new ha_... calls
sql/lex.h:
  Added keywords for new information schema tables and flush commands
sql/log.cc:
  Added flush_slow_log()
  Added counting of number of bytes sent to the binary log
  Removed unneeded test of thd (it is used before, so it is safe to use)
  Added THD object to MYSQL_BIN_LOG::write_cache() to simplify statistics counting
sql/log.h:
  Added new parameter to write_cache()
  Added flush_slow_log() functions
sql/log_event.cc:
  Updated call to mysql_reset_thd_for_next_command()
  Changed to use new ha_... calls
sql/log_event_old.cc:
  Updated call to mysql_reset_thd_for_next_command()
  Changed to use new ha_... calls
sql/mysql_priv.h:
  Updated call to mysql_reset_thd_for_next_command()
  Added new statistics functions and variables needed by these
sql/mysqld.cc:
  Added new statistics variables and structures to handle these
  Added new status variables:
  - Com_show_client_statistics, Com_show_index_statistics, Com_show_table_statistics, Com_show_user_statistics
  - Access_denied_errors, Busy_time (clock time), Binlog_bytes_written, Cpu_time, Empty_queries, Rows_sent, Rows_read
  Added new option 'userstat' to control whether user statistics are enabled
sql/opt_range.cc:
  Changed to use new ha_... calls
sql/opt_range.h:
  Changed to use new ha_... calls
sql/opt_sum.cc:
  Changed to use new ha_... calls
sql/records.cc:
  Changed to use new ha_... calls
sql/set_var.cc:
  Added variable 'userstat'
sql/sp.cc:
  Changed to use new ha_... calls
sql/sql_acl.cc:
  Changed to use new ha_... calls
  Added counting of access_denied_errors
sql/sql_base.cc:
  Added call to statistics functions
sql/sql_class.cc:
  Added usage of org_status_var to store status variables at the start of a command
  Added functions THD::update_stats(), THD::update_all_stats()
  Fixed bug in add_to_status() and add_diff_to_status() where longlong variables were treated as long
sql/sql_class.h:
  Added new status variables to status_var
  Moved variables that were not ulong to the end of status_var
  Added variables to THD for storing temporary values during statistics counting
sql/sql_connect.cc:
  Variables and functions to calculate user and client statistics
  Added counting of access_denied_errors and lost_connections
sql/sql_cursor.cc:
  Changed to use new ha_... calls
sql/sql_handler.cc:
  Changed to use new ha_... calls
sql/sql_help.cc:
  Changed to use new ha_... calls
sql/sql_insert.cc:
  Changed to use new ha_... calls
sql/sql_lex.h:
  Added SQLCOM_SHOW_USER_STATS, SQLCOM_SHOW_TABLE_STATS, SQLCOM_SHOW_INDEX_STATS, SQLCOM_SHOW_CLIENT_STATS
sql/sql_parse.cc:
  Added handling of:
  - SHOW CLIENT_STATISTICS
  - SHOW USER_STATISTICS
  - SHOW TABLE_STATISTICS
  - SHOW INDEX_STATISTICS
  Added handling of new FLUSH commands:
  - FLUSH SLOW QUERY LOGS
  - FLUSH TABLE_STATISTICS
  - FLUSH INDEX_STATISTICS
  - FLUSH USER_STATISTICS
  - FLUSH CLIENT_STATISTICS
  Added THD parameter to mysql_reset_thd_for_next_command()
  Added initialization and calls to user statistics functions
  Added increment of statistics variables empty_queries, rows_sent and access_denied_errors
  Added counting of CPU time per query
sql/sql_plugin.cc:
  Changed to use new ha_... calls
sql/sql_prepare.cc:
  Updated call to mysql_reset_thd_for_next_command()
sql/sql_select.cc:
  Changed to use new ha_... calls
  Indentation changes
sql/sql_servers.cc:
  Changed to use new ha_... calls
sql/sql_show.cc:
  Added counting of access denied errors
  Added functions for new information schema tables:
  - CLIENT_STATISTICS
  - USER_STATISTICS
  - INDEX_STATISTICS
  - TABLE_STATISTICS
  Changed to use new ha_... calls
sql/sql_table.cc:
  Changed to use new ha_... calls
sql/sql_udf.cc:
  Changed to use new ha_... calls
sql/sql_update.cc:
  Changed to use new ha_... calls
sql/sql_yacc.yy:
  Added new SHOW and FLUSH commands
sql/structs.h:
  Added name_length to KEY to avoid some strlen calls
  Added cache_name to KEY for fast storage of the key value in the cache
  Added structs USER_STATS, TABLE_STATS, INDEX_STATS
  Added function prototypes for statistics functions
sql/table.cc:
  Store db+table+index name into keyinfo->cache_name
sql/table.h:
  Added new information schema tables
sql/tztime.cc:
  Changed to use new ha_... calls
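To make the wrapper idea above concrete, here is a minimal, self-contained C++ sketch of the pattern the commit describes: a non-virtual ha_... entry point on the handler counts the row that was read and then delegates to the engine-specific virtual read call. The toy_handler class, its members and the counter names below are illustrative stand-ins, not MariaDB's actual handler API.

// Illustrative sketch only (not MariaDB source): the ha_... wrapper pattern.
// A non-virtual ha_* entry point updates per-handler statistics and then
// delegates to the engine-specific virtual read call.
#include <cstdio>

struct toy_handler                      // stand-in for the real handler class
{
  unsigned long long rows_read;         // per-handler statistics counter

  toy_handler() : rows_read(0) {}
  virtual ~toy_handler() {}

  // Engine-specific implementation; virtual in the real handler class.
  virtual int index_next(int *record)
  {
    *record= 42;                        // pretend a row was fetched
    return 0;                           // 0 means success
  }

  // Server code calls the wrapper, so counting happens in exactly one place.
  int ha_index_next(int *record)
  {
    int error= index_next(record);
    if (!error)
      rows_read++;
    return error;
  }
};

int main()
{
  toy_handler h;
  int record;
  for (int i= 0; i < 3; i++)
    h.ha_index_next(&record);
  std::printf("rows_read=%llu\n", h.rows_read);   // prints rows_read=3
  return 0;
}

In the actual patch the counts gathered this way feed the per-table, per-index, per-user and per-client statistics exposed through the new SHOW commands and INFORMATION_SCHEMA tables; the sketch only shows where the increment lives.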
sql/sql_select.cc
@@ -10603,8 +10603,9 @@ error:
 static bool open_tmp_table(TABLE *table)
 {
   int error;
-  if ((error=table->file->ha_open(table, table->s->table_name.str,O_RDWR,
-                                  HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
+  if ((error= table->file->ha_open(table, table->s->table_name.str, O_RDWR,
+                                   HA_OPEN_TMP_TABLE |
+                                   HA_OPEN_INTERNAL_TABLE)))
   {
     table->file->print_error(error,MYF(0)); /* purecov: inspected */
     table->db_stat=0;
@@ -10949,7 +10950,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
     is safe as this is a temporary MyISAM table without timestamp/autoincrement
     or partitioning.
   */
-  while (!table->file->rnd_next(new_table.record[1]))
+  while (!table->file->ha_rnd_next(new_table.record[1]))
   {
     write_err= new_table.file->ha_write_row(new_table.record[1]);
     DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
@@ -11746,10 +11747,10 @@ int safe_index_read(JOIN_TAB *tab)
 {
   int error;
   TABLE *table= tab->table;
-  if ((error=table->file->index_read_map(table->record[0],
-                                         tab->ref.key_buff,
-                                         make_prev_keypart_map(tab->ref.key_parts),
-                                         HA_READ_KEY_EXACT)))
+  if ((error= table->file->ha_index_read_map(table->record[0],
+                                             tab->ref.key_buff,
+                                             make_prev_keypart_map(tab->ref.key_parts),
+                                             HA_READ_KEY_EXACT)))
     return report_error(table, error);
   return 0;
 }
@@ -11858,8 +11859,8 @@ join_read_system(JOIN_TAB *tab)
   int error;
   if (table->status & STATUS_GARBAGE)   // If first read
   {
-    if ((error=table->file->read_first_row(table->record[0],
-                                           table->s->primary_key)))
+    if ((error= table->file->ha_read_first_row(table->record[0],
+                                               table->s->primary_key)))
     {
       if (error != HA_ERR_END_OF_FILE)
         return report_error(table, error);
@@ -11901,10 +11902,10 @@ join_read_const(JOIN_TAB *tab)
       error=HA_ERR_KEY_NOT_FOUND;
     else
     {
-      error=table->file->index_read_idx_map(table->record[0],tab->ref.key,
-                                            (uchar*) tab->ref.key_buff,
-                                            make_prev_keypart_map(tab->ref.key_parts),
-                                            HA_READ_KEY_EXACT);
+      error= table->file->ha_index_read_idx_map(table->record[0],tab->ref.key,
+                                                (uchar*) tab->ref.key_buff,
+                                                make_prev_keypart_map(tab->ref.key_parts),
+                                                HA_READ_KEY_EXACT);
     }
     if (error)
     {
@@ -11949,10 +11950,10 @@ join_read_key(JOIN_TAB *tab)
       table->status=STATUS_NOT_FOUND;
       return -1;
     }
-    error=table->file->index_read_map(table->record[0],
-                                      tab->ref.key_buff,
-                                      make_prev_keypart_map(tab->ref.key_parts),
-                                      HA_READ_KEY_EXACT);
+    error= table->file->ha_index_read_map(table->record[0],
+                                          tab->ref.key_buff,
+                                          make_prev_keypart_map(tab->ref.key_parts),
+                                          HA_READ_KEY_EXACT);
     if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
   }
@@ -12005,10 +12006,10 @@ join_read_always_key(JOIN_TAB *tab)
 
   if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
     return -1;
-  if ((error=table->file->index_read_map(table->record[0],
-                                         tab->ref.key_buff,
-                                         make_prev_keypart_map(tab->ref.key_parts),
-                                         HA_READ_KEY_EXACT)))
+  if ((error= table->file->ha_index_read_map(table->record[0],
+                                             tab->ref.key_buff,
+                                             make_prev_keypart_map(tab->ref.key_parts),
+                                             HA_READ_KEY_EXACT)))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
@@ -12039,9 +12040,9 @@ join_read_last_key(JOIN_TAB *tab)
   }
   if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
     return -1;
-  if ((error=table->file->index_read_last_map(table->record[0],
-                                              tab->ref.key_buff,
-                                              make_prev_keypart_map(tab->ref.key_parts))))
+  if ((error= table->file->ha_index_read_last_map(table->record[0],
+                                                  tab->ref.key_buff,
+                                                  make_prev_keypart_map(tab->ref.key_parts))))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
@@ -12066,9 +12067,9 @@ join_read_next_same(READ_RECORD *info)
   TABLE *table= info->table;
   JOIN_TAB *tab=table->reginfo.join_tab;
 
-  if ((error=table->file->index_next_same(table->record[0],
-                                          tab->ref.key_buff,
-                                          tab->ref.key_length)))
+  if ((error= table->file->ha_index_next_same(table->record[0],
+                                              tab->ref.key_buff,
+                                              tab->ref.key_length)))
   {
     if (error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
@@ -12086,7 +12087,7 @@ join_read_prev_same(READ_RECORD *info)
   TABLE *table= info->table;
   JOIN_TAB *tab=table->reginfo.join_tab;
 
-  if ((error=table->file->index_prev(table->record[0])))
+  if ((error= table->file->ha_index_prev(table->record[0])))
     return report_error(table, error);
   if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key,
                       tab->ref.key_length))
@@ -12158,7 +12159,7 @@ join_read_first(JOIN_TAB *tab)
     error= table->file->ha_index_init(tab->index, tab->sorted);
   if (!error)
     error= table->file->prepare_index_scan();
-  if (error || (error=tab->table->file->index_first(tab->table->record[0])))
+  if (error || (error=tab->table->file->ha_index_first(tab->table->record[0])))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       report_error(table, error);
@@ -12172,7 +12173,7 @@ static int
 join_read_next(READ_RECORD *info)
 {
   int error;
-  if ((error=info->file->index_next(info->record)))
+  if ((error= info->file->ha_index_next(info->record)))
     return report_error(info->table, error);
   return 0;
 }
@@ -12199,7 +12200,7 @@ join_read_last(JOIN_TAB *tab)
     error= table->file->ha_index_init(tab->index, 1);
   if (!error)
     error= table->file->prepare_index_scan();
-  if (error || (error= tab->table->file->index_last(tab->table->record[0])))
+  if (error || (error= tab->table->file->ha_index_last(tab->table->record[0])))
     return report_error(table, error);
   return 0;
 }
@@ -12209,7 +12210,7 @@ static int
 join_read_prev(READ_RECORD *info)
 {
   int error;
-  if ((error= info->file->index_prev(info->record)))
+  if ((error= info->file->ha_index_prev(info->record)))
     return report_error(info->table, error);
   return 0;
 }
@@ -12234,7 +12235,7 @@ join_ft_read_first(JOIN_TAB *tab)
 #endif
   table->file->ft_init();
 
-  if ((error= table->file->ft_read(table->record[0])))
+  if ((error= table->file->ha_ft_read(table->record[0])))
     return report_error(table, error);
   return 0;
 }
@@ -12243,7 +12244,7 @@ static int
 join_ft_read_next(READ_RECORD *info)
 {
   int error;
-  if ((error= info->file->ft_read(info->table->record[0])))
+  if ((error= info->file->ha_ft_read(info->table->record[0])))
     return report_error(info->table, error);
   return 0;
 }
@@ -12535,7 +12536,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     {
       int error;
       join->found_records++;
-      if ((error=table->file->ha_write_row(table->record[0])))
+      if ((error= table->file->ha_write_row(table->record[0])))
      {
        if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
          goto end;
@@ -12590,15 +12591,15 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     if (item->maybe_null)
       group->buff[-1]= (char) group->field->is_null();
   }
-  if (!table->file->index_read_map(table->record[1],
-                                   join->tmp_table_param.group_buff,
-                                   HA_WHOLE_KEY,
-                                   HA_READ_KEY_EXACT))
+  if (!table->file->ha_index_read_map(table->record[1],
+                                      join->tmp_table_param.group_buff,
+                                      HA_WHOLE_KEY,
+                                      HA_READ_KEY_EXACT))
   { /* Update old record */
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->ha_update_row(table->record[1],
-                                          table->record[0])))
+    if ((error= table->file->ha_update_row(table->record[1],
+                                           table->record[0])))
     {
       table->file->print_error(error,MYF(0)); /* purecov: inspected */
       DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -12621,7 +12622,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   }
   init_tmptable_sum_functions(join->sum_funcs);
   copy_funcs(join->tmp_table_param.items_to_copy);
-  if ((error=table->file->ha_write_row(table->record[0])))
+  if ((error= table->file->ha_write_row(table->record[0])))
   {
     if (create_internal_tmp_table_from_heap(join->thd, table,
                                             &join->tmp_table_param,
@@ -12662,7 +12663,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   copy_fields(&join->tmp_table_param); // Groups are copied twice.
   copy_funcs(join->tmp_table_param.items_to_copy);
 
-  if (!(error=table->file->ha_write_row(table->record[0])))
+  if (!(error= table->file->ha_write_row(table->record[0])))
    join->send_records++; // New group
  else
  {
@@ -12671,15 +12672,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
      table->file->print_error(error,MYF(0)); /* purecov: inspected */
      DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
    }
-    if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
+    if (table->file->ha_rnd_pos(table->record[1],table->file->dup_ref))
    {
      table->file->print_error(error,MYF(0)); /* purecov: inspected */
      DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
    }
    restore_record(table,record[1]);
    update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->ha_update_row(table->record[1],
-                                          table->record[0])))
+    if ((error= table->file->ha_update_row(table->record[1],
+                                           table->record[0])))
    {
      table->file->print_error(error,MYF(0)); /* purecov: inspected */
      DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -14016,7 +14017,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
   new_record=(char*) table->record[1]+offset;
 
   file->ha_rnd_init(1);
-  error=file->rnd_next(record);
+  error= file->ha_rnd_next(record);
   for (;;)
   {
     if (thd->killed)
@@ -14035,9 +14036,9 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
     }
     if (having && !having->val_int())
     {
-      if ((error=file->ha_delete_row(record)))
+      if ((error= file->ha_delete_row(record)))
        goto err;
-      error=file->rnd_next(record);
+      error= file->ha_rnd_next(record);
      continue;
    }
    if (copy_blobs(first_field))
@@ -14052,7 +14053,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
     bool found=0;
     for (;;)
     {
-      if ((error=file->rnd_next(record)))
+      if ((error= file->ha_rnd_next(record)))
      {
        if (error == HA_ERR_RECORD_DELETED)
          continue;
@@ -14062,7 +14063,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
      }
      if (compare_record(table, first_field) == 0)
      {
-        if ((error=file->ha_delete_row(record)))
+        if ((error= file->ha_delete_row(record)))
          goto err;
      }
      else if (!found)
@@ -14152,7 +14153,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
      error=0;
      goto err;
    }
-    if ((error=file->rnd_next(record)))
+    if ((error= file->ha_rnd_next(record)))
    {
      if (error == HA_ERR_RECORD_DELETED)
        continue;
@@ -14162,7 +14163,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
    }
    if (having && !having->val_int())
    {
-      if ((error=file->ha_delete_row(record)))
+      if ((error= file->ha_delete_row(record)))
        goto err;
      continue;
    }
@@ -14179,7 +14180,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
    if (hash_search(&hash, org_key_pos, key_length))
    {
      /* Duplicated found ; Remove the row */
-      if ((error=file->ha_delete_row(record)))
+      if ((error= file->ha_delete_row(record)))
        goto err;
    }
    else