Manual merge from mysql-trunk.
Conflicts:
  - client/mysqltest.cc
  - mysql-test/collections/default.experimental
  - mysql-test/suite/rpl/t/disabled.def
  - sql/mysqld.cc
  - sql/opt_range.cc
  - sql/sp.cc
  - sql/sql_acl.cc
  - sql/sql_partition.cc
  - sql/sql_table.cc
sql/sql_cache.cc (274 changed lines)
@@ -286,6 +286,7 @@ functions:
      if (and only if) this query has a registered result set writer
      (thd->net.query_cache_query).
   4. Query_cache::invalidate
+     Query_cache::invalidate_locked_for_write
      - Called from various places to invalidate query cache based on data-
        base, table and myisam file name. During an on going invalidation
        the query cache is temporarily disabled.
@@ -812,19 +813,20 @@ uchar *query_cache_query_get_key(const uchar *record, size_t *length,
   Note on double-check locking (DCL) usage.

   Below, in query_cache_insert(), query_cache_abort() and
-  query_cache_end_of_result() we use what is called double-check
-  locking (DCL) for NET::query_cache_query. I.e. we test it first
-  without a lock, and, if positive, test again under the lock.
+  Query_cache::end_of_result() we use what is called double-check
+  locking (DCL) for Query_cache_tls::first_query_block.
+  I.e. we test it first without a lock, and, if positive, test again
+  under the lock.

-  This means that if we see 'NET::query_cache_query == 0' without a
+  This means that if we see 'first_query_block == 0' without a
   lock we will skip the operation. But this is safe here: when we
   started to cache a query, we called Query_cache::store_query(), and
-  NET::query_cache_query was set to non-zero in this thread (and the
+  'first_query_block' was set to non-zero in this thread (and the
   thread always sees results of its memory operations, mutex or not).
-  If later we see 'NET::query_cache_query == 0' without locking a
+  If later we see 'first_query_block == 0' without locking a
   mutex, that may only mean that some other thread have reset it by
   invalidating the query. Skipping the operation in this case is the
-  right thing to do, as NET::query_cache_query won't get non-zero for
+  right thing to do, as first_query_block won't get non-zero for
   this query again.

   See also comments in Query_cache::store_query() and
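The DCL scheme described in this comment can be illustrated with a minimal sketch. Everything below (CacheTls, Result_cache, append_packet, the plain pthread mutex) is an invented stand-in for illustration, not the server's actual classes or locking code:

#include <pthread.h>
#include <stddef.h>

struct CacheTls
{
  void *first_block;        // set under the cache mutex when the query becomes a writer
};

class Result_cache
{
  pthread_mutex_t mutex;
public:
  Result_cache()  { pthread_mutex_init(&mutex, NULL); }
  ~Result_cache() { pthread_mutex_destroy(&mutex); }

  void append_packet(CacheTls *tls, const char *packet, size_t length)
  {
    if (tls->first_block == NULL)       // first check, no lock: cheap skip
      return;                           // safe: once reset, it never becomes non-zero again
    pthread_mutex_lock(&mutex);
    if (tls->first_block != NULL)       // second check, now under the lock
    {
      /* ... append 'packet'/'length' to the cached result ... */
    }
    pthread_mutex_unlock(&mutex);
  }
};

The unlocked first check only ever skips work that another thread has already made unnecessary by invalidating the query, which is exactly the argument the comment makes.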
@@ -833,56 +835,71 @@ uchar *query_cache_query_get_key(const uchar *record, size_t *length,
   NOTE, however, that double-check locking is not applicable in
   'invalidate' functions, as we may erroneously skip invalidation,
   because the thread doing invalidation may never see non-zero
-  NET::query_cache_query.
+  'first_query_block'.
 */


-void query_cache_init_query(NET *net)
+/**
+  libmysql convenience wrapper to insert data into query cache.
+*/
+void query_cache_insert(const char *packet, ulong length,
+                        unsigned pkt_nr)
 {
+  THD *thd= current_thd;
+
   /*
-    It is safe to initialize 'NET::query_cache_query' without a lock
-    here, because before it will be accessed from different threads it
-    will be set in this thread under a lock, and access from the same
-    thread is always safe.
+    Current_thd can be NULL when a new connection is immediately ended
+    due to "Too many connections". thd->store_globals() has not been
+    called at this time and hence my_pthread_setspecific_ptr(THR_THD,
+    this) has not been called for this thread.
   */
-  net->query_cache_query= 0;
+
+  if (!thd)
+    return;
+
+  query_cache.insert(&thd->query_cache_tls,
+                     packet, length,
+                     pkt_nr);
 }


-/*
+/**
   Insert the packet into the query cache.
 */

-void query_cache_insert(NET *net, const char *packet, ulong length)
+void
+Query_cache::insert(Query_cache_tls *query_cache_tls,
+                    const char *packet, ulong length,
+                    unsigned pkt_nr)
 {
-  DBUG_ENTER("query_cache_insert");
+  DBUG_ENTER("Query_cache::insert");

   /* See the comment on double-check locking usage above. */
-  if (net->query_cache_query == 0)
+  if (is_disabled() || query_cache_tls->first_query_block == NULL)
     DBUG_VOID_RETURN;

   DBUG_EXECUTE_IF("wait_in_query_cache_insert",
                   debug_wait_for_kill("wait_in_query_cache_insert"); );

-  if (query_cache.try_lock())
+
+  if (try_lock())
     DBUG_VOID_RETURN;

-  Query_cache_block *query_block= (Query_cache_block*)net->query_cache_query;
-  if (!query_block)
+  Query_cache_block *query_block = query_cache_tls->first_query_block;
+  if (query_block == NULL)
   {
     /*
       We lost the writer and the currently processed query has been
       invalidated; there is nothing left to do.
     */
-    query_cache.unlock();
+    unlock();
     DBUG_VOID_RETURN;
   }

   BLOCK_LOCK_WR(query_block);
   Query_cache_query *header= query_block->query();
   Query_cache_block *result= header->result();

-  DUMP(&query_cache);
+  DUMP(this);
   DBUG_PRINT("qcache", ("insert packet %lu bytes long",length));

   /*
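Note how both the libmysql wrapper and Query_cache::insert() above give up instead of waiting whenever try_lock() reports the cache as busy, so storing a result never stalls the statement that produced it. A rough sketch of that non-blocking pattern, built on pthread_mutex_trylock with invented names (the server's own Query_cache::try_lock() is a more elaborate state machine, not shown here), might look like this:

#include <pthread.h>

class Busy_skipping_cache
{
  pthread_mutex_t mutex;
public:
  Busy_skipping_cache()  { pthread_mutex_init(&mutex, NULL); }
  ~Busy_skipping_cache() { pthread_mutex_destroy(&mutex); }

  /* Returns true when the lock could NOT be taken, matching the
     "if (try_lock()) DBUG_VOID_RETURN;" convention used in the patch. */
  bool try_lock() { return pthread_mutex_trylock(&mutex) != 0; }
  void unlock()   { pthread_mutex_unlock(&mutex); }

  void store_packet(const char *packet, unsigned long length)
  {
    if (try_lock())
      return;               // cache busy: skip caching rather than block the query
    /* ... copy 'packet' (length bytes) into the in-progress result ... */
    unlock();
  }
};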
@@ -890,8 +907,8 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
     still need structure_guard_mutex to free the query, and therefore unlock
     it later in this function.
   */
-  if (!query_cache.append_result_data(&result, length, (uchar*) packet,
-                                      query_block))
+  if (!append_result_data(&result, length, (uchar*) packet,
+                          query_block))
   {
     DBUG_PRINT("warning", ("Can't append data"));
     header->result(result);
@@ -900,80 +917,83 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
     query_cache.free_query(query_block);
     query_cache.refused++;
     // append_result_data no success => we need unlock
-    query_cache.unlock();
+    unlock();
     DBUG_VOID_RETURN;
   }

   header->result(result);
-  header->last_pkt_nr= net->pkt_nr;
+  header->last_pkt_nr= pkt_nr;
   BLOCK_UNLOCK_WR(query_block);
-  DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0););
+  DBUG_EXECUTE("check_querycache",check_integrity(0););

   DBUG_VOID_RETURN;
 }


-void query_cache_abort(NET *net)
+void
+Query_cache::abort(Query_cache_tls *query_cache_tls)
 {
   DBUG_ENTER("query_cache_abort");
+  THD *thd= current_thd;

   /* See the comment on double-check locking usage above. */
-  if (net->query_cache_query == 0)
+  if (is_disabled() || query_cache_tls->first_query_block == NULL)
     DBUG_VOID_RETURN;

-  if (query_cache.try_lock())
+  if (try_lock())
     DBUG_VOID_RETURN;

   /*
     While we were waiting another thread might have changed the status
     of the writer. Make sure the writer still exists before continue.
   */
-  Query_cache_block *query_block= ((Query_cache_block*)
-                                   net->query_cache_query);
+  Query_cache_block *query_block= query_cache_tls->first_query_block;
   if (query_block)
   {
     thd_proc_info(thd, "storing result in query cache");
-    DUMP(&query_cache);
+    DUMP(this);
     BLOCK_LOCK_WR(query_block);
     // The following call will remove the lock on query_block
-    query_cache.free_query(query_block);
-    net->query_cache_query= 0;
-    DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
+    free_query(query_block);
+    query_cache_tls->first_query_block= NULL;
+    DBUG_EXECUTE("check_querycache", check_integrity(1););
   }

-  query_cache.unlock();
+  unlock();

   DBUG_VOID_RETURN;
 }


-void query_cache_end_of_result(THD *thd)
+void Query_cache::end_of_result(THD *thd)
 {
   Query_cache_block *query_block;
-  DBUG_ENTER("query_cache_end_of_result");
+  Query_cache_tls *query_cache_tls= &thd->query_cache_tls;
+  ulonglong limit_found_rows= thd->limit_found_rows;
+  DBUG_ENTER("Query_cache::end_of_result");

   /* See the comment on double-check locking usage above. */
-  if (thd->net.query_cache_query == 0)
+  if (query_cache_tls->first_query_block == NULL)
     DBUG_VOID_RETURN;

   /* Ensure that only complete results are cached. */
-  DBUG_ASSERT(thd->main_da.is_eof());
+  DBUG_ASSERT(thd->stmt_da->is_eof());

   if (thd->killed)
   {
-    query_cache_abort(&thd->net);
+    query_cache_abort(&thd->query_cache_tls);
     DBUG_VOID_RETURN;
   }

 #ifdef EMBEDDED_LIBRARY
-  query_cache_insert(&thd->net, (char*)thd,
-                     emb_count_querycache_size(thd));
+  insert(query_cache_tls, (char*)thd,
+         emb_count_querycache_size(thd), 0);
 #endif

-  if (query_cache.try_lock())
+  if (try_lock())
     DBUG_VOID_RETURN;

-  query_block= ((Query_cache_block*) thd->net.query_cache_query);
+  query_block= query_cache_tls->first_query_block;
   if (query_block)
   {
     /*
@@ -982,7 +1002,7 @@ void query_cache_end_of_result(THD *thd)
       block, the writer should be dropped.
     */
     thd_proc_info(thd, "storing result in query cache");
-    DUMP(&query_cache);
+    DUMP(this);
     BLOCK_LOCK_WR(query_block);
     Query_cache_query *header= query_block->query();
     Query_cache_block *last_result_block;
@@ -999,8 +1019,8 @@ void query_cache_end_of_result(THD *thd)
         and removed from QC.
       */
       DBUG_ASSERT(0);
-      query_cache.free_query(query_block);
-      query_cache.unlock();
+      free_query(query_block);
+      unlock();
       DBUG_VOID_RETURN;
     }
     last_result_block= header->result()->prev;
@@ -1009,17 +1029,17 @@ void query_cache_end_of_result(THD *thd)
     if (last_result_block->length >= query_cache.min_allocation_unit + len)
       query_cache.split_block(last_result_block,len);

-    header->found_rows(current_thd->limit_found_rows);
+    header->found_rows(limit_found_rows);
     header->result()->type= Query_cache_block::RESULT;

     /* Drop the writer. */
     header->writer(0);
-    thd->net.query_cache_query= 0;
+    query_cache_tls->first_query_block= NULL;
     BLOCK_UNLOCK_WR(query_block);
-    DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););

+    DBUG_EXECUTE("check_querycache", check_integrity(1););
   }
-  query_cache.unlock();

+  unlock();
   DBUG_VOID_RETURN;
 }

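The common thread through abort() and end_of_result() above is that the per-connection writer pointer now lives in Query_cache_tls::first_query_block instead of the untyped NET::query_cache_query, and it is cleared while the cache lock is held whenever the writer is dropped. A simplified sketch of that state, with invented names standing in for the real server types, is:

#include <cstddef>

struct Query_block;                     // stand-in for Query_cache_block

struct Session_cache_state              // plays the role of Query_cache_tls
{
  Query_block *first_query_block;       // non-NULL only while this session is the active writer
};

/*
  Dropping the writer, as abort() and end_of_result() do above: the block is
  released while the cache lock is held, then the thread-local pointer is
  cleared so later unlocked (DCL) checks in this session see NULL and skip
  the cache entirely.
*/
void drop_writer(Session_cache_state *tls)
{
  if (tls->first_query_block != NULL)
  {
    /* ... free or finalize the block, cache lock already held ... */
    tls->first_query_block= NULL;
  }
}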
@@ -1059,7 +1079,7 @@ Query_cache::Query_cache(ulong query_cache_limit_arg,
    min_result_data_size(ALIGN_SIZE(min_result_data_size_arg)),
    def_query_hash_size(ALIGN_SIZE(def_query_hash_size_arg)),
    def_table_hash_size(ALIGN_SIZE(def_table_hash_size_arg)),
-   initialized(0)
+   initialized(0), m_query_cache_is_disabled(FALSE)
 {
   ulong min_needed= (ALIGN_SIZE(sizeof(Query_cache_block)) +
                      ALIGN_SIZE(sizeof(Query_cache_block_table)) +
@@ -1097,7 +1117,7 @@ ulong Query_cache::resize(ulong query_cache_size_arg)
         Drop the writer; this will cancel any attempts to store
         the processed statement associated with this writer.
       */
-      query->writer()->query_cache_query= 0;
+      query->writer()->first_query_block= NULL;
       query->writer(0);
       refused++;
     }
@@ -1159,7 +1179,9 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
     protocol (COM_EXECUTE) cannot be served to statements asking for results
     in the text protocol (COM_QUERY) and vice-versa.
   */
-  flags.result_in_binary_protocol= (unsigned int) thd->protocol->type();
+  flags.protocol_type= (unsigned int) thd->protocol->type();
+  /* PROTOCOL_LOCAL results are not cached. */
+  DBUG_ASSERT(flags.protocol_type != (unsigned int) Protocol::PROTOCOL_LOCAL);
   flags.more_results_exists= test(thd->server_status &
                                   SERVER_MORE_RESULTS_EXISTS);
   flags.in_trans= test(thd->server_status & SERVER_STATUS_IN_TRANS);
@@ -1188,7 +1210,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
 def_week_frmt: %lu, in_trans: %d, autocommit: %d",
                       (int)flags.client_long_flag,
                       (int)flags.client_protocol_41,
-                      (int)flags.result_in_binary_protocol,
+                      (int)flags.protocol_type,
                       (int)flags.more_results_exists,
                       flags.pkt_nr,
                       flags.character_set_client_num,
@@ -1260,7 +1282,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",

   /* Check if another thread is processing the same query? */
   Query_cache_block *competitor = (Query_cache_block *)
-    hash_search(&queries, (uchar*) thd->query(), tot_length);
+    my_hash_search(&queries, (uchar*) thd->query(), tot_length);
   DBUG_PRINT("qcache", ("competitor 0x%lx", (ulong) competitor));
   if (competitor == 0)
   {
@@ -1289,7 +1311,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
       {
         refused++;
         DBUG_PRINT("warning", ("tables list including failed"));
-        hash_delete(&queries, (uchar *) query_block);
+        my_hash_delete(&queries, (uchar *) query_block);
         header->unlock_n_destroy();
         free_memory_block(query_block);
         unlock();
@@ -1298,8 +1320,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
       double_linked_list_simple_include(query_block, &queries_blocks);
       inserts++;
       queries_in_cache++;
-      net->query_cache_query= (uchar*) query_block;
-      header->writer(net);
+      thd->query_cache_tls.first_query_block= query_block;
+      header->writer(&thd->query_cache_tls);
       header->tables_type(tables_type);

       unlock();
@@ -1355,7 +1377,10 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
 {
   ulonglong engine_data;
   Query_cache_query *query;
-  Query_cache_block *first_result_block, *result_block;
+#ifndef EMBEDDED_LIBRARY
+  Query_cache_block *first_result_block;
+#endif
+  Query_cache_block *result_block;
   Query_cache_block_table *block_table, *block_table_end;
   ulong tot_length;
   Query_cache_query_flags flags;
@@ -1368,8 +1393,8 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)

     See also a note on double-check locking usage above.
   */
-  if (thd->locked_tables || thd->variables.query_cache_type == 0 ||
-      query_cache_size == 0)
+  if (is_disabled() || thd->locked_tables ||
+      thd->variables.query_cache_type == 0 || query_cache_size == 0)
     goto err;

   if (!thd->lex->safe_to_cache_query)
@@ -1428,12 +1453,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
   if (query_cache_size == 0)
     goto err_unlock;

-  /*
-    Check that we haven't forgot to reset the query cache variables;
-    make sure there are no attached query cache writer to this thread.
-  */
-  DBUG_ASSERT(thd->net.query_cache_query == 0);
-
   Query_cache_block *query_block;

   tot_length= query_length + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE;
@@ -1455,7 +1474,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
   flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG);
   flags.client_protocol_41= test(thd->client_capabilities &
                                  CLIENT_PROTOCOL_41);
-  flags.result_in_binary_protocol= (unsigned int)thd->protocol->type();
+  flags.protocol_type= (unsigned int) thd->protocol->type();
   flags.more_results_exists= test(thd->server_status &
                                   SERVER_MORE_RESULTS_EXISTS);
   flags.in_trans= test(thd->server_status & SERVER_STATUS_IN_TRANS);
@@ -1482,7 +1501,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
 def_week_frmt: %lu, in_trans: %d, autocommit: %d",
                       (int)flags.client_long_flag,
                       (int)flags.client_protocol_41,
-                      (int)flags.result_in_binary_protocol,
+                      (int)flags.protocol_type,
                       (int)flags.more_results_exists,
                       flags.pkt_nr,
                       flags.character_set_client_num,
@@ -1499,8 +1518,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
                       (int)flags.autocommit));
   memcpy((uchar *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)),
          (uchar*) &flags, QUERY_CACHE_FLAGS_SIZE);
-  query_block = (Query_cache_block *) hash_search(&queries, (uchar*) sql,
-                                                  tot_length);
+  query_block = (Query_cache_block *) my_hash_search(&queries, (uchar*) sql,
+                                                     tot_length);
   /* Quick abort on unlocked data */
   if (query_block == 0 ||
       query_block->query()->result() == 0 ||
@@ -1515,7 +1534,10 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
   BLOCK_LOCK_RD(query_block);

   query = query_block->query();
-  result_block= first_result_block= query->result();
+  result_block= query->result();
+#ifndef EMBEDDED_LIBRARY
+  first_result_block= result_block;
+#endif

   if (result_block == 0 || result_block->type != Query_cache_block::RESULT)
   {
@@ -1577,7 +1599,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
     table_list.db = table->db();
     table_list.alias= table_list.table_name= table->table();
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
-    if (check_table_access(thd,SELECT_ACL,&table_list, 1, TRUE))
+    if (check_table_access(thd,SELECT_ACL,&table_list, FALSE, 1,TRUE))
     {
       DBUG_PRINT("qcache",
                  ("probably no SELECT access to %s.%s => return to normal processing",
@@ -1658,7 +1680,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",

   thd->limit_found_rows = query->found_rows();
   thd->status_var.last_query_cost= 0.0;
-  thd->main_da.disable_status();
+  thd->stmt_da->disable_status();

   BLOCK_UNLOCK_RD(query_block);
   MYSQL_QUERY_CACHE_HIT(thd->query(), (ulong) thd->limit_found_rows);
@@ -1680,6 +1702,8 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
                              my_bool using_transactions)
 {
   DBUG_ENTER("Query_cache::invalidate (table list)");
+  if (is_disabled())
+    DBUG_VOID_RETURN;

   using_transactions= using_transactions &&
     (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
@@ -1710,6 +1734,9 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
 void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
 {
   DBUG_ENTER("Query_cache::invalidate (changed table list)");
+  if (is_disabled())
+    DBUG_VOID_RETURN;
+
   THD *thd= current_thd;
   for (; tables_used; tables_used= tables_used->next)
   {
@@ -1735,8 +1762,11 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
 */
 void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used)
 {
-  THD *thd= current_thd;
   DBUG_ENTER("Query_cache::invalidate_locked_for_write");
+  if (is_disabled())
+    DBUG_VOID_RETURN;
+
+  THD *thd= current_thd;
   for (; tables_used; tables_used= tables_used->next_local)
   {
     thd_proc_info(thd, "invalidating query cache entries (table)");
@@ -1757,7 +1787,9 @@ void Query_cache::invalidate(THD *thd, TABLE *table,
                              my_bool using_transactions)
 {
   DBUG_ENTER("Query_cache::invalidate (table)");

+  if (is_disabled())
+    DBUG_VOID_RETURN;
+
   using_transactions= using_transactions &&
     (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
   if (using_transactions &&
@@ -1774,6 +1806,8 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length,
                              my_bool using_transactions)
 {
   DBUG_ENTER("Query_cache::invalidate (key)");
+  if (is_disabled())
+    DBUG_VOID_RETURN;

   using_transactions= using_transactions &&
     (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
@@ -1792,9 +1826,12 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length,

 void Query_cache::invalidate(char *db)
 {
-  bool restart= FALSE;
-
   DBUG_ENTER("Query_cache::invalidate (db)");
+  if (is_disabled())
+    DBUG_VOID_RETURN;
+
+  bool restart= FALSE;
+
   /*
     Lock the query cache and queue all invalidation attempts to avoid
     the risk of a race between invalidation, cache inserts and flushes.
@@ -1879,6 +1916,9 @@ void Query_cache::invalidate_by_MyISAM_filename(const char *filename)
 void Query_cache::flush()
 {
   DBUG_ENTER("Query_cache::flush");
+  if (is_disabled())
+    DBUG_VOID_RETURN;
+
   DBUG_EXECUTE_IF("wait_in_query_cache_flush1",
                   debug_wait_for_kill("wait_in_query_cache_flush1"););

@@ -1910,6 +1950,9 @@ void Query_cache::pack(ulong join_limit, uint iteration_limit)
 {
   DBUG_ENTER("Query_cache::pack");

+  if (is_disabled())
+    DBUG_VOID_RETURN;
+
   /*
     If the entire qc is being invalidated we can bail out early
     instead of waiting for the lock.
@@ -1967,6 +2010,15 @@ void Query_cache::init()
   pthread_cond_init(&COND_cache_status_changed, NULL);
   m_cache_lock_status= Query_cache::UNLOCKED;
   initialized = 1;
+  /*
+    If we explicitly turn off query cache from the command line query cache will
+    be disabled for the reminder of the server life time. This is because we
+    want to avoid locking the QC specific mutex if query cache isn't going to
+    be used.
+  */
+  if (global_system_variables.query_cache_type == 0)
+    query_cache.disable_query_cache();
+
   DBUG_VOID_RETURN;
 }

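The new m_query_cache_is_disabled flag set during init() is what the is_disabled() guards added throughout invalidate(), flush() and pack() consult: a server started with query_cache_type=0 never touches the cache mutex again. A small sketch of that guard pattern, with invented names rather than the server's real members, is:

#include <pthread.h>

class Guarded_cache
{
  bool m_disabled;                 // written once during init(), read-only afterwards
  pthread_mutex_t mutex;
public:
  Guarded_cache() : m_disabled(false) { pthread_mutex_init(&mutex, NULL); }
  ~Guarded_cache() { pthread_mutex_destroy(&mutex); }

  void disable()           { m_disabled= true; }     // called only at startup
  bool is_disabled() const { return m_disabled; }

  void invalidate_table(const char *table_key)
  {
    if (is_disabled())
      return;                      // cheap early exit: no mutex traffic at all
    pthread_mutex_lock(&mutex);
    /* ... drop cached results that depend on 'table_key' ... */
    pthread_mutex_unlock(&mutex);
  }
};

Reading the flag without a lock is safe in this sketch only because it is written before the cache is shared with other threads, which mirrors the reasoning the patch relies on.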
@@ -2105,8 +2157,8 @@ ulong Query_cache::init_cache()

   DUMP(this);

-  VOID(hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0,
-                 query_cache_query_get_key, 0, 0));
+  VOID(my_hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0,
+                    query_cache_query_get_key, 0, 0));
 #ifndef FN_NO_CASE_SENCE
   /*
     If lower_case_table_names!=0 then db and table names are already
@@ -2116,8 +2168,8 @@ ulong Query_cache::init_cache()
     lower_case_table_names == 0 then we should distinguish my_table
     and MY_TABLE cases and so again can use binary collation.
   */
-  VOID(hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0,
-                 query_cache_table_get_key, 0, 0));
+  VOID(my_hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0,
+                    query_cache_table_get_key, 0, 0));
 #else
   /*
     On windows, OS/2, MacOS X with HFS+ or any other case insensitive
@@ -2127,10 +2179,10 @@ ulong Query_cache::init_cache()
     file system) and so should use case insensitive collation for
     comparison.
   */
-  VOID(hash_init(&tables,
-                 lower_case_table_names ? &my_charset_bin :
-                 files_charset_info,
-                 def_table_hash_size, 0, 0,query_cache_table_get_key, 0, 0));
+  VOID(my_hash_init(&tables,
+                    lower_case_table_names ? &my_charset_bin :
+                    files_charset_info,
+                    def_table_hash_size, 0, 0,query_cache_table_get_key, 0, 0));
 #endif

   queries_in_cache = 0;
@@ -2180,8 +2232,8 @@ void Query_cache::free_cache()

   my_free((uchar*) cache, MYF(MY_ALLOW_ZERO_PTR));
   make_disabled();
-  hash_free(&queries);
-  hash_free(&tables);
+  my_hash_free(&queries);
+  my_hash_free(&tables);
   DBUG_VOID_RETURN;
 }

@@ -2293,7 +2345,7 @@ void Query_cache::free_query_internal(Query_cache_block *query_block)
   if (query->writer() != 0)
   {
     /* Tell MySQL that this query should not be cached anymore */
-    query->writer()->query_cache_query= 0;
+    query->writer()->first_query_block= NULL;
     query->writer(0);
   }
   double_linked_list_exclude(query_block, &queries_blocks);
@@ -2356,7 +2408,7 @@ void Query_cache::free_query(Query_cache_block *query_block)
                       (ulong) query_block,
                       query_block->query()->length() ));

-  hash_delete(&queries,(uchar *) query_block);
+  my_hash_delete(&queries,(uchar *) query_block);
   free_query_internal(query_block);

   DBUG_VOID_RETURN;
@@ -2685,7 +2737,7 @@ void
 Query_cache::invalidate_table_internal(THD *thd, uchar *key, uint32 key_length)
 {
   Query_cache_block *table_block=
-    (Query_cache_block*)hash_search(&tables, key, key_length);
+    (Query_cache_block*)my_hash_search(&tables, key, key_length);
   if (table_block)
   {
     Query_cache_block_table *list_root= table_block->table(0);
@@ -2883,7 +2935,7 @@ Query_cache::insert_table(uint key_len, char *key,
   THD *thd= current_thd;

   Query_cache_block *table_block=
-    (Query_cache_block *)hash_search(&tables, (uchar*) key, key_len);
+    (Query_cache_block *) my_hash_search(&tables, (uchar*) key, key_len);

   if (table_block &&
       table_block->table()->engine_data() != engine_data)
@@ -2999,7 +3051,7 @@ void Query_cache::unlink_table(Query_cache_block_table *node)
     Query_cache_block *table_block= neighbour->block();
     double_linked_list_exclude(table_block,
                                &tables_blocks);
-    hash_delete(&tables,(uchar *) table_block);
+    my_hash_delete(&tables,(uchar *) table_block);
     free_memory_block(table_block);
   }
   DBUG_VOID_RETURN;
@@ -3506,7 +3558,8 @@ Query_cache::process_and_count_tables(THD *thd, TABLE_LIST *tables_used,
 */

 TABLE_COUNTER_TYPE
-Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
+Query_cache::is_cacheable(THD *thd, size_t query_len, const char *query,
+                          LEX *lex,
                           TABLE_LIST *tables_used, uint8 *tables_type)
 {
   TABLE_COUNTER_TYPE table_count;
@@ -3683,7 +3736,7 @@ my_bool Query_cache::move_by_type(uchar **border,
       uchar *key;
       size_t key_length;
       key=query_cache_table_get_key((uchar*) block, &key_length, 0);
-      hash_first(&tables, (uchar*) key, key_length, &record_idx);
+      my_hash_first(&tables, (uchar*) key, key_length, &record_idx);

       block->destroy();
       new_block->init(len);
@@ -3717,7 +3770,7 @@ my_bool Query_cache::move_by_type(uchar **border,
       /* Fix pointer to table name */
       new_block->table()->table(new_block->table()->db() + tablename_offset);
       /* Fix hash to point at moved block */
-      hash_replace(&tables, &record_idx, (uchar*) new_block);
+      my_hash_replace(&tables, &record_idx, (uchar*) new_block);

       DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
                             len, (ulong) new_block, (ulong) *border));
@@ -3743,7 +3796,7 @@ my_bool Query_cache::move_by_type(uchar **border,
       uchar *key;
       size_t key_length;
       key=query_cache_query_get_key((uchar*) block, &key_length, 0);
-      hash_first(&queries, (uchar*) key, key_length, &record_idx);
+      my_hash_first(&queries, (uchar*) key, key_length, &record_idx);
       // Move table of used tables
       memmove((char*) new_block->table(0), (char*) block->table(0),
               ALIGN_SIZE(n_tables*sizeof(Query_cache_block_table)));
@@ -3805,13 +3858,13 @@ my_bool Query_cache::move_by_type(uchar **border,
         If someone is writing to this block, inform the writer that the block
         has been moved.
       */
-      NET *net = new_block->query()->writer();
-      if (net != 0)
+      Query_cache_tls *query_cache_tls= new_block->query()->writer();
+      if (query_cache_tls != NULL)
       {
-        net->query_cache_query= (uchar*) new_block;
+        query_cache_tls->first_query_block= new_block;
       }
       /* Fix hash to point at moved block */
-      hash_replace(&queries, &record_idx, (uchar*) new_block);
+      my_hash_replace(&queries, &record_idx, (uchar*) new_block);
       DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
                             len, (ulong) new_block, (ulong) *border));
       break;
@@ -4222,13 +4275,13 @@ my_bool Query_cache::check_integrity(bool locked)
   if (!locked)
     lock_and_suspend();

-  if (hash_check(&queries))
+  if (my_hash_check(&queries))
   {
     DBUG_PRINT("error", ("queries hash is damaged"));
     result = 1;
   }

-  if (hash_check(&tables))
+  if (my_hash_check(&tables))
   {
     DBUG_PRINT("error", ("tables hash is damaged"));
     result = 1;
@@ -4395,7 +4448,7 @@ my_bool Query_cache::check_integrity(bool locked)
                           (ulong) block, (uint) block->type));
       size_t length;
       uchar *key = query_cache_query_get_key((uchar*) block, &length, 0);
-      uchar* val = hash_search(&queries, key, length);
+      uchar* val = my_hash_search(&queries, key, length);
       if (((uchar*)block) != val)
       {
         DBUG_PRINT("error", ("block 0x%lx found in queries hash like 0x%lx",
@@ -4430,7 +4483,7 @@ my_bool Query_cache::check_integrity(bool locked)
                           (ulong) block, (uint) block->type));
       size_t length;
       uchar *key = query_cache_table_get_key((uchar*) block, &length, 0);
-      uchar* val = hash_search(&tables, key, length);
+      uchar* val = my_hash_search(&tables, key, length);
       if (((uchar*)block) != val)
       {
         DBUG_PRINT("error", ("block 0x%lx found in tables hash like 0x%lx",
@@ -4670,3 +4723,4 @@ err2:
 #endif /* DBUG_OFF */

 #endif /*HAVE_QUERY_CACHE*/
+