
Initial threadpool implementation for MariaDB 5.5

Vladislav Vaintroub
2011-12-08 19:17:49 +01:00
parent 5e7b949e61
commit e91bbca5fb
24 changed files with 2553 additions and 190 deletions

View File

@ -22,3 +22,10 @@
# The below was used for really old versions of FreeBSD, roughly: before 5.1.9
# ADD_DEFINITIONS(-DHAVE_BROKEN_REALPATH)
# Use atomic builtins
IF(CMAKE_SIZEOF_VOID_P EQUAL 4 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "i386")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=i686")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=i686")
SET(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -march=i686")
ENDIF()
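For context, the -march=i686 flags above exist because the tree's "atomic builtins" (GCC's __sync_* family) compile to LOCK CMPXCHG/XADD instructions that the plain i386 baseline lacks. A minimal, illustrative-only sketch of such code:

/* Illustrative sketch: code that needs -march=i686 (or newer) on 32-bit x86. */
#include <stdio.h>

static volatile int counter= 0;

int main(void)
{
  __sync_fetch_and_add(&counter, 1);                         /* atomic increment */
  int swapped= __sync_bool_compare_and_swap(&counter, 1, 5); /* compare-and-swap */
  printf("counter=%d swapped=%d\n", counter, swapped);
  return 0;
}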

View File

@ -40,7 +40,11 @@ typedef struct st_alarm_info
} ALARM_INFO;
void thr_alarm_info(ALARM_INFO *info);
extern my_bool my_disable_thr_alarm;
#ifdef _WIN32
#define DONT_USE_THR_ALARM
#endif
#if defined(DONT_USE_THR_ALARM)
#define USE_ALARM_THREAD
@ -88,7 +92,7 @@ typedef struct st_alarm {
extern uint thr_client_alarm;
extern pthread_t alarm_thread;
extern my_bool my_disable_thr_alarm;
#define thr_alarm_init(A) (*(A))=0
#define thr_alarm_in_use(A) (*(A)!= 0)

View File

@ -168,6 +168,7 @@ void vio_end(void);
#define vio_should_retry(vio) (vio)->should_retry(vio)
#define vio_was_interrupted(vio) (vio)->was_interrupted(vio)
#define vio_close(vio) ((vio)->vioclose)(vio)
#define vio_shutdown(vio,how) ((vio)->shutdown)(vio,how)
#define vio_peer_addr(vio, buf, prt, buflen) (vio)->peer_addr(vio, buf, prt, buflen)
#define vio_timeout(vio, which, seconds) (vio)->timeout(vio, which, seconds)
#define vio_poll_read(vio, timeout) (vio)->poll_read(vio, timeout)
@ -219,6 +220,7 @@ struct st_vio
void (*timeout)(Vio*, unsigned int which, unsigned int timeout);
my_bool (*poll_read)(Vio *vio, uint timeout);
my_bool (*is_connected)(Vio*);
int (*shutdown)(Vio *, int);
my_bool (*has_data) (Vio*);
#ifdef HAVE_OPENSSL
void *ssl_arg;
@ -235,6 +237,7 @@ struct st_vio
char *shared_memory_pos;
#endif /* HAVE_SMEM */
#ifdef _WIN32
DWORD thread_id; /* Used on XP only, in vio_shutdown */
OVERLAPPED pipe_overlapped;
DWORD read_timeout_ms;
DWORD write_timeout_ms;

View File

@ -3461,7 +3461,9 @@ sub mysql_install_db {
mtr_add_arg($args, "--loose-skip-ndbcluster");
mtr_add_arg($args, "--loose-skip-aria");
mtr_add_arg($args, "--disable-sync-frm");
mtr_add_arg($args, "--tmpdir=%s", "$opt_vardir/tmp/");
mtr_add_arg($args, "--tmpdir=.");
mtr_add_arg($args, "--max_allowed_packet=8M");
mtr_add_arg($args, "--net_buffer_length=16K");
mtr_add_arg($args, "--core-file");
if ( $opt_debug )

View File

@ -29,7 +29,7 @@
#
# Setup
#
--source include/not_threadpool.inc
--source include/not_embedded.inc
--source include/not_threadpool.inc

View File

@ -8,7 +8,7 @@
###############################################################################
# These tests cannot run with the embedded server
-- source include/not_embedded.inc
-- source include/one_thread_per_connection.inc
#-- source include/one_thread_per_connection.inc
# Save the initial number of concurrent sessions
--source include/count_sessions.inc

View File

@ -597,93 +597,6 @@ static void *alarm_handler(void *arg __attribute__((unused)))
return 0; /* Impossible */
}
#endif /* USE_ALARM_THREAD */
/*****************************************************************************
thr_alarm for win95
*****************************************************************************/
#else /* __WIN__ */
void thr_alarm_kill(my_thread_id thread_id)
{
/* Can't do this yet */
}
sig_handler process_alarm(int sig __attribute__((unused)))
{
/* Can't do this yet */
}
my_bool thr_alarm(thr_alarm_t *alrm, uint sec, ALARM *alarm)
{
(*alrm)= &alarm->alarmed;
if (alarm_aborted)
{
alarm->alarmed.crono=0;
return 1;
}
if (!(alarm->alarmed.crono=SetTimer((HWND) NULL,0, sec*1000,
(TIMERPROC) NULL)))
return 1;
return 0;
}
my_bool thr_got_alarm(thr_alarm_t *alrm_ptr)
{
thr_alarm_t alrm= *alrm_ptr;
MSG msg;
if (alrm->crono)
{
PeekMessage(&msg,NULL,WM_TIMER,WM_TIMER,PM_REMOVE) ;
if (msg.message == WM_TIMER || alarm_aborted)
{
KillTimer(NULL, alrm->crono);
alrm->crono = 0;
}
}
return !alrm->crono || alarm_aborted;
}
void thr_end_alarm(thr_alarm_t *alrm_ptr)
{
thr_alarm_t alrm= *alrm_ptr;
/* alrm may be zero if thr_alarm aborted with an error */
if (alrm && alrm->crono)
{
KillTimer(NULL, alrm->crono);
alrm->crono = 0;
}
}
void end_thr_alarm(my_bool free_structures)
{
DBUG_ENTER("end_thr_alarm");
alarm_aborted=1; /* No more alarms */
DBUG_VOID_RETURN;
}
void init_thr_alarm(uint max_alarm)
{
DBUG_ENTER("init_thr_alarm");
alarm_aborted=0; /* Yes, Gimmie alarms */
DBUG_VOID_RETURN;
}
void thr_alarm_info(ALARM_INFO *info)
{
bzero((char*) info, sizeof(*info));
}
void resize_thr_alarm(uint max_alarms)
{
}
#endif /* __WIN__ */
#endif
/****************************************************************************
@ -954,4 +867,5 @@ int main(int argc __attribute__((unused)),char **argv __attribute__((unused)))
}
#endif /* !defined(DONT_USE_ALARM_THREAD) */
#endif /* WIN */
#endif /* MAIN */

View File

@ -31,7 +31,7 @@ ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES} PROPERTIES GENERATED 1)
ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER)
ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER -DHAVE_POOL_OF_THREADS)
IF(SSL_DEFINES)
ADD_DEFINITIONS(${SSL_DEFINES})
ENDIF()
@ -82,9 +82,16 @@ SET (SQL_SOURCE
opt_index_cond_pushdown.cc opt_subselect.cc
opt_table_elimination.cc sql_expression_cache.cc
gcalc_slicescan.cc gcalc_tools.cc
threadpool_common.cc
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE})
${MYSYS_LIBWRAP_SOURCE}
)
IF(WIN32)
SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc)
ELSE()
SET(SQL_SOURCE ${SQL_SOURCE} threadpool_unix.cc)
ENDIF()
MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY
RECOMPILE_FOR_EMBEDDED)

View File

@ -73,6 +73,7 @@
#include <waiting_threads.h>
#include "debug_sync.h"
#include "sql_callback.h"
#include "threadpool.h"
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
#include "../storage/perfschema/pfs_server.h"
@ -5236,6 +5237,8 @@ default_service_handling(char **argv,
int mysqld_main(int argc, char **argv)
{
my_progname= argv[0];
/*
When several instances are running on the same machine, we
need to have an unique named hEventShudown through the
@ -7132,6 +7135,10 @@ SHOW_VAR status_vars[]= {
{"Tc_log_max_pages_used", (char*) &tc_log_max_pages_used, SHOW_LONG},
{"Tc_log_page_size", (char*) &tc_log_page_size, SHOW_LONG},
{"Tc_log_page_waits", (char*) &tc_log_page_waits, SHOW_LONG},
#endif
#ifndef EMBEDDED_LIBRARY
{"Threadpool_idle_threads", (char *) &tp_stats.num_waiting_threads, SHOW_INT},
{"Threadpool_threads", (char *) &tp_stats.num_worker_threads, SHOW_INT},
#endif
{"Threads_cached", (char*) &cached_thread_count, SHOW_LONG_NOFLUSH},
{"Threads_connected", (char*) &connection_count, SHOW_INT},
@ -8018,7 +8025,9 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
else if (thread_handling == SCHEDULER_NO_THREADS)
one_thread_scheduler(thread_scheduler);
else
pool_of_threads_scheduler(thread_scheduler); /* purecov: tested */
pool_of_threads_scheduler(thread_scheduler, &max_connections,
&connection_count);
one_thread_per_connection_scheduler(extra_thread_scheduler,
&extra_max_connections,
&extra_connection_count);

View File

@ -842,7 +842,7 @@ my_real_read(NET *net, size_t *complen)
DBUG_PRINT("info",("vio_read returned %ld errno: %d",
(long) length, vio_errno(net->vio)));
#if !defined(__WIN__) || defined(MYSQL_SERVER)
#if !defined(__WIN__) && defined(MYSQL_SERVER)
/*
We got an error that there was no data on the socket. We now set up
an alarm to not 'read forever', change the socket to the blocking
@ -874,7 +874,7 @@ my_real_read(NET *net, size_t *complen)
continue;
}
}
#endif /* (!defined(__WIN__) || defined(MYSQL_SERVER) */
#endif /* (!defined(__WIN__) && defined(MYSQL_SERVER) */
if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) &&
interrupted)
{ /* Probably in MIT threads */

View File

@ -79,7 +79,7 @@ static void scheduler_wait_sync_end(void) {
one_thread_scheduler() or one_thread_per_connection_scheduler() in
mysqld.cc, so this init function will always be called.
*/
static void scheduler_init() {
void scheduler_init() {
thr_set_lock_wait_callback(scheduler_wait_lock_begin,
scheduler_wait_lock_end);
thr_set_sync_wait_callback(scheduler_wait_sync_begin,
@ -124,25 +124,6 @@ void one_thread_scheduler(scheduler_functions *func)
}
#ifdef HAVE_POOL_OF_THREADS
/*
thd_scheduler keeps the link between THD and events.
It's embedded in the THD class.
*/
thd_scheduler::thd_scheduler()
: m_psi(NULL), logged_in(FALSE), io_event(NULL), thread_attached(FALSE)
{
}
thd_scheduler::~thd_scheduler()
{
my_free(io_event);
}
#endif
/*
no pluggable schedulers in mariadb.

View File

@ -76,13 +76,11 @@ void one_thread_per_connection_scheduler(scheduler_functions *func,
ulong *arg_max_connections, uint *arg_connection_count);
void one_thread_scheduler(scheduler_functions *func);
#if defined(HAVE_LIBEVENT) && !defined(EMBEDDED_LIBRARY)
#define HAVE_POOL_OF_THREADS 1
struct event;
class thd_scheduler
/*
To be used for pool-of-threads (implemented differently on various OSs)
*/
struct thd_scheduler
{
public:
/*
@ -96,29 +94,33 @@ public:
differently.
*/
PSI_thread *m_psi;
bool logged_in;
struct event* io_event;
LIST list;
bool thread_attached; /* Indicates if THD is attached to the OS thread */
thd_scheduler();
~thd_scheduler();
bool init(THD* parent_thd);
bool thread_attach();
void thread_detach();
void *data; /* scheduler-specific data structure */
#ifndef DBUG_OFF
bool set_explain;
char dbug_explain[512];
#endif
};
void pool_of_threads_scheduler(scheduler_functions* func);
void *thd_get_scheduler_data(THD *thd);
void thd_set_scheduler_data(THD *thd, void *data);
PSI_thread* thd_get_psi(THD *thd);
void thd_set_psi(THD *thd, PSI_thread *psi);
/* Common thread pool routines, suitable for different implementations */
extern void threadpool_remove_connection(THD *thd);
extern int threadpool_process_request(THD *thd);
extern int threadpool_add_connection(THD *thd);
extern scheduler_functions *thread_scheduler;
#endif /* SCHEDULER_INCLUDED */
#if !defined(EMBEDDED_LIBRARY)
#define HAVE_POOL_OF_THREADS 1
void pool_of_threads_scheduler(scheduler_functions* func,
ulong *arg_max_connections,
uint *arg_connection_count);
#else
#define pool_of_threads_scheduler(A) \
one_thread_per_connection_scheduler(A, &max_connections, \
&connection_count)
class thd_scheduler
{};
#endif
#define pool_of_threads_scheduler(A,B,C) \
one_thread_per_connection_scheduler(A, B, C)
#endif
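The void *data member above is the hook a pool implementation uses to attach its per-connection state to the THD (threadpool_common.cc below clears it in threadpool_remove_connection(), and threadpool_win.cc checks it in tp_wait_begin()). A hedged sketch of that pattern, with connection_t standing in for whatever the platform-specific file defines:

/* Sketch only: stashing implementation-specific state in thd_scheduler::data. */
struct connection_t;                         /* defined by the pool implementation */

static void attach_connection(THD *thd, connection_t *con)
{
  thd->event_scheduler.data= con;            /* per-connection pool state */
}

static connection_t *connection_of(THD *thd)
{
  return (connection_t *)thd->event_scheduler.data;
}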

View File

@ -1536,35 +1536,10 @@ void THD::awake(killed_state state_to_set)
if (state_to_set >= KILL_CONNECTION || state_to_set == NOT_KILLED)
{
#ifdef SIGNAL_WITH_VIO_CLOSE
if (this != current_thd)
if (this != current_thd)
{
/*
Before sending a signal, let's close the socket of the thread
that is being killed ("this", which is not the current thread).
This is to make sure it does not block if the signal is lost.
This needs to be done only on platforms where signals are not
a reliable interruption mechanism.
Note that the downside of this mechanism is that we could close
the connection while "this" target thread is in the middle of
sending a result to the application, thus violating the client-
server protocol.
On the other hand, without closing the socket we have a race
condition. If "this" target thread passes the check of
thd->killed, and then the current thread runs through
THD::awake(), sets the 'killed' flag and completes the
signaling, and then the target thread runs into read(), it will
block on the socket. As a result of the discussions around
Bug#37780, it has been decided that we accept the race
condition. A second KILL awakes the target from read().
If we are killing ourselves, we know that we are not blocked.
We also know that we will check thd->killed before we go for
reading the next statement.
*/
close_active_vio();
if(active_vio)
vio_shutdown(active_vio, SHUT_RDWR);
}
#endif
@ -1668,7 +1643,7 @@ void THD::disconnect()
/* Disconnect even if a active vio is not associated. */
if (net.vio != vio)
vio_close(net.vio);
vio_close(net.vio);
mysql_mutex_unlock(&LOCK_thd_data);
}
@ -1740,6 +1715,10 @@ bool THD::store_globals()
mysys_var->stack_ends_here= thread_stack + // for consistency, see libevent_thread_proc
STACK_DIRECTION * (long)my_thread_stack_size;
#ifdef _WIN32
if (net.vio)
net.vio->thread_id= real_id; /* Required to support IO cancelation on XP */
#endif
/*
We have to call thr_lock_info_init() again here as THD may have been
created in another thread

View File

@ -2339,6 +2339,10 @@ public:
{
mysql_mutex_lock(&LOCK_thd_data);
active_vio = vio;
#ifdef _WIN32
/* Required to support cancelation on XP */
active_vio->thread_id = pthread_self();
#endif
mysql_mutex_unlock(&LOCK_thd_data);
}
inline void clear_active_vio()

View File

@ -890,6 +890,7 @@ static int check_connection(THD *thd)
DBUG_PRINT("info",
("New connection received on %s", vio_description(net->vio)));
#ifdef SIGNAL_WITH_VIO_CLOSE
thd->set_active_vio(net->vio);
#endif
@ -1175,7 +1176,7 @@ void do_handle_one_connection(THD *thd_arg)
/* We need to set this because of time_out_user_resource_limits */
thd->start_utime= thd->thr_create_utime;
if (MYSQL_CALLBACK_ELSE(thread_scheduler, init_new_connection_thread, (), 0))
if (MYSQL_CALLBACK_ELSE(thd->scheduler, init_new_connection_thread, (), 0))
{
close_connection(thd, ER_OUT_OF_RESOURCES);
statistic_increment(aborted_connects,&LOCK_status);

View File

@ -678,6 +678,7 @@ void cleanup_items(Item *item)
@retval
1 request of thread shutdown (see dispatch_command() description)
*/
int skip_net_wait_timeout = 0;
bool do_command(THD *thd)
{
@ -700,7 +701,9 @@ bool do_command(THD *thd)
the client, the connection is closed or "net_wait_timeout"
number of seconds has passed.
*/
my_net_set_read_timeout(net, thd->variables.net_wait_timeout);
if(!skip_net_wait_timeout)
my_net_set_read_timeout(net, thd->variables.net_wait_timeout);
/*
XXX: this code is here only to clear possible errors of init_connect.

View File

@ -50,6 +50,7 @@
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
#include "../storage/perfschema/pfs_server.h"
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
#include "threadpool.h"
/*
The rule for this file: everything should be 'static'. When a sys_var
@ -1804,7 +1805,13 @@ static Sys_var_enum Sys_thread_handling(
", pool-of-threads"
#endif
, READ_ONLY GLOBAL_VAR(thread_handling), CMD_LINE(REQUIRED_ARG),
thread_handling_names, DEFAULT(0));
thread_handling_names,
#ifdef HAVE_POOL_OF_THREADS
DEFAULT(2)
#else
DEFAULT(0)
#endif
);
#ifdef HAVE_QUERY_CACHE
static bool fix_query_cache_size(sys_var *self, THD *thd, enum_var_type type)
@ -2173,14 +2180,67 @@ static Sys_var_ulong Sys_thread_cache_size(
GLOBAL_VAR(thread_cache_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 16384), DEFAULT(0), BLOCK_SIZE(1));
#ifdef HAVE_POOL_OF_THREADS
static Sys_var_ulong Sys_thread_pool_size(
"thread_pool_size",
"How many threads we should create to handle query requests in "
"case of 'thread_handling=pool-of-threads'",
GLOBAL_VAR(thread_pool_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, 16384), DEFAULT(20), BLOCK_SIZE(0));
#ifndef HAVE_POOL_OF_THREADS
static bool fix_tp_max_threads(sys_var *, THD *, enum_var_type)
{
#ifdef _WIN32
tp_set_max_threads(threadpool_max_threads);
#endif
return false;
}
#ifdef _WIN32
static bool fix_tp_min_threads(sys_var *, THD *, enum_var_type)
{
tp_set_min_threads(threadpool_min_threads);
return false;
}
#endif
#ifdef _WIN32
static Sys_var_uint Sys_threadpool_min_threads(
"thread_pool_min_threads",
"Minimuim number of threads in the thread pool.",
GLOBAL_VAR(threadpool_min_threads), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, 256), DEFAULT(1), BLOCK_SIZE(1),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_tp_min_threads)
);
#else
static Sys_var_uint Sys_threadpool_idle_thread_timeout(
"thread_pool_idle_timeout",
"Timeout in seconds for an idle thread in the thread pool."
"Worker thread will be shut down after timeout",
GLOBAL_VAR(threadpool_idle_timeout), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, UINT_MAX/100), DEFAULT(60000), BLOCK_SIZE(1)
);
static Sys_var_uint Sys_threadpool_size(
"thread_pool_size",
"Number of concurrently executing threads in the pool. "
"Leaving value default (0) sets it to the number of processors.",
GLOBAL_VAR(threadpool_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 128), DEFAULT(0), BLOCK_SIZE(1)
);
static Sys_var_uint Sys_threadpool_stall_limit(
"thread_pool_stall_limit",
"Maximum query execution time before in milliseconds,"
"before an executing non-yielding thread is considered stalled."
"If a worker thread is stalled, additional worker thread "
"may be created to handle remaining clients.",
GLOBAL_VAR(threadpool_stall_limit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(60, UINT_MAX), DEFAULT(500), BLOCK_SIZE(1)
);
#endif /*! WIN32 */
static Sys_var_uint Sys_threadpool_max_threads(
"thread_pool_max_threads",
"Maximum allowed number of worker threads in the thread pool",
GLOBAL_VAR(threadpool_max_threads), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, UINT_MAX), DEFAULT(3000), BLOCK_SIZE(1),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_tp_max_threads)
);
#endif /* !HAVE_POOL_OF_THREADS */
/**
Can't change the 'next' tx_isolation if we are already in a

47
sql/threadpool.h Normal file
View File

@ -0,0 +1,47 @@
/* Threadpool parameters */
#ifdef _WIN32
extern uint threadpool_min_threads; /* Minimum threads in pool */
#else
extern uint threadpool_idle_timeout; /* Shutdown idle worker threads after this timeout */
extern uint threadpool_size; /* Number of parallel executing threads */
extern uint threadpool_stall_limit; /* Time interval in 10 ms units for stall checks */
#endif
extern uint threadpool_max_threads; /* Maximum threads in pool */
/*
Threadpool statistics
*/
struct TP_STATISTICS
{
/* Current number of worker threads. */
volatile int num_worker_threads;
/* Current number of idle threads. */
volatile int num_waiting_threads;
/* Number of login requests that are queued but not yet processed. */
volatile int pending_login_requests;
/* Number of threads that are starting. */
volatile int pending_thread_starts;
/* Number of threads that are being shut down */
volatile int pending_thread_shutdowns;
/* Time (in milliseconds) since pool is blocked (num_waiting_threads is 0) */
ulonglong pool_block_duration;
/* Maximum duration of a pending login, in milliseconds. */
ulonglong pending_login_duration;
/* Time since last thread was created */
ulonglong time_since_last_thread_creation;
/* Number of requests processed since the pool monitor last ran. */
volatile int requests_dequeued;
volatile int requests_completed;
};
extern TP_STATISTICS tp_stats;
/* Functions to set threadpool parameters */
extern void tp_set_min_threads(uint val);
extern void tp_set_max_threads(uint val);
/* Activate threadpool scheduler */
extern void tp_scheduler(void);
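These counters are updated concurrently from pool threads; the Windows implementation below does this with Interlocked* operations. A sketch of how a POSIX-side implementation could do the same (assuming GCC atomic builtins, as suggested by the CMake change above; the helper names are hypothetical):

/* Sketch only: atomically maintaining pool counters from worker threads. */
static void worker_started()
{
  __sync_fetch_and_add(&tp_stats.num_worker_threads, 1);
}

static void worker_waiting(bool begin)
{
  __sync_fetch_and_add(&tp_stats.num_waiting_threads, begin ? 1 : -1);
}

static void worker_stopped()
{
  __sync_fetch_and_sub(&tp_stats.num_worker_threads, 1);
}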

246
sql/threadpool_common.cc Normal file
View File

@ -0,0 +1,246 @@
#include <my_global.h>
#include <violite.h>
#include <sql_priv.h>
#include <sql_class.h>
#include <my_pthread.h>
#include <scheduler.h>
#include <sql_connect.h>
#include <sql_audit.h>
#include <debug_sync.h>
extern bool login_connection(THD *thd);
extern bool do_command(THD *thd);
extern void prepare_new_connection_state(THD* thd);
extern void end_connection(THD *thd);
extern void thd_cleanup(THD *thd);
extern void delete_thd(THD *thd);
/* Threadpool parameters */
#ifdef _WIN32
uint threadpool_min_threads;
#else
uint threadpool_idle_timeout;
uint threadpool_size;
uint threadpool_stall_limit;
#endif
uint threadpool_max_threads;
/*
Attach/associate the connection with the OS thread, for command processing.
*/
static inline bool thread_attach(THD* thd, char *stack_start, PSI_thread **save_psi_thread)
{
DBUG_ENTER("thread_attach");
if (PSI_server)
{
*save_psi_thread= PSI_server->get_thread();
PSI_server->set_thread(thd->event_scheduler.m_psi);
}
else
*save_psi_thread= NULL;
/*
We need to know the start of the stack so that we could check for
stack overruns.
*/
thd->thread_stack= stack_start;
/* Calls close_connection() on failure */
if (setup_connection_thread_globals(thd))
{
DBUG_RETURN(TRUE);
}
/* clear errors from processing the previous THD */
my_errno= 0;
thd->mysys_var->abort= 0;
#ifndef DBUG_OFF
if (thd->event_scheduler.set_explain)
DBUG_SET(thd->event_scheduler.dbug_explain);
#endif
DBUG_RETURN(FALSE);
}
/*
Detach/disassociate the connection with the OS thread.
*/
static inline void thread_detach(THD* thd, PSI_thread *restore_psi_thread)
{
DBUG_ENTER("thread_detach");
thd->mysys_var = NULL;
#ifndef DBUG_OFF
/*
If during the session @@session.dbug was assigned, the
dbug options/state has been pushed. Check if this is the
case, to be able to restore the state when we attach this
logical connection to a physical thread.
*/
if (_db_is_pushed_())
{
thd->event_scheduler.set_explain= TRUE;
if (DBUG_EXPLAIN(thd->event_scheduler.dbug_explain, sizeof(thd->event_scheduler.dbug_explain)))
sql_print_error("thd_scheduler: DBUG_EXPLAIN buffer is too small");
}
/* DBUG_POP() is a no-op in case there is no session state */
DBUG_POP();
#endif
if (PSI_server)
PSI_server->set_thread(restore_psi_thread);
pthread_setspecific(THR_THD, NULL);
DBUG_VOID_RETURN;
}
int threadpool_add_connection(THD *thd)
{
int retval=1;
PSI_thread *psi_thread;
#ifndef DBUG_OFF
thd->event_scheduler.set_explain = 0;
#endif
thread_attach(thd, (char *)&thd, &psi_thread);
ulonglong now= microsecond_interval_timer();
thd->prior_thr_create_utime= now;
thd->start_utime= now;
thd->thr_create_utime= now;
if (PSI_server)
{
thd->event_scheduler.m_psi =
PSI_server->new_thread(key_thread_one_connection, thd, thd->thread_id);
PSI_server->set_thread(thd->event_scheduler.m_psi);
}
if (setup_connection_thread_globals(thd) == 0)
{
if (login_connection(thd) == 0)
{
prepare_new_connection_state(thd);
retval = thd_is_connection_alive(thd)?0:-1;
thd->net.reading_or_writing= 1;
}
}
thread_detach(thd, psi_thread);
return retval;
}
void threadpool_remove_connection(THD *thd)
{
PSI_thread *save_psi_thread;
thread_attach(thd, (char *)&thd, &save_psi_thread);
thd->killed= KILL_CONNECTION;
thd->net.reading_or_writing= 0;
end_connection(thd);
close_connection(thd, 0);
mysql_mutex_lock(&thd->LOCK_thd_data);
thd->event_scheduler.data= NULL;
mysql_mutex_unlock(&thd->LOCK_thd_data);
unlink_thd(thd);
mysql_mutex_unlock(&LOCK_thread_count);
mysql_cond_broadcast(&COND_thread_count);
DBUG_POP();
if (PSI_server)
PSI_server->delete_current_thread();
pthread_setspecific(THR_THD, NULL);
}
int threadpool_process_request(THD *thd)
{
int retval= 0;
PSI_thread *psi_thread;
thread_attach(thd, (char *)&thd, &psi_thread);
if (thd->killed == KILL_CONNECTION)
{
/*
The kill flag can have been set by the timeout handler or by a KILL command
*/
thread_detach(thd, psi_thread);
return 1;
}
for(;;)
{
Vio *vio;
thd->net.reading_or_writing= 0;
mysql_audit_release(thd);
if ((retval= do_command(thd)) != 0)
break ;
if (!thd_is_connection_alive(thd))
{
retval= 1;
break;
}
vio= thd->net.vio;
if (!vio->has_data(vio))
{
/*
More info on this debug sync is in sql_parse.cc
*/
DEBUG_SYNC(thd, "before_do_command_net_read");
break;
}
}
thread_detach(thd, psi_thread);
if (!retval)
thd->net.reading_or_writing= 1;
return retval;
}
/*
Scheduler struct, individual functions are implemented
in threadpool_unix.cc or threadpool_win.cc
*/
extern bool tp_init();
extern void tp_add_connection(THD*);
extern void tp_wait_begin(THD *, int);
extern void tp_wait_end(THD*);
extern void tp_post_kill_notification(THD *thd);
extern void tp_end(void);
static scheduler_functions tp_scheduler_functions=
{
0, // max_threads
NULL,
NULL,
tp_init, // init
NULL, // init_new_connection_thread
tp_add_connection, // add_connection
tp_wait_begin, // thd_wait_begin
tp_wait_end, // thd_wait_end
tp_post_kill_notification, // post_kill_notification
NULL, // end_thread
tp_end // end
};
extern void scheduler_init();
void pool_of_threads_scheduler(struct scheduler_functions *func,
ulong *arg_max_connections,
uint *arg_connection_count)
{
*func = tp_scheduler_functions;
func->max_threads= *arg_max_connections + 1;
func->max_connections= arg_max_connections;
func->connection_count= arg_connection_count;
scheduler_init();
}

1238
sql/threadpool_unix.cc Normal file

File diff suppressed because it is too large
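Since the sql/threadpool_unix.cc diff is suppressed, here is only a rough, heavily simplified sketch of how a Unix-side worker can drive the common API from threadpool_common.cc (the real file multiplexes many connections per thread through an event queue; poll() and the one-connection-per-call shape are simplifying assumptions):

/* Sketch only: a Unix worker driving the common threadpool API for one client. */
#include <poll.h>

static void serve_connection(THD *thd, int fd)
{
  if (threadpool_add_connection(thd))      /* authenticates, prepares the THD */
  {
    threadpool_remove_connection(thd);
    return;
  }
  for (;;)
  {
    struct pollfd pfd= { fd, POLLIN, 0 };
    if (poll(&pfd, 1, -1) <= 0)            /* wait for the client to send data */
      break;
    if (threadpool_process_request(thd))   /* run statements until socket is idle */
      break;
  }
  threadpool_remove_connection(thd);       /* logs out and releases the THD */
}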

756
sql/threadpool_win.cc Normal file
View File

@ -0,0 +1,756 @@
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0601
#include <my_global.h>
#include <violite.h>
#include <sql_priv.h>
#include <sql_class.h>
#include <my_pthread.h>
#include <scheduler.h>
#include <sql_connect.h>
#include <mysqld.h>
#include <debug_sync.h>
#include <threadpool.h>
#include <windows.h>
TP_STATISTICS tp_stats;
#define WEAK_SYMBOL(return_type, function, ...) \
typedef return_type (WINAPI *pFN_##function)(__VA_ARGS__); \
static pFN_##function my_##function = (pFN_##function) \
(GetProcAddress(GetModuleHandle("kernel32"),#function))
WEAK_SYMBOL(VOID, CancelThreadpoolIo, PTP_IO);
#define CancelThreadpoolIo my_CancelThreadpoolIo
WEAK_SYMBOL(VOID, CloseThreadpool, PTP_POOL);
#define CloseThreadpool my_CloseThreadpool
WEAK_SYMBOL(VOID, CloseThreadpoolIo, PTP_IO);
#define CloseThreadpoolIo my_CloseThreadpoolIo
WEAK_SYMBOL(VOID, CloseThreadpoolTimer,PTP_TIMER);
#define CloseThreadpoolTimer my_CloseThreadpoolTimer
WEAK_SYMBOL(VOID, CloseThreadpoolWait,PTP_WAIT);
#define CloseThreadpoolWait my_CloseThreadpoolWait
WEAK_SYMBOL(PTP_POOL, CreateThreadpool,PVOID);
#define CreateThreadpool my_CreateThreadpool
WEAK_SYMBOL(PTP_IO, CreateThreadpoolIo, HANDLE, PTP_WIN32_IO_CALLBACK, PVOID ,
PTP_CALLBACK_ENVIRON);
#define CreateThreadpoolIo my_CreateThreadpoolIo
WEAK_SYMBOL(PTP_TIMER, CreateThreadpoolTimer, PTP_TIMER_CALLBACK ,
PVOID pv, PTP_CALLBACK_ENVIRON pcbe);
#define CreateThreadpoolTimer my_CreateThreadpoolTimer
WEAK_SYMBOL(PTP_WAIT, CreateThreadpoolWait, PTP_WAIT_CALLBACK, PVOID,
PTP_CALLBACK_ENVIRON);
#define CreateThreadpoolWait my_CreateThreadpoolWait
WEAK_SYMBOL(VOID, DisassociateCurrentThreadFromCallback, PTP_CALLBACK_INSTANCE);
#define DisassociateCurrentThreadFromCallback my_DisassociateCurrentThreadFromCallback
WEAK_SYMBOL(DWORD, FlsAlloc, PFLS_CALLBACK_FUNCTION);
#define FlsAlloc my_FlsAlloc
WEAK_SYMBOL(PVOID, FlsGetValue, DWORD);
#define FlsGetValue my_FlsGetValue
WEAK_SYMBOL(BOOL, FlsSetValue, DWORD, PVOID);
#define FlsSetValue my_FlsSetValue
WEAK_SYMBOL(VOID, SetThreadpoolThreadMaximum, PTP_POOL, DWORD);
#define SetThreadpoolThreadMaximum my_SetThreadpoolThreadMaximum
WEAK_SYMBOL(BOOL, SetThreadpoolThreadMinimum, PTP_POOL, DWORD);
#define SetThreadpoolThreadMinimum my_SetThreadpoolThreadMinimum
WEAK_SYMBOL(VOID, SetThreadpoolTimer, PTP_TIMER, PFILETIME,DWORD,DWORD);
#define SetThreadpoolTimer my_SetThreadpoolTimer
WEAK_SYMBOL(VOID, SetThreadpoolWait, PTP_WAIT,HANDLE,PFILETIME);
#define SetThreadpoolWait my_SetThreadpoolWait
WEAK_SYMBOL(VOID, StartThreadpoolIo, PTP_IO);
#define StartThreadpoolIo my_StartThreadpoolIo
WEAK_SYMBOL(VOID, WaitForThreadpoolIoCallbacks,PTP_IO, BOOL);
#define WaitForThreadpoolIoCallbacks my_WaitForThreadpoolIoCallbacks
WEAK_SYMBOL(VOID, WaitForThreadpoolTimerCallbacks, PTP_TIMER, BOOL);
#define WaitForThreadpoolTimerCallbacks my_WaitForThreadpoolTimerCallbacks
WEAK_SYMBOL(VOID, WaitForThreadpoolWaitCallbacks, PTP_WAIT, BOOL);
#define WaitForThreadpoolWaitCallbacks my_WaitForThreadpoolWaitCallbacks
WEAK_SYMBOL(BOOL, SetFileCompletionNotificationModes, HANDLE, UCHAR);
#define SetFileCompletionNotificationModes my_SetFileCompletionNotificationModes
WEAK_SYMBOL(BOOL, TrySubmitThreadpoolCallback, PTP_SIMPLE_CALLBACK pfns,
PVOID pv,PTP_CALLBACK_ENVIRON pcbe);
#define TrySubmitThreadpoolCallback my_TrySubmitThreadpoolCallback
WEAK_SYMBOL(PTP_WORK, CreateThreadpoolWork, PTP_WORK_CALLBACK pfnwk, PVOID pv,
PTP_CALLBACK_ENVIRON pcbe);
#define CreateThreadpoolWork my_CreateThreadpoolWork
WEAK_SYMBOL(VOID, SubmitThreadpoolWork,PTP_WORK pwk);
#define SubmitThreadpoolWork my_SubmitThreadpoolWork
WEAK_SYMBOL(VOID, CloseThreadpoolWork, PTP_WORK pwk);
#define CloseThreadpoolWork my_CloseThreadpoolWork
#if _MSC_VER >= 1600
/* Stack size manipulation is available only on Win7+; declarations are in VS10 */
WEAK_SYMBOL(BOOL, SetThreadpoolStackInformation, PTP_POOL,
PTP_POOL_STACK_INFORMATION);
#define SetThreadpoolStackInformation my_SetThreadpoolStackInformation
#endif
#if _MSC_VER < 1600
#define SetThreadpoolCallbackPriority(env,prio)
typedef enum _TP_CALLBACK_PRIORITY {
TP_CALLBACK_PRIORITY_HIGH,
TP_CALLBACK_PRIORITY_NORMAL,
TP_CALLBACK_PRIORITY_LOW,
TP_CALLBACK_PRIORITY_INVALID
} TP_CALLBACK_PRIORITY;
#endif
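For readability, this is roughly what one of the WEAK_SYMBOL() uses above expands to; it is only a sketch of the preprocessor expansion, not new functionality:

/* Approximate expansion of WEAK_SYMBOL(VOID, StartThreadpoolIo, PTP_IO): */
typedef VOID (WINAPI *pFN_StartThreadpoolIo)(PTP_IO);
static pFN_StartThreadpoolIo my_StartThreadpoolIo= (pFN_StartThreadpoolIo)
  (GetProcAddress(GetModuleHandle("kernel32"), "StartThreadpoolIo"));
/* The accompanying #define then redirects StartThreadpoolIo to
   my_StartThreadpoolIo, so the file compiles against older SDKs and the API
   is bound at runtime; the pointer simply stays NULL on pre-Vista Windows. */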
/* Log a warning */
static void tp_log_warning(const char *msg, const char *fct)
{
sql_print_warning("Threadpool: %s. %s failed (last error %d)",msg, fct,
GetLastError());
}
PTP_POOL pool;
DWORD fls;
extern int skip_net_wait_timeout;
static bool skip_completion_port_on_success = false;
/*
Threadpool callbacks.
io_completion_callback - handle client request
timer_callback - handle wait timeout (kill connection)
shm_read_callback, shm_close_callback - shared memory stuff
login_callback - user login (submitted as threadpool work)
*/
static void CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PTP_TIMER timer);
static void CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PVOID overlapped, ULONG io_result, ULONG_PTR nbytes, PTP_IO io);
static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
PVOID Context, PTP_WAIT wait,TP_WAIT_RESULT wait_result);
static void CALLBACK shm_close_callback(PTP_CALLBACK_INSTANCE instance,
PVOID Context, PTP_WAIT wait,TP_WAIT_RESULT wait_result);
#define CONNECTION_SIGNATURE 0xAFFEAFFE
static void check_thread_init();
/* Get current time as Windows time */
static ulonglong now()
{
ulonglong current_time;
GetSystemTimeAsFileTime((PFILETIME)&current_time);
return current_time;
}
/*
Connection structure, encapsulates THD + structures for asynchronous
IO and pool.
*/
struct connection_t
{
THD *thd;
bool logged_in;
HANDLE handle;
OVERLAPPED overlapped;
/* absolute time for wait timeout (as Windows time) */
volatile ulonglong timeout;
PTP_CLEANUP_GROUP cleanup_group;
TP_CALLBACK_ENVIRON callback_environ;
PTP_IO io;
PTP_TIMER timer;
PTP_WAIT shm_read;
};
void init_connection(connection_t *connection)
{
connection->logged_in = false;
connection->handle= 0;
connection->io= 0;
connection->shm_read= 0;
connection->timer= 0;
connection->logged_in = false;
connection->timeout= ULONGLONG_MAX;
memset(&connection->overlapped, 0, sizeof(OVERLAPPED));
InitializeThreadpoolEnvironment(&connection->callback_environ);
SetThreadpoolCallbackPool(&connection->callback_environ, pool);
connection->thd = 0;
}
int init_io(connection_t *connection, THD *thd)
{
connection->thd= thd;
Vio *vio = thd->net.vio;
switch(vio->type)
{
case VIO_TYPE_SSL:
case VIO_TYPE_TCPIP:
connection->handle= (HANDLE)vio->sd;
break;
case VIO_TYPE_NAMEDPIPE:
connection->handle= (HANDLE)vio->hPipe;
break;
case VIO_TYPE_SHARED_MEMORY:
connection->shm_read= CreateThreadpoolWait(shm_read_callback, connection,
&connection->callback_environ);
if (!connection->shm_read)
{
tp_log_warning("Allocation failed", "CreateThreadpoolWait");
return -1;
}
break;
default:
abort();
}
if (connection->handle)
{
/* Performance tweaks (see MSDN documentation) */
UCHAR flags = FILE_SKIP_SET_EVENT_ON_HANDLE;
if (skip_completion_port_on_success)
{
flags |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
}
(void)SetFileCompletionNotificationModes(connection->handle, flags);
/* Assign io completion callback */
connection->io = CreateThreadpoolIo(connection->handle,
io_completion_callback, connection, &connection->callback_environ);
if(!connection->io)
{
tp_log_warning("Allocation failed", "CreateThreadpoolWait");
return -1;
}
}
connection->timer = CreateThreadpoolTimer(timer_callback, connection,
&connection->callback_environ);
if (!connection->timer)
{
tp_log_warning("Allocation failed", "CreateThreadpoolWait");
return -1;
}
return 0;
}
/*
Start asynchronous read
*/
int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
{
/* Start async read */
DWORD num_bytes = 0;
static char c;
WSABUF buf;
buf.buf= &c;
buf.len= 0;
DWORD flags=0;
DWORD last_error= 0;
int retval;
Vio *vio= connection->thd->net.vio;
if (vio->type == VIO_TYPE_SHARED_MEMORY)
{
SetThreadpoolWait(connection->shm_read, vio->event_server_wrote, NULL);
return 0;
}
if (vio->type == VIO_CLOSED)
{
return -1;
}
DBUG_ASSERT(vio->type == VIO_TYPE_TCPIP ||
vio->type == VIO_TYPE_SSL ||
vio->type == VIO_TYPE_NAMEDPIPE);
OVERLAPPED *overlapped= &connection->overlapped;
PTP_IO io= connection->io;
StartThreadpoolIo(io);
if (vio->type == VIO_TYPE_TCPIP || vio->type == VIO_TYPE_SSL)
{
/* Start async io (sockets). */
if (WSARecv(vio->sd , &buf, 1, &num_bytes, &flags,
overlapped, NULL) == 0)
{
retval= last_error= 0;
}
else
{
retval= -1;
last_error= WSAGetLastError();
}
}
else
{
/* Start async io (named pipe) */
if (ReadFile(vio->hPipe, &c, 0, &num_bytes ,overlapped))
{
retval= last_error= 0;
}
else
{
retval= -1;
last_error= GetLastError();
}
}
if (retval == 0 || last_error == ERROR_MORE_DATA)
{
/*
IO successfully finished (synchronously).
If skip_completion_port_on_success is set, we need to handle it right
here, because completion callback would not be executed by the pool.
*/
if(skip_completion_port_on_success)
{
CancelThreadpoolIo(io);
io_completion_callback(instance, connection, overlapped, last_error,
num_bytes, io);
}
return 0;
}
if(last_error == ERROR_IO_PENDING)
{
return 0;
}
/* Some error occurred */
CancelThreadpoolIo(io);
return -1;
}
int login(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
{
if (threadpool_add_connection(connection->thd) == 0
&& init_io(connection, connection->thd) == 0
&& start_io(connection, instance) == 0)
{
return 0;
}
return -1;
}
/*
Recalculate wait timeout, maybe reset timer.
*/
void set_wait_timeout(connection_t *connection, ulonglong old_timeout)
{
ulonglong new_timeout = now() +
10000000LL*connection->thd->variables.net_wait_timeout;
if (new_timeout < old_timeout)
{
SetThreadpoolTimer(connection->timer, (PFILETIME) &new_timeout, 0, 1000);
}
connection->timeout = new_timeout;
}
/*
Terminates (idle) connection by closing the socket.
This will activate io_completion_callback() in a different thread
*/
void post_kill_notification(connection_t *connection)
{
check_thread_init();
THD *thd=connection->thd;
mysql_mutex_lock(&thd->LOCK_thd_data);
thd->killed = KILL_CONNECTION;
vio_shutdown(thd->net.vio, SHUT_RDWR);
thd->mysys_var= NULL;
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
/* Connection destructor */
void destroy_connection(connection_t *connection)
{
if (connection->thd)
{
threadpool_remove_connection(connection->thd);
}
if (connection->io)
{
WaitForThreadpoolIoCallbacks(connection->io, TRUE);
CloseThreadpoolIo(connection->io);
}
if(connection->shm_read)
{
WaitForThreadpoolWaitCallbacks(connection->shm_read, TRUE);
CloseThreadpoolWait(connection->shm_read);
}
if(connection->timer)
{
SetThreadpoolTimer(connection->timer, 0, 0, 0);
WaitForThreadpoolTimerCallbacks(connection->timer, TRUE);
CloseThreadpoolTimer(connection->timer);
}
DestroyThreadpoolEnvironment(&connection->callback_environ);
}
/*
This function should be called first whenever a callback is invoked in the
threadpool; it calls my_thread_init() if that has not been done yet
*/
extern ulong thread_created;
static void check_thread_init()
{
if (FlsGetValue(fls) == NULL)
{
FlsSetValue(fls, (void *)1);
my_thread_init();
thread_created++;
InterlockedIncrement((volatile long *)&tp_stats.num_worker_threads);
}
}
/*
Take care of proper cleanup when threadpool threads exit.
We do not control how threads are created, thus it is our responsibility to
check that my_thread_init() is called on thread initialization and
my_thread_end() on thread destruction. On Windows, FlsAlloc() provides the
thread destruction callbacks.
*/
static VOID WINAPI thread_destructor(void *data)
{
if(data)
{
if (InterlockedDecrement((volatile long *)&tp_stats.num_worker_threads) >= 0)
{
/*
The above check for the number of threads >= 0 is due to the shutdown code
(see tp_end()), where we forcefully set num_worker_threads to 0, even if
not all threads have shut down yet to the point where they would run their
Fls destructors, even after CloseThreadpool(). See also the comment in tp_end().
*/
mysql_mutex_lock(&LOCK_thread_count);
my_thread_end();
mysql_mutex_unlock(&LOCK_thread_count);
}
}
}
/* Scheduler callback : init */
bool tp_init(void)
{
fls= FlsAlloc(thread_destructor);
pool= CreateThreadpool(NULL);
if(!pool)
{
sql_print_error("Can't create threadpool. "
"CreateThreadpool() failed with %d. Likely cause is memory pressure",
GetLastError());
exit(1);
}
if (threadpool_max_threads)
{
SetThreadpoolThreadMaximum(pool,threadpool_max_threads);
}
if (threadpool_min_threads)
{
if (!SetThreadpoolThreadMinimum(pool, threadpool_min_threads))
{
tp_log_warning( "Can't set threadpool minimum threads",
"SetThreadpoolThreadMinimum");
}
}
/*
Control stack size (OS must be Win7 or later, plus corresponding SDK)
*/
#if _MSC_VER >=1600
if (SetThreadpoolStackInformation)
{
TP_POOL_STACK_INFORMATION stackinfo;
stackinfo.StackCommit = 0;
stackinfo.StackReserve = my_thread_stack_size;
if (!SetThreadpoolStackInformation(pool, &stackinfo))
{
tp_log_warning("Can't set threadpool stack size",
"SetThreadpoolStackInformation");
}
}
#endif
skip_net_wait_timeout = 1;
return 0;
}
/*
Scheduler callback : Destroy the scheduler.
*/
extern "C" uint THR_thread_count;
extern "C" mysql_mutex_t THR_LOCK_threads;
extern "C" mysql_cond_t THR_COND_threads;
void tp_end(void)
{
if(pool)
{
SetThreadpoolThreadMaximum(pool, 0);
CloseThreadpool(pool);
/*
Tell my_global_thread_end() we're complete.
This would not be necessary if CloseThreadpool() synchronously released
all threads, waited until they disappear and called all their FLS
destructors. However, threads in the pool are released asynchronously and
might spend some time in the CRT shutdown code. Thus we zero
num_worker_threads, to avoid the thread destructors' my_thread_end() calls
after this point.
*/
LONG remaining_threads=
InterlockedExchange( (volatile long *)&tp_stats.num_worker_threads, 0);
if (remaining_threads)
{
mysql_mutex_lock(&THR_LOCK_threads);
THR_thread_count -= remaining_threads;
mysql_cond_signal(&THR_COND_threads);
mysql_mutex_unlock(&THR_LOCK_threads);
}
}
skip_net_wait_timeout= 0;
}
/*
Notify pool about connection being killed.
*/
void tp_post_kill_notification(THD *thd)
{
if (current_thd == thd)
return; /* There is nothing to do.*/
if (thd->system_thread)
return; /* Will crash if we attempt to kill system thread. */
Vio *vio= thd->net.vio;
vio_shutdown(vio, SD_BOTH);
}
/*
Handle read completion/notification.
*/
static VOID CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PVOID overlapped, ULONG io_result, ULONG_PTR nbytes, PTP_IO io)
{
if(instance)
{
check_thread_init();
}
connection_t *connection = (connection_t*)context;
THD *thd= connection->thd;
ulonglong old_timeout = connection->timeout;
connection->timeout = ULONGLONG_MAX;
if (threadpool_process_request(connection->thd))
goto error;
set_wait_timeout(connection, old_timeout);
if(start_io(connection, instance))
goto error;
return;
error:
/* Some error has occurred. */
if (instance)
DisassociateCurrentThreadFromCallback(instance);
destroy_connection(connection);
my_free(connection);
}
/* Simple callback for login */
static void CALLBACK login_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PTP_WORK work)
{
if(instance)
{
check_thread_init();
}
connection_t *connection =(connection_t *)context;
if (login(connection, instance) != 0)
{
destroy_connection(connection);
my_free(connection);
}
}
/*
Timer callback.
Invoked when connection times out (wait_timeout)
*/
static VOID CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
PVOID parameter, PTP_TIMER timer)
{
check_thread_init();
connection_t *con= (connection_t*)parameter;
ulonglong timeout= con->timeout;
if (timeout <= now())
{
con->thd->killed = KILL_CONNECTION;
if(con->thd->net.vio)
vio_shutdown(con->thd->net.vio, SD_BOTH);
}
else if(timeout != ULONGLONG_MAX)
{
/*
Reset timer.
There is a tiny possibility of a race condition, since the value of timeout
could have changed to a smaller value in the thread doing the io callback.
Given the relative unimportance of the wait timeout, we accept the race
condition.
*/
SetThreadpoolTimer(timer, (PFILETIME)&timeout, 0, 1000);
}
}
/*
Shared memory read callback.
Invoked when read event is set on connection.
*/
static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PTP_WAIT wait,TP_WAIT_RESULT wait_result)
{
connection_t *con= (connection_t *)context;
/* Disarm wait. */
SetThreadpoolWait(wait, NULL, NULL);
/*
This is an autoreset event, and one wakeup has already been consumed by the
threadpool, so the current state is "not set". Thus we need to set the event
again, or vio_read will hang.
*/
HANDLE h = con->thd->net.vio->event_server_wrote;
SetEvent(h);
io_completion_callback(instance, context, NULL, 0, 0 , 0);
}
/*
Notify the thread pool about a new connection.
NOTE: LOCK_thread_count is locked on entry. This function must unlock it.
*/
void tp_add_connection(THD *thd)
{
bool success = false;
connection_t *con = (connection_t *)my_malloc(sizeof(connection_t), 0);
if (con)
threads.append(thd);
mysql_mutex_unlock(&LOCK_thread_count);
if(!con)
{
tp_log_warning("Allocation failed", "tp_add_connection");
return;
}
init_connection(con);
con->thd= thd;
/* Try to login asynchronously, using threads in the pool */
PTP_WORK wrk = CreateThreadpoolWork(login_callback,con, &con->callback_environ);
if (wrk)
{
SubmitThreadpoolWork(wrk);
CloseThreadpoolWork(wrk);
}
else
{
/* Likely memory pressure */
login_callback(NULL, con, NULL); /* deletes connection if something goes wrong */
}
}
/*
Sets the number of idle threads the thread pool maintains in anticipation of new
requests.
*/
void tp_set_min_threads(uint val)
{
if (pool)
SetThreadpoolThreadMinimum(pool, val);
}
void tp_set_max_threads(uint val)
{
if (pool)
SetThreadpoolThreadMaximum(pool, val);
}
void tp_wait_begin(THD *thd, int type)
{
if (thd && thd->event_scheduler.data)
{
/* TODO: call CallbackMayRunLong() */
}
}
void tp_wait_end(THD *thd)
{
/* Do we need to do anything ? */
}

View File

@ -49,6 +49,25 @@ static my_bool has_no_data(Vio *vio __attribute__((unused)))
return FALSE;
}
#ifdef _WIN32
my_bool vio_shared_memory_has_data(Vio *vio)
{
return (vio->shared_memory_remain > 0);
}
int vio_shared_memory_shutdown(Vio *vio, int how)
{
SetEvent(vio->event_conn_closed);
SetEvent(vio->event_server_wrote);
return 0;
}
int vio_pipe_shutdown(Vio *vio, int how)
{
return vio_socket_shutdown(vio, how); /* cancels io */
}
#endif
/*
* Helper to fill most of the Vio* with defaults.
*/
@ -89,6 +108,7 @@ static void vio_init(Vio* vio, enum enum_vio_type type,
vio->poll_read =no_poll_read;
vio->is_connected =vio_is_connected_pipe;
vio->has_data =has_no_data;
vio->shutdown =vio_pipe_shutdown;
vio->timeout=vio_win32_timeout;
/* Set default timeout */
@ -116,7 +136,8 @@ static void vio_init(Vio* vio, enum enum_vio_type type,
vio->poll_read =no_poll_read;
vio->is_connected =vio_is_connected_shared_memory;
vio->has_data =has_no_data;
vio->has_data =vio_shared_memory_has_data;
vio->shutdown =vio_shared_memory_shutdown;
/* Currently, shared memory is on Windows only, hence the below is ok*/
vio->timeout= vio_win32_timeout;
@ -145,6 +166,7 @@ static void vio_init(Vio* vio, enum enum_vio_type type,
vio->poll_read =vio_poll_read;
vio->is_connected =vio_is_connected;
vio->has_data =vio_ssl_has_data;
vio->shutdown =vio_socket_shutdown;
DBUG_VOID_RETURN;
}
#endif /* HAVE_OPENSSL */
@ -163,6 +185,7 @@ static void vio_init(Vio* vio, enum enum_vio_type type,
vio->timeout =vio_timeout;
vio->poll_read =vio_poll_read;
vio->is_connected =vio_is_connected;
vio->shutdown =vio_socket_shutdown;
vio->has_data= (flags & VIO_BUFFERED_READ) ?
vio_buff_has_data : has_no_data;
DBUG_VOID_RETURN;

View File

@ -39,6 +39,7 @@ size_t vio_read_pipe(Vio *vio, uchar * buf, size_t size);
size_t vio_write_pipe(Vio *vio, const uchar * buf, size_t size);
my_bool vio_is_connected_pipe(Vio *vio);
int vio_close_pipe(Vio * vio);
int vio_shutdown_pipe(Vio *vio,int how);
#endif
#ifdef HAVE_SMEM
@ -46,8 +47,11 @@ size_t vio_read_shared_memory(Vio *vio, uchar * buf, size_t size);
size_t vio_write_shared_memory(Vio *vio, const uchar * buf, size_t size);
my_bool vio_is_connected_shared_memory(Vio *vio);
int vio_close_shared_memory(Vio * vio);
my_bool vio_shared_memory_has_data(Vio *vio);
int vio_shutdown_shared_memory(Vio *vio, int how);
#endif
int vio_socket_shutdown(Vio *vio, int how);
void vio_timeout(Vio *vio,uint which, uint timeout);
my_bool vio_buff_has_data(Vio *vio);

View File

@ -131,6 +131,60 @@ size_t vio_write(Vio * vio, const uchar* buf, size_t size)
DBUG_RETURN(r);
}
#ifdef _WIN32
static void CALLBACK cancel_io_apc(ULONG_PTR data)
{
CancelIo((HANDLE)data);
}
/*
Cancel IO on Windows.
On XP, issue CancelIo as an asynchronous procedure call to the thread that
started the IO. On Vista+, cancelation is done more simply with CancelIoEx.
*/
static int cancel_io(HANDLE handle, DWORD thread_id)
{
static BOOL (WINAPI *fp_CancelIoEx) (HANDLE, OVERLAPPED *);
static volatile int first_time= 1;
int rc= -1; /* stays -1 if OpenThread() fails */
HANDLE thread_handle;
if (first_time)
{
/* Try to load CancelIoEx using GetProcAddress */
InterlockedCompareExchangePointer((volatile void *)&fp_CancelIoEx,
GetProcAddress(GetModuleHandle("kernel32"), "CancelIoEx"), NULL);
first_time =0;
}
if (fp_CancelIoEx)
{
return fp_CancelIoEx(handle, NULL)? 0 :-1;
}
thread_handle= OpenThread(THREAD_SET_CONTEXT, FALSE, thread_id);
if (thread_handle)
{
rc= QueueUserAPC(cancel_io_apc, thread_handle, (ULONG_PTR)handle);
CloseHandle(thread_handle);
}
return rc;
}
#endif
int vio_socket_shutdown(Vio *vio, int how)
{
#ifdef _WIN32
return cancel_io((HANDLE)vio->sd, vio->thread_id);
#else
return shutdown(vio->sd, how);
#endif
}
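In practice vio_shutdown() is what lets one thread interrupt another thread's blocking read, which is how THD::awake() and tp_post_kill_notification() above use it. A minimal sketch of that pattern (the two functions here are illustrative only):

/* Sketch only: waking a reader blocked in vio_read() from another thread. */
static void *reader_thread(void *arg)
{
  Vio *vio= (Vio *)arg;
  uchar buf[128];
  /* Blocks until data arrives, or until vio_shutdown() aborts the read. */
  size_t len= vio_read(vio, buf, sizeof(buf));
  return (void *)(intptr_t)len;
}

static void kill_reader(Vio *vio)
{
  /* POSIX: shutdown(sd, SHUT_RDWR); Windows: CancelIo/CancelIoEx on the handle. */
  vio_shutdown(vio, SHUT_RDWR);
}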
int vio_blocking(Vio * vio __attribute__((unused)), my_bool set_blocking_mode,
my_bool *old_mode)
{
@ -726,6 +780,22 @@ void vio_timeout(Vio *vio, uint which, uint timeout)
#ifdef __WIN__
/*
Disable posting IO completion event to the port.
In some cases (synchronous timed IO) we want to skip IOCP notifications.
*/
static void disable_iocp_notification(OVERLAPPED *overlapped)
{
HANDLE *handle = &(overlapped->hEvent);
*handle = ((HANDLE)((ULONG_PTR) *handle|1));
}
/* Enable posting IO completion event to the port */
static void enable_iocp_notification(OVERLAPPED *overlapped)
{
HANDLE *handle = &(overlapped->hEvent);
*handle = (HANDLE)((ULONG_PTR) *handle & ~1);
}
/*
Finish pending IO on pipe. Honor wait timeout
@ -737,7 +807,7 @@ static size_t pipe_complete_io(Vio* vio, char* buf, size_t size, DWORD timeout_m
DBUG_ENTER("pipe_complete_io");
ret= WaitForSingleObject(vio->pipe_overlapped.hEvent, timeout_ms);
ret= WaitForSingleObjectEx(vio->pipe_overlapped.hEvent, timeout_ms, TRUE);
/*
WaitForSingleObjectEx will normally return WAIT_OBJECT_0 (success, IO completed)
or WAIT_TIMEOUT.
@ -767,7 +837,8 @@ size_t vio_read_pipe(Vio * vio, uchar *buf, size_t size)
DBUG_ENTER("vio_read_pipe");
DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %u", vio->sd, (long) buf,
(uint) size));
disable_iocp_notification(&vio->pipe_overlapped);
if (ReadFile(vio->hPipe, buf, (DWORD)size, &bytes_read,
&(vio->pipe_overlapped)))
{
@ -777,13 +848,14 @@ size_t vio_read_pipe(Vio * vio, uchar *buf, size_t size)
{
if (GetLastError() != ERROR_IO_PENDING)
{
enable_iocp_notification(&vio->pipe_overlapped);
DBUG_PRINT("error",("ReadFile() returned last error %d",
GetLastError()));
DBUG_RETURN((size_t)-1);
}
retval= pipe_complete_io(vio, buf, size,vio->read_timeout_ms);
}
enable_iocp_notification(&vio->pipe_overlapped);
DBUG_PRINT("exit", ("%lld", (longlong)retval));
DBUG_RETURN(retval);
}
@ -796,7 +868,7 @@ size_t vio_write_pipe(Vio * vio, const uchar* buf, size_t size)
DBUG_ENTER("vio_write_pipe");
DBUG_PRINT("enter", ("sd: %d buf: 0x%lx size: %u", vio->sd, (long) buf,
(uint) size));
disable_iocp_notification(&vio->pipe_overlapped);
if (WriteFile(vio->hPipe, buf, (DWORD)size, &bytes_written,
&(vio->pipe_overlapped)))
{
@ -804,6 +876,7 @@ size_t vio_write_pipe(Vio * vio, const uchar* buf, size_t size)
}
else
{
enable_iocp_notification(&vio->pipe_overlapped);
if (GetLastError() != ERROR_IO_PENDING)
{
DBUG_PRINT("vio_error",("WriteFile() returned last error %d",
@ -812,7 +885,7 @@ size_t vio_write_pipe(Vio * vio, const uchar* buf, size_t size)
}
retval= pipe_complete_io(vio, (char *)buf, size, vio->write_timeout_ms);
}
enable_iocp_notification(&vio->pipe_overlapped);
DBUG_PRINT("exit", ("%lld", (longlong)retval));
DBUG_RETURN(retval);
}