
Added "pool-of-threads" handling (with libevent)

This is a backport of code from MySQL 6.0 with cleanups and extensions

The following new options are supported:
configure options:
  --with-libevent                  ; Enable use of libevent, which is needed for pool of threads

mysqld options:
--thread-handling=pool-of-threads  ; Use a pool of threads to handle queries
--thread-pool-size=#               ; Define how many threads should be created to handle all queries
--extra-port=#                     ; Extra TCP port that uses the old one-thread-per-connection method
--extra-max-connections=#          ; Number of connections to accept on 'extra-port'
--test-ignore-wrong-options        ; Ignore invalid values given to enum options (for mysql-test-run)
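
For illustration, a minimal sketch of enabling the feature; the option names come
from this commit, while the numeric values below are only examples:

  # Build with the bundled libevent so pool-of-threads is available
  ./configure --with-libevent && make

  # Start mysqld with the thread pool plus an extra port that still uses
  # the old one-thread-per-connection method (values are illustrative)
  mysqld --thread-handling=pool-of-threads --thread-pool-size=20 \
         --extra-port=3307 --extra-max-connections=10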



BUILD/SETUP.sh:
  Added libevent (and thus pool-of-threads) to max builds
CMakeLists.txt:
  Added libevent
Makefile.am:
  Added libevent
config/ac-macros/libevent.m4:
  Libevent code for configure
config/ac-macros/libevent_configure.m4:
  Libevent code for configure
configure.in:
  Added libevent
dbug/dbug.c:
  Added _db_is_pushed(); Needed for pool-of-threads code
extra/Makefile.am:
  Added libevent
extra/libevent:
  Libevent initial code
extra/libevent/CMakeLists.txt:
  Libevent initial code
extra/libevent/Makefile.am:
  Libevent initial code
extra/libevent/README:
  Libevent initial code
extra/libevent/WIN32-Code:
  Libevent initial code
extra/libevent/WIN32-Code/config.h:
  Libevent initial code
extra/libevent/WIN32-Code/misc.c:
  Libevent initial code
extra/libevent/WIN32-Code/misc.h:
  Libevent initial code
extra/libevent/WIN32-Code/tree.h:
  Libevent initial code
extra/libevent/WIN32-Code/win32.c:
  Libevent initial code
extra/libevent/buffer.c:
  Libevent initial code
extra/libevent/compat:
  Libevent initial code
extra/libevent/compat/sys:
  Libevent initial code
extra/libevent/compat/sys/_time.h:
  Libevent initial code
extra/libevent/compat/sys/queue.h:
  Libevent initial code
extra/libevent/compat/sys/tree.h:
  Libevent initial code
extra/libevent/devpoll.c:
  Libevent initial code
extra/libevent/epoll.c:
  Libevent initial code
extra/libevent/epoll_sub.c:
  Libevent initial code
extra/libevent/evbuffer.c:
  Libevent initial code
extra/libevent/evdns.c:
  Libevent initial code
extra/libevent/evdns.h:
  Libevent initial code
extra/libevent/event-config.h:
  Libevent initial code
extra/libevent/event-internal.h:
  Libevent initial code
extra/libevent/event.c:
  Libevent initial code
extra/libevent/event.h:
  Libevent initial code
extra/libevent/event_tagging.c:
  Libevent initial code
extra/libevent/evhttp.h:
  Libevent initial code
extra/libevent/evport.c:
  Libevent initial code
extra/libevent/evrpc-internal.h:
  Libevent initial code
extra/libevent/evrpc.c:
  Libevent initial code
extra/libevent/evrpc.h:
  Libevent initial code
extra/libevent/evsignal.h:
  Libevent initial code
extra/libevent/evutil.c:
  Libevent initial code
extra/libevent/evutil.h:
  Libevent initial code
extra/libevent/http-internal.h:
  Libevent initial code
extra/libevent/http.c:
  Libevent initial code
extra/libevent/kqueue.c:
  Libevent initial code
extra/libevent/log.c:
  Libevent initial code
extra/libevent/log.h:
  Libevent initial code
extra/libevent/min_heap.h:
  Libevent initial code
extra/libevent/poll.c:
  Libevent initial code
extra/libevent/select.c:
  Libevent initial code
extra/libevent/signal.c:
  Libevent initial code
extra/libevent/strlcpy-internal.h:
  Libevent initial code
extra/libevent/strlcpy.c:
  Libevent initial code
include/config-win.h:
  Libevent support
include/my_dbug.h:
  Added _db_is_pushed()
include/mysql.h.pp:
  Update to handle new prototypes
include/typelib.h:
  Split find_type_or_exit() into two functions
include/violite.h:
  Added vio_is_pending()
libmysqld/Makefile.am:
  Added libevent
mysql-test/include/have_pool_of_threads.inc:
  Added test for pool-of-threads
mysql-test/mysql-test-run.pl:
  Don't abort based on time and don't retry test cases when run under --gdb or --debug
mysql-test/r/crash_commit_before.result:
  Use GLOBAL for the debug variable
mysql-test/r/have_pool_of_threads.require:
  Added test for pool-of-threads
mysql-test/r/pool_of_threads.result:
  Added test for pool-of-threads
mysql-test/r/subselect_debug.result:
  Use GLOBAL for the debug variable
mysql-test/t/crash_commit_before.test:
  Use GLOBAL for the debug variable
mysql-test/t/merge-big.test:
  Use GLOBAL for the debug variable
mysql-test/t/pool_of_threads-master.opt:
  Added test for pool-of-threads
mysql-test/t/pool_of_threads.test:
  Added test for pool-of-threads (an example invocation is sketched after this file list)
mysys/typelib.c:
  Split find_type_or_exit() into find_type_with_warning() and find_type_or_exit()
sql/Makefile.am:
  Added libevent
sql/handler.cc:
  Indentation fix.
  Fixed a memory leak.
  Fixed a crash on exit when a handler plugin failed.
sql/mysql_priv.h:
  Added extra_max_connections and mysqld_extra_port
  Added extern declarations for functions from sql_connect.cc
sql/mysqld.cc:
  Added support for new mysqld options
  Added code for 'extra-port' and 'extra-max-connections'
  Split some functions into smaller pieces to be able to reuse code
  Added code for test-ignore-wrong-options
sql/scheduler.cc:
  Updated scheduler code from MySQL 6.0
sql/scheduler.h:
  Updated scheduler code from MySQL 6.0
sql/set_var.cc:
  Added support for changing "extra_max_connections"
sql/sql_class.cc:
  Initialize thread scheduler options in THD
sql/sql_class.h:
  Added extra_port and scheduler to 'THD'
sql/sql_connect.cc:
  Use thd->scheduler to check the number of connections and to terminate the connection
  Made some local functions global (for scheduler.cc)
vio/viosocket.c:
  Added 'vio_pending', needed for scheduler.cc
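
The new mysql-test case can be run with the scheduler enabled roughly as follows
(a sketch based on the libevent_test_option wired into the build; the exact
invocation may vary):

  cd mysql-test
  ./mysql-test-run.pl --mysqld=--thread-handling=pool-of-threads pool_of_threads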
Author: Michael Widenius
Date:   2009-03-13 00:27:35 +02:00
Commit: 4fe3425009 (parent 44e6c2f253)
79 changed files with 23812 additions and 214 deletions


@ -166,8 +166,8 @@ local_infile_configs="--enable-local-infile"
max_no_embedded_configs="$SSL_LIBRARY --with-plugins=max"
max_no_ndb_configs="$SSL_LIBRARY --with-plugins=max-no-ndb --with-embedded-server"
max_configs="$SSL_LIBRARY --with-plugins=max --with-embedded-server"
max_no_ndb_configs="$SSL_LIBRARY --with-plugins=max-no-ndb --with-embedded-server --with-libevent"
max_configs="$SSL_LIBRARY --with-plugins=max --with-embedded-server -with-libevent"
# Disable NDB in maria max builds
max_configs=$max_no_ndb_configs


@ -235,6 +235,7 @@ ADD_SUBDIRECTORY(scripts)
ADD_SUBDIRECTORY(zlib)
ADD_SUBDIRECTORY(extra/yassl)
ADD_SUBDIRECTORY(extra/yassl/taocrypt)
ADD_SUBDIRECTORY(extra/libevent)
ADD_SUBDIRECTORY(extra)
ADD_SUBDIRECTORY(storage/heap)
ADD_SUBDIRECTORY(storage/myisam)


@ -19,7 +19,9 @@ AUTOMAKE_OPTIONS = foreign
# These are built from source in the Docs directory
EXTRA_DIST = INSTALL-SOURCE INSTALL-WIN-SOURCE \
README COPYING EXCEPTIONS-CLIENT CMakeLists.txt
README COPYING EXCEPTIONS-CLIENT \
CMakeLists.txt \
config/ac-macros/libevent_configure.m4
SUBDIRS = . include @docs_dirs@ @zlib_dir@ \
@readline_topdir@ sql-common scripts \
@ -51,7 +53,6 @@ bin-dist: all
# Remove BK's "SCCS" subdirectories from source distribution
# Create initial database files for Windows installations and check them.
dist-hook:
rm -rf `find $(distdir) -type d -name SCCS -print`
mkdir -p $(distdir)/win
scripts/mysql_install_db --no-defaults --cross-bootstrap \
--builddir=$(top_builddir) \
@ -97,7 +98,7 @@ test-nr:
test-pr:
cd mysql-test ; \
@PERL@ ./mysql-test-run.pl $(force) $(mem) --ps-protocol --mysqld=--binlog-format=row
@PERL@ ./mysql-test-run.pl $(force) $(mem) --ps-protocol --mysqld=--binlog-format=row #@libevent_test_option@
test-ns:
cd mysql-test ; \


@ -0,0 +1,55 @@
dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_USE_BUNDLED_LIBEVENT
dnl
dnl SYNOPSIS
dnl MYSQL_USE_BUNDLED_LIBEVENT()
dnl
dnl DESCRIPTION
dnl Add defines so libevent is built and linked with
dnl ---------------------------------------------------------------------------
AC_DEFUN([MYSQL_USE_BUNDLED_LIBEVENT], [
libevent_dir="libevent"
AC_SUBST([libevent_dir])
libevent_libs="\$(top_builddir)/extra/libevent/libevent.a"
libevent_includes="-I\$(top_builddir)/extra/libevent"
libevent_test_option="--mysqld=--thread-handling=pool-of-threads"
AC_SUBST(libevent_libs)
AC_SUBST(libevent_includes)
AC_SUBST(libevent_test_option)
AC_DEFINE([HAVE_LIBEVENT], [1], [If we want to use libevent and have connection pooling])
AC_MSG_RESULT([using bundled libevent])
dnl Get the upstream file with the original libevent configure macros.
dnl Use builtin include for this, to work around path problems in old versions of aclocal.
builtin([include],[config/ac-macros/libevent_configure.m4])
])
dnl ------------------------------------------------------------------------
dnl Macro: MYSQL_CHECK_LIBEVENT
dnl
dnl SYNOPSIS
dnl MYSQL_CHECK_LIBEVENT
dnl
dnl ------------------------------------------------------------------------
AC_DEFUN([MYSQL_CHECK_LIBEVENT], [
AC_CONFIG_FILES(extra/libevent/Makefile)
AC_MSG_CHECKING(for libevent)
AC_ARG_WITH([libevent],
[ --with-libevent use libevent and have connection pooling],
[with_libevent=$withval],
[with_libevent=no]
)
if test "$with_libevent" != "no"; then
MYSQL_USE_BUNDLED_LIBEVENT
else
AC_MSG_RESULT([disabled])
fi
AM_CONDITIONAL([HAVE_LIBEVENT], [ test "$with_libevent" != "no" ])
])
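
For reference, a rough sketch of the configure output produced by the macros above
(abridged; the bundled libevent's own sub-checks are omitted):

  $ ./configure --with-libevent
  ...
  checking for libevent... using bundled libevent

  $ ./configure
  ...
  checking for libevent... disabled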


@ -0,0 +1,324 @@
dnl Checks for libraries.
AC_CHECK_LIB(socket, socket)
AC_CHECK_LIB(resolv, inet_aton)
AC_CHECK_LIB(rt, clock_gettime)
AC_CHECK_LIB(nsl, inet_ntoa)
dnl Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS(fcntl.h stdarg.h inttypes.h stdint.h poll.h signal.h unistd.h sys/epoll.h sys/time.h sys/queue.h sys/event.h sys/param.h sys/ioctl.h sys/select.h sys/devpoll.h port.h netinet/in6.h sys/socket.h)
if test "x$ac_cv_header_sys_queue_h" = "xyes"; then
AC_MSG_CHECKING(for TAILQ_FOREACH in sys/queue.h)
AC_EGREP_CPP(yes,
[
#include <sys/queue.h>
#ifdef TAILQ_FOREACH
yes
#endif
], [AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TAILQFOREACH, 1,
[Define if TAILQ_FOREACH is defined in <sys/queue.h>])],
[AC_MSG_RESULT(no)]
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timeradd in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timeradd
yes
#endif
], [ AC_DEFINE(HAVE_TIMERADD, 1,
[Define if timeradd is defined in <sys/time.h>])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)]
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timercmp in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timercmp
yes
#endif
], [ AC_DEFINE(HAVE_TIMERCMP, 1,
[Define if timercmp is defined in <sys/time.h>])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)]
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timerclear in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timerclear
yes
#endif
], [ AC_DEFINE(HAVE_TIMERCLEAR, 1,
[Define if timerclear is defined in <sys/time.h>])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)]
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timerisset in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timerisset
yes
#endif
], [ AC_DEFINE(HAVE_TIMERISSET, 1,
[Define if timerisset is defined in <sys/time.h>])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)]
)
fi
dnl Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST
AC_C_INLINE
AC_HEADER_TIME
dnl Checks for library functions.
AC_CHECK_FUNCS(gettimeofday vasprintf fcntl clock_gettime strtok_r strsep getaddrinfo getnameinfo strlcpy inet_ntop signal sigaction strtoll)
AC_CHECK_SIZEOF(long)
if test "x$ac_cv_func_clock_gettime" = "xyes"; then
AC_DEFINE(DNS_USE_CPU_CLOCK_FOR_ID, 1, [Define if clock_gettime is available in libc])
else
AC_DEFINE(DNS_USE_GETTIMEOFDAY_FOR_ID, 1, [Define is no secure id variant is available])
fi
AC_MSG_CHECKING(for F_SETFD in fcntl.h)
AC_EGREP_CPP(yes,
[
#define _GNU_SOURCE
#include <fcntl.h>
#ifdef F_SETFD
yes
#endif
], [ AC_DEFINE(HAVE_SETFD, 1,
[Define if F_SETFD is defined in <fcntl.h>])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)]
)
needsignal=no
haveselect=no
AC_CHECK_FUNCS(select, [haveselect=yes])
if test "x$haveselect" = "xyes" ; then
AC_LIBOBJ(select)
needsignal=yes
fi
havepoll=no
AC_CHECK_FUNCS(poll, [havepoll=yes])
if test "x$havepoll" = "xyes" ; then
AC_LIBOBJ(poll)
needsignal=yes
fi
haveepoll=no
AC_CHECK_FUNCS(epoll_ctl, [haveepoll=yes])
if test "x$haveepoll" = "xyes" ; then
AC_DEFINE(HAVE_EPOLL, 1,
[Define if your system supports the epoll system calls])
AC_LIBOBJ(epoll)
needsignal=yes
fi
havedevpoll=no
if test "x$ac_cv_header_sys_devpoll_h" = "xyes"; then
AC_DEFINE(HAVE_DEVPOLL, 1,
[Define if /dev/poll is available])
AC_LIBOBJ(devpoll)
fi
havekqueue=no
if test "x$ac_cv_header_sys_event_h" = "xyes"; then
AC_CHECK_FUNCS(kqueue, [havekqueue=yes])
if test "x$havekqueue" = "xyes" ; then
AC_MSG_CHECKING(for working kqueue)
AC_RUN_IFELSE(
[AC_LANG_PROGRAM(
[[
#include <sys/types.h>
#include <sys/time.h>
#include <sys/event.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
]],
[[
int kq;
int n;
int fd[2];
struct kevent ev;
struct timespec ts;
char buf[8000];
if (pipe(fd) == -1)
exit(1);
if (fcntl(fd[1], F_SETFL, O_NONBLOCK) == -1)
exit(1);
while ((n = write(fd[1], buf, sizeof(buf))) == sizeof(buf))
;
if ((kq = kqueue()) == -1)
exit(1);
ev.ident = fd[1];
ev.filter = EVFILT_WRITE;
ev.flags = EV_ADD | EV_ENABLE;
n = kevent(kq, &ev, 1, NULL, 0, NULL);
if (n == -1)
exit(1);
read(fd[0], buf, sizeof(buf));
ts.tv_sec = 0;
ts.tv_nsec = 0;
n = kevent(kq, NULL, 0, &ev, 1, &ts);
if (n == -1 || n == 0)
exit(1);
exit(0);
]]
)],
[AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_WORKING_KQUEUE, 1,
[Define if kqueue works correctly with pipes])
AC_LIBOBJ(kqueue)],
[AC_MSG_RESULT(no)],
[AC_MSG_RESULT(no)]
)
fi
fi
haveepollsyscall=no
if test "x$ac_cv_header_sys_epoll_h" = "xyes"; then
if test "x$haveepoll" = "xno" ; then
AC_MSG_CHECKING(for epoll system call)
AC_RUN_IFELSE(
[AC_LANG_PROGRAM(
[[
#include <stdint.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <unistd.h>
int
epoll_create(int size)
{
return (syscall(__NR_epoll_create, size));
}
]],
[[
int epfd;
epfd = epoll_create(256);
exit (epfd == -1 ? 1 : 0);
]]
)],
[AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_EPOLL, 1,
[Define if your system supports the epoll system calls])
needsignal=yes
AC_LIBOBJ(epoll_sub)
AC_LIBOBJ(epoll)],
[AC_MSG_RESULT(no)],
[AC_MSG_RESULT(no)]
)
fi
fi
haveeventports=no
AC_CHECK_FUNCS(port_create, [haveeventports=yes])
if test "x$haveeventports" = "xyes" ; then
AC_DEFINE(HAVE_EVENT_PORTS, 1,
[Define if your system supports event ports])
AC_LIBOBJ(evport)
needsignal=yes
fi
if test "x$bwin32" = "xtrue"; then
needsignal=yes
fi
if test "x$bwin32" = "xtrue"; then
needsignal=yes
fi
if test "x$needsignal" = "xyes" ; then
AC_LIBOBJ(signal)
fi
AC_TYPE_PID_T
AC_TYPE_SIZE_T
AC_CHECK_TYPES([uint64_t, uint32_t, uint16_t, uint8_t],[],[],
[[#ifdef HAVE_STDINT_H
#include <stdint.h>
#elif defined(HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif]])
AC_CHECK_SIZEOF(long long)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(short)
AC_CHECK_TYPES([struct in6_addr],[],[],
[[#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN6_H
#include <netinet/in6.h>
#endif]])
AC_MSG_CHECKING([for socklen_t])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM(
[[
#include <sys/types.h>
#include <sys/socket.h>
]],
[[
socklen_t x;
]]
)],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_DEFINE(socklen_t, unsigned int,
[Define to unsigned int if you dont have it])]
)
AC_MSG_CHECKING([whether our compiler supports __func__])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([],[[const char *cp = __func__;]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_CHECKING([whether our compiler supports __FUNCTION__])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([],[[const char *cp = __FUNCTION__;]])],
[AC_MSG_RESULT([yes])
AC_DEFINE(__func__, __FUNCTION__,
[Define to appropriate substitue if compiler doesnt have __func__])],
[AC_MSG_RESULT([no])
AC_DEFINE(__func__, __FILE__,
[Define to appropriate substitue if compiler doesnt have __func__])]
)]
)


@ -55,6 +55,7 @@ sinclude(config/ac-macros/large_file.m4)
sinclude(config/ac-macros/misc.m4)
sinclude(config/ac-macros/readline.m4)
sinclude(config/ac-macros/ssl.m4)
sinclude(config/ac-macros/libevent.m4)
sinclude(config/ac-macros/zlib.m4)
# Remember to add a directory sql/share/LANGUAGE
@ -2389,6 +2390,7 @@ MYSQL_CHECK_BIG_TABLES
MYSQL_CHECK_MAX_INDEXES
MYSQL_CHECK_VIO
MYSQL_CHECK_SSL
MYSQL_CHECK_LIBEVENT
#--------------------------------------------------------------------
# Declare our plugin modules


@ -878,6 +878,18 @@ void _db_push_(const char *control)
FixTraceFlags(old_fflags, cs);
}
/**
Returns TRUE if session-local settings have been set.
*/
int _db_is_pushed_()
{
CODE_STATE *cs= NULL;
get_code_state_or_return FALSE;
return (cs->stack != &init_settings);
}
/*
* FUNCTION
*


@ -1,4 +1,4 @@
# Copyright (C) 2000-2006 MySQL AB
# Copyright (C) 2000-2006 MySQL AB, 2009 Monty Program AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -24,8 +24,8 @@ BUILT_SOURCES= $(top_builddir)/include/mysqld_error.h \
pkginclude_HEADERS= $(BUILT_SOURCES)
EXTRA_PROGRAMS = comp_err
DISTCLEANFILES = $(BUILT_SOURCES) $(EXTRA_PROGRAMS)
SUBDIRS = @yassl_dir@
DIST_SUBDIRS = yassl
SUBDIRS = @yassl_dir@ @libevent_dir@
DIST_SUBDIRS = yassl libevent
# This will build mysqld_error.h, mysqld_ername.h and sql_state.h
# NOTE Built files should depend on their sources to avoid


@ -0,0 +1,34 @@
INCLUDE_DIRECTORIES(
${CMAKE_SOURCE_DIR}/extra/libevent
${CMAKE_SOURCE_DIR}/extra/libevent/compat
${CMAKE_SOURCE_DIR}/extra/libevent/WIN32-Code)
IF(MSVC)
ADD_DEFINITIONS("-DWIN32 -DHAVE_CONFIG_H")
ENDIF(MSVC)
SET(LIBEVENT_SOURCES
buffer.c
evbuffer.c
event.c
evutil.c
log.c
signal.c
strlcpy.c
WIN32-Code/win32.c
WIN32-Code/config.h
WIN32-Code/misc.c
WIN32-Code/misc.h
event-internal.h
event.h
evsignal.h
evutil.h
log.h
min_heap.h
strlcpy-internal.h
)
CONFIGURE_FILE(WIN32-Code/config.h ${CMAKE_SOURCE_DIR}/extra/libevent/event-config.h COPYONLY)
IF(NOT SOURCE_SUBLIBS)
ADD_LIBRARY(libevent ${LIBEVENT_SOURCES})
ENDIF(NOT SOURCE_SUBLIBS)


@ -0,0 +1,39 @@
AUTOMAKE_OPTIONS = foreign no-dependencies
EXTRA_DIST = README compat/sys \
buffer.c epoll.c evbuffer.c event.c evport.c evutil.c kqueue.c poll.c \
signal.c devpoll.c epoll_sub.c evdns.c event_tagging.c evrpc.c http.c \
log.c select.c strlcpy.c \
evdns.h event.h evrpc-internal.h evsignal.h http-internal.h log.h \
min_heap.h event-internal.h evhttp.h evrpc.h evutil.h strlcpy-internal.h \
WIN32-Code/misc.c WIN32-Code/misc.h WIN32-Code/config.h WIN32-Code/tree.h \
WIN32-Code/win32.c CMakeLists.txt
DISTCLEANFILES = event-config.h
noinst_LIBRARIES = libevent.a
libevent_a_SOURCES = event.c buffer.c evbuffer.c log.c evutil.c \
select.c poll.c epoll.c epoll_sub.c devpoll.c kqueue.c \
evport.c signal.c
include_HEADERS = event.h evutil.h event-config.h
BUILT_SOURCES = event-config.h
event-config.h: $(top_srcdir)/include/config.h
echo '/* event-config.h' > $@
echo ' * Generated by autoconf; post-processed by libevent.' >> $@
echo ' * Do not edit this file.' >> $@
echo ' * Do not rely on macros in this file existing in later versions.'>> $@
echo ' */' >> $@
echo '#ifndef _EVENT_CONFIG_H_' >> $@
echo '#define _EVENT_CONFIG_H_' >> $@
sed -e 's/#define /#define _EVENT_/' \
-e 's/#undef /#undef _EVENT_/' \
-e 's/#ifndef /#ifndef _EVENT_/' < $(top_srcdir)/include/config.h >> $@
echo "#endif" >> $@
AM_CPPFLAGS = -Icompat -I$(top_srcdir)/include

extra/libevent/README

@ -0,0 +1,57 @@
To build libevent, type
$ ./configure && make
(If you got libevent from the subversion repository, you will
first need to run the included "autogen.sh" script in order to
generate the configure script.)
Install as root via
# make install
You can run the regression tests by
$ make verify
Before, reporting any problems, please run the regression tests.
To enable the low-level tracing build the library as:
CFLAGS=-DUSE_DEBUG ./configure [...]
Acknowledgements:
-----------------
The following people have helped with suggestions, ideas, code or
fixing bugs:
Alejo
Weston Andros Adamson
William Ahern
Stas Bekman
Andrew Danforth
Mike Davis
Shie Erlich
Alexander von Gernler
Artur Grabowski
Aaron Hopkins
Claudio Jeker
Scott Lamb
Adam Langley
Philip Lewis
David Libenzi
Nick Mathewson
Andrey Matveev
Richard Nyberg
Jon Oberheide
Phil Oleson
Dave Pacheco
Tassilo von Parseval
Pierre Phaneuf
Jon Poland
Bert JW Regeer
Dug Song
Taral
If I have forgotten your name, please contact me.


@ -0,0 +1,247 @@
/* config.h. Generated by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
/* Define if clock_gettime is available in libc */
/* #undef DNS_USE_CPU_CLOCK_FOR_ID */
/* Define if no secure id variant is available */
#define DNS_USE_FTIME_FOR_ID 1
/* Define if no secure id variant is available */
/* #define DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
/* Define to 1 if you have the `clock_gettime' function. */
/* #undef HAVE_CLOCK_GETTIME */
/* Define if /dev/poll is available */
/* #undef HAVE_DEVPOLL */
/* Define to 1 if you have the <dlfcn.h> header file. */
/* #undef HAVE_DLFCN_H */
/* Define if your system supports the epoll system calls */
/* #undef HAVE_EPOLL */
/* Define to 1 if you have the `epoll_ctl' function. */
/* #undef HAVE_EPOLL_CTL */
/* Define if your system supports event ports */
/* #undef HAVE_EVENT_PORTS */
/* Define to 1 if you have the `fcntl' function. */
/* #undef HAVE_FCNTL */
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the `getaddrinfo' function. */
/* #undef HAVE_GETADDRINFO */
/* Define to 1 if you have the `getnameinfo' function. */
/* #undef HAVE_GETNAMEINFO */
/* Define to 1 if you have the `gettimeofday' function. */
/* #define HAVE_GETTIMEOFDAY 1 */
/* Define to 1 if you have the `inet_ntop' function. */
/* #undef HAVE_INET_NTOP */
/* Define to 1 if you have the <inttypes.h> header file. */
/* #undef HAVE_INTTYPES_H 1 */
/* Define to 1 if you have the `kqueue' function. */
/* #undef HAVE_KQUEUE */
/* Define to 1 if you have the `nsl' library (-lnsl). */
/* #undef HAVE_LIBNSL */
/* Define to 1 if you have the `resolv' library (-lresolv). */
/* #undef HAVE_LIBRESOLV */
/* Define to 1 if you have the `rt' library (-lrt). */
/* #undef HAVE_LIBRT */
/* Define to 1 if you have the `socket' library (-lsocket). */
/* #undef HAVE_LIBSOCKET */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the <netinet/in6.h> header file. */
/* #undef HAVE_NETINET_IN6_H */
/* Define to 1 if you have the `poll' function. */
/* #undef HAVE_POLL */
/* Define to 1 if you have the <poll.h> header file. */
/* #undef HAVE_POLL_H */
/* Define to 1 if you have the `port_create' function. */
/* #undef HAVE_PORT_CREATE */
/* Define to 1 if you have the <port.h> header file. */
/* #undef HAVE_PORT_H */
/* Define to 1 if you have the `select' function. */
/* #undef HAVE_SELECT */
/* Define if F_SETFD is defined in <fcntl.h> */
/* #undef HAVE_SETFD */
/* Define to 1 if you have the `sigaction' function. */
/* #undef HAVE_SIGACTION */
/* Define to 1 if you have the `signal' function. */
#define HAVE_SIGNAL 1
/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1
/* Define to 1 if you have the <stdarg.h> header file. */
#define HAVE_STDARG_H 1
/* Define to 1 if you have the <stdint.h> header file. */
/* #define HAVE_STDINT_H 1 */
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcpy' function. */
/* #undef HAVE_STRLCPY */
/* Define to 1 if you have the `strsep' function. */
/* #undef HAVE_STRSEP */
/* Define to 1 if you have the `strtok_r' function. */
/* #undef HAVE_STRTOK_R */
/* Define to 1 if the system has the type `struct in6_addr'. */
#define HAVE_STRUCT_IN6_ADDR 1
/* Define to 1 if you have the <sys/devpoll.h> header file. */
/* #undef HAVE_SYS_DEVPOLL_H */
/* Define to 1 if you have the <sys/epoll.h> header file. */
/* #undef HAVE_SYS_EPOLL_H */
/* Define to 1 if you have the <sys/event.h> header file. */
/* #undef HAVE_SYS_EVENT_H */
/* Define to 1 if you have the <sys/ioctl.h> header file. */
/* #undef HAVE_SYS_IOCTL_H */
/* Define to 1 if you have the <sys/queue.h> header file. */
/* #undef HAVE_SYS_QUEUE_H */
/* Define to 1 if you have the <sys/select.h> header file. */
/* #undef HAVE_SYS_SELECT_H */
/* Define to 1 if you have the <sys/socket.h> header file. */
/* #undef HAVE_SYS_SOCKET_H */
/* Define to 1 if you have the <sys/stat.h> header file. */
/* #define HAVE_SYS_STAT_H 1 */
/* Define to 1 if you have the <sys/time.h> header file. */
/* #define HAVE_SYS_TIME_H 1 */
/* Define to 1 if you have the <sys/types.h> header file. */
/* #define HAVE_SYS_TYPES_H 1 */
/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
/* #undef HAVE_TAILQFOREACH */
/* Define if timeradd is defined in <sys/time.h> */
/* #undef HAVE_TIMERADD */
/* Define if timerclear is defined in <sys/time.h> */
/* #define HAVE_TIMERCLEAR 1 */
/* Define if timercmp is defined in <sys/time.h> */
#define HAVE_TIMERCMP 1
/* Define if timerisset is defined in <sys/time.h> */
#define HAVE_TIMERISSET 1
/* Define to 1 if you have the <unistd.h> header file. */
/* #define HAVE_UNISTD_H 1 */
/* Define to 1 if you have the `vasprintf' function. */
/* #undef HAVE_VASPRINTF */
/* Define if kqueue works correctly with pipes */
/* #undef HAVE_WORKING_KQUEUE */
/* Name of package */
#ifndef PACKAGE
#define PACKAGE "libevent"
#endif
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME ""
/* Define to the version of this package. */
#define PACKAGE_VERSION ""
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define TIME_WITH_SYS_TIME 1
/* Version number of package */
#define VERSION "1.3.99-trunk"
#ifndef __func__
/* Define to appropriate substitue if compiler doesnt have __func__ */
#if defined(_MSC_VER) && _MSC_VER < 1300
#define __func__ "??"
#else
#define __func__ __FUNCTION__
#endif
#endif
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
#define inline __inline
#endif
/* Define to `int' if <sys/types.h> does not define. */
/* #undef pid_t */
/* Define to `unsigned' if <sys/types.h> does not define. */
/* #undef size_t */
/* Define to unsigned int if you dont have it */
#define socklen_t unsigned int
/* Define to `unsigned short' if <sys/types.h> does not define. */
#define uint16_t unsigned short
/* Define to `unsigned int' if <sys/types.h> does not define. */
#define uint32_t unsigned int
/* Define to `unsigned long long' if <sys/types.h> does not define. */
#define uint64_t __uint64_t
/* Define to `unsigned char' if <sys/types.h> does not define. */
#define uint8_t unsigned char


@ -0,0 +1,38 @@
#include <stdio.h>
#include <string.h>
#include <windows.h>
#include <sys/timeb.h>
#include <time.h>
#ifdef __GNUC__
/*our prototypes for timeval and timezone are in here, just in case the above
headers don't have them*/
#include "misc.h"
#endif
/****************************************************************************
*
* Function: gettimeofday(struct timeval *, struct timezone *)
*
* Purpose: Get current time of day.
*
* Arguments: tv => Place to store the curent time of day.
* tz => Ignored.
*
* Returns: 0 => Success.
*
****************************************************************************/
#ifndef HAVE_GETTIMEOFDAY
int gettimeofday(struct timeval *tv, struct timezone *tz) {
struct _timeb tb;
if(tv == NULL)
return -1;
_ftime(&tb);
tv->tv_sec = (long) tb.time;
tv->tv_usec = ((int) tb.millitm) * 1000;
return 0;
}
#endif


@ -0,0 +1,11 @@
#ifndef MISC_H
#define MISC_H
struct timezone;
struct timeval;
#ifndef HAVE_GETTIMEOFDAY
int gettimeofday(struct timeval *,struct timezone *);
#endif
#endif

File diff suppressed because it is too large.


@ -0,0 +1,471 @@
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* Copyright 2003 Michael A. Davis <mike@datanerds.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef _MSC_VER
#include "./config.h"
#else
/* Avoid the windows/msvc thing. */
#include "../config.h"
#endif
#include <winsock2.h>
#include <windows.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#define RB_AUGMENT(x) (void)(x)
#include "./tree.h"
#include "log.h"
#include "event.h"
#include "event-internal.h"
#define XFREE(ptr) do { if (ptr) free(ptr); } while(0)
extern struct event_list timequeue;
extern struct event_list addqueue;
#if 0
extern struct event_list signalqueue;
#endif
struct win_fd_set {
u_int fd_count;
SOCKET fd_array[1];
};
int evsigcaught[NSIG];
volatile sig_atomic_t signal_caught = 0;
/* MSDN says this is required to handle SIGFPE */
volatile double SIGFPE_REQ = 0.0f;
#if 0
static void signal_handler(int sig);
void signal_process(void);
int signal_recalc(void);
#endif
struct event_entry {
RB_ENTRY(event_entry) node;
SOCKET sock;
int read_pos;
int write_pos;
struct event *read_event;
struct event *write_event;
};
static int
compare(struct event_entry *a, struct event_entry *b)
{
if (a->sock < b->sock)
return -1;
else if (a->sock > b->sock)
return 1;
else
return 0;
}
struct win32op {
int fd_setsz;
struct win_fd_set *readset_in;
struct win_fd_set *writeset_in;
struct win_fd_set *readset_out;
struct win_fd_set *writeset_out;
struct win_fd_set *exset_out;
RB_HEAD(event_map, event_entry) event_root;
};
RB_PROTOTYPE(event_map, event_entry, node, compare);
RB_GENERATE(event_map, event_entry, node, compare);
void *win32_init (struct event_base *);
int win32_insert (void *, struct event *);
int win32_del (void *, struct event *);
int win32_dispatch (struct event_base *base, void *, struct timeval *);
void win32_dealloc (struct event_base *, void *);
struct eventop win32ops = {
"win32",
win32_init,
win32_insert,
win32_del,
win32_dispatch,
win32_dealloc,
0
};
#define FD_SET_ALLOC_SIZE(n) ((sizeof(struct win_fd_set) + ((n)-1)*sizeof(SOCKET)))
static int
realloc_fd_sets(struct win32op *op, size_t new_size)
{
size_t size;
assert(new_size >= op->readset_in->fd_count &&
new_size >= op->writeset_in->fd_count);
assert(new_size >= 1);
size = FD_SET_ALLOC_SIZE(new_size);
if (!(op->readset_in = realloc(op->readset_in, size)))
return (-1);
if (!(op->writeset_in = realloc(op->writeset_in, size)))
return (-1);
if (!(op->readset_out = realloc(op->readset_out, size)))
return (-1);
if (!(op->exset_out = realloc(op->exset_out, size)))
return (-1);
if (!(op->writeset_out = realloc(op->writeset_out, size)))
return (-1);
op->fd_setsz = (int)new_size;
return (0);
}
static int
timeval_to_ms(struct timeval *tv)
{
return ((tv->tv_sec * 1000) + (tv->tv_usec / 1000));
}
static struct event_entry*
get_event_entry(struct win32op *op, SOCKET s, int create)
{
struct event_entry key, *val;
key.sock = s;
val = RB_FIND(event_map, &op->event_root, &key);
if (val || !create)
return val;
if (!(val = calloc(1, sizeof(struct event_entry)))) {
event_warn("%s: calloc", __func__);
return NULL;
}
val->sock = s;
val->read_pos = val->write_pos = -1;
RB_INSERT(event_map, &op->event_root, val);
return val;
}
static int
do_fd_set(struct win32op *op, struct event_entry *ent, int read)
{
SOCKET s = ent->sock;
struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
if (read) {
if (ent->read_pos >= 0)
return (0);
} else {
if (ent->write_pos >= 0)
return (0);
}
if (set->fd_count == op->fd_setsz) {
if (realloc_fd_sets(op, op->fd_setsz*2))
return (-1);
/* set pointer will have changed and needs reiniting! */
set = read ? op->readset_in : op->writeset_in;
}
set->fd_array[set->fd_count] = s;
if (read)
ent->read_pos = set->fd_count;
else
ent->write_pos = set->fd_count;
return (set->fd_count++);
}
static int
do_fd_clear(struct win32op *op, struct event_entry *ent, int read)
{
int i;
struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
if (read) {
i = ent->read_pos;
ent->read_pos = -1;
} else {
i = ent->write_pos;
ent->write_pos = -1;
}
if (i < 0)
return (0);
if (--set->fd_count != i) {
struct event_entry *ent2;
SOCKET s2;
s2 = set->fd_array[i] = set->fd_array[set->fd_count];
ent2 = get_event_entry(op, s2, 0);
if (!ent) /* This indicates a bug. */
return (0);
if (read)
ent2->read_pos = i;
else
ent2->write_pos = i;
}
return (0);
}
#define NEVENT 64
void *
win32_init(struct event_base *_base)
{
struct win32op *winop;
size_t size;
if (!(winop = calloc(1, sizeof(struct win32op))))
return NULL;
winop->fd_setsz = NEVENT;
size = FD_SET_ALLOC_SIZE(NEVENT);
if (!(winop->readset_in = malloc(size)))
goto err;
if (!(winop->writeset_in = malloc(size)))
goto err;
if (!(winop->readset_out = malloc(size)))
goto err;
if (!(winop->writeset_out = malloc(size)))
goto err;
if (!(winop->exset_out = malloc(size)))
goto err;
RB_INIT(&winop->event_root);
winop->readset_in->fd_count = winop->writeset_in->fd_count = 0;
winop->readset_out->fd_count = winop->writeset_out->fd_count
= winop->exset_out->fd_count = 0;
evsignal_init(_base);
return (winop);
err:
XFREE(winop->readset_in);
XFREE(winop->writeset_in);
XFREE(winop->readset_out);
XFREE(winop->writeset_out);
XFREE(winop->exset_out);
XFREE(winop);
return (NULL);
}
int
win32_insert(void *op, struct event *ev)
{
struct win32op *win32op = op;
struct event_entry *ent;
if (ev->ev_events & EV_SIGNAL) {
return (evsignal_add(ev));
}
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
return (0);
ent = get_event_entry(win32op, ev->ev_fd, 1);
if (!ent)
return (-1); /* out of memory */
event_debug(("%s: adding event for %d", __func__, (int)ev->ev_fd));
if (ev->ev_events & EV_READ) {
if (do_fd_set(win32op, ent, 1)<0)
return (-1);
ent->read_event = ev;
}
if (ev->ev_events & EV_WRITE) {
if (do_fd_set(win32op, ent, 0)<0)
return (-1);
ent->write_event = ev;
}
return (0);
}
int
win32_del(void *op, struct event *ev)
{
struct win32op *win32op = op;
struct event_entry *ent;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
if (!(ent = get_event_entry(win32op, ev->ev_fd, 0)))
return (-1);
event_debug(("%s: Removing event for %d", __func__, ev->ev_fd));
if (ev == ent->read_event) {
do_fd_clear(win32op, ent, 1);
ent->read_event = NULL;
}
if (ev == ent->write_event) {
do_fd_clear(win32op, ent, 0);
ent->write_event = NULL;
}
if (!ent->read_event && !ent->write_event) {
RB_REMOVE(event_map, &win32op->event_root, ent);
free(ent);
}
return 0;
}
static void
fd_set_copy(struct win_fd_set *out, const struct win_fd_set *in)
{
out->fd_count = in->fd_count;
memcpy(out->fd_array, in->fd_array, in->fd_count * (sizeof(SOCKET)));
}
/*
static void dump_fd_set(struct win_fd_set *s)
{
unsigned int i;
printf("[ ");
for(i=0;i<s->fd_count;++i)
printf("%d ",(int)s->fd_array[i]);
printf("]\n");
}
*/
int
win32_dispatch(struct event_base *base, void *op,
struct timeval *tv)
{
struct win32op *win32op = op;
int res = 0;
u_int i;
int fd_count;
fd_set_copy(win32op->readset_out, win32op->readset_in);
fd_set_copy(win32op->exset_out, win32op->readset_in);
fd_set_copy(win32op->writeset_out, win32op->writeset_in);
fd_count =
(win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
win32op->readset_out->fd_count : win32op->writeset_out->fd_count;
if (!fd_count) {
/* Windows doesn't like you to call select() with no sockets */
Sleep(timeval_to_ms(tv));
evsignal_process(base);
return (0);
}
res = select(fd_count,
(struct fd_set*)win32op->readset_out,
(struct fd_set*)win32op->writeset_out,
(struct fd_set*)win32op->exset_out, tv);
event_debug(("%s: select returned %d", __func__, res));
if(res <= 0) {
evsignal_process(base);
return res;
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
for (i=0; i<win32op->readset_out->fd_count; ++i) {
struct event_entry *ent;
SOCKET s = win32op->readset_out->fd_array[i];
if ((ent = get_event_entry(win32op, s, 0)) && ent->read_event)
event_active(ent->read_event, EV_READ, 1);
}
for (i=0; i<win32op->exset_out->fd_count; ++i) {
struct event_entry *ent;
SOCKET s = win32op->exset_out->fd_array[i];
if ((ent = get_event_entry(win32op, s, 0)) && ent->read_event)
event_active(ent->read_event, EV_READ, 1);
}
for (i=0; i<win32op->writeset_out->fd_count; ++i) {
struct event_entry *ent;
SOCKET s = win32op->writeset_out->fd_array[i];
if ((ent = get_event_entry(win32op, s, 0)) && ent->write_event)
event_active(ent->write_event, EV_WRITE, 1);
}
#if 0
if (signal_recalc() == -1)
return (-1);
#endif
return (0);
}
void
win32_dealloc(struct event_base *_base, void *arg)
{
struct win32op *win32op = arg;
evsignal_dealloc(_base);
if (win32op->readset_in)
free(win32op->readset_in);
if (win32op->writeset_in)
free(win32op->writeset_in);
if (win32op->readset_out)
free(win32op->readset_out);
if (win32op->writeset_out)
free(win32op->writeset_out);
if (win32op->exset_out)
free(win32op->exset_out);
/* XXXXX free the tree. */
memset(win32op, 0, sizeof(win32op));
free(win32op);
}
#if 0
static void
signal_handler(int sig)
{
evsigcaught[sig]++;
signal_caught = 1;
}
int
signal_recalc(void)
{
struct event *ev;
/* Reinstall our signal handler. */
TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
if((int)signal(EVENT_SIGNAL(ev), signal_handler) == -1)
return (-1);
}
return (0);
}
void
signal_process(void)
{
struct event *ev;
short ncalls;
TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
ncalls = evsigcaught[EVENT_SIGNAL(ev)];
if (ncalls) {
if (!(ev->ev_events & EV_PERSIST))
event_del(ev);
event_active(ev, EV_SIGNAL, ncalls);
}
}
memset(evsigcaught, 0, sizeof(evsigcaught));
signal_caught = 0;
}
#endif

extra/libevent/buffer.c

@ -0,0 +1,455 @@
/*
* Copyright (c) 2002, 2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WIN32
#include <winsock2.h>
#include <windows.h>
#endif
#ifdef HAVE_VASPRINTF
/* If we have vasprintf, we need to define this before we include stdio.h. */
#define _GNU_SOURCE
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "event.h"
#include "config.h"
struct evbuffer *
evbuffer_new(void)
{
struct evbuffer *buffer;
buffer = calloc(1, sizeof(struct evbuffer));
return (buffer);
}
void
evbuffer_free(struct evbuffer *buffer)
{
if (buffer->orig_buffer != NULL)
free(buffer->orig_buffer);
free(buffer);
}
/*
* This is a destructive add. The data from one buffer moves into
* the other buffer.
*/
#define SWAP(x,y) do { \
(x)->buffer = (y)->buffer; \
(x)->orig_buffer = (y)->orig_buffer; \
(x)->misalign = (y)->misalign; \
(x)->totallen = (y)->totallen; \
(x)->off = (y)->off; \
} while (0)
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
int res;
/* Short cut for better performance */
if (outbuf->off == 0) {
struct evbuffer tmp;
size_t oldoff = inbuf->off;
/* Swap them directly */
SWAP(&tmp, outbuf);
SWAP(outbuf, inbuf);
SWAP(inbuf, &tmp);
/*
* Optimization comes with a price; we need to notify the
* buffer if necessary of the changes. oldoff is the amount
* of data that we transfered from inbuf to outbuf
*/
if (inbuf->off != oldoff && inbuf->cb != NULL)
(*inbuf->cb)(inbuf, oldoff, inbuf->off, inbuf->cbarg);
if (oldoff && outbuf->cb != NULL)
(*outbuf->cb)(outbuf, 0, oldoff, outbuf->cbarg);
return (0);
}
res = evbuffer_add(outbuf, inbuf->buffer, inbuf->off);
if (res == 0) {
/* We drain the input buffer on success */
evbuffer_drain(inbuf, inbuf->off);
}
return (res);
}
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
char *buffer;
size_t space;
size_t oldoff = buf->off;
int sz;
va_list aq;
/* make sure that at least some space is available */
evbuffer_expand(buf, 64);
for (;;) {
size_t used = buf->misalign + buf->off;
buffer = (char *)buf->buffer + buf->off;
assert(buf->totallen >= used);
space = buf->totallen - used;
#ifndef va_copy
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
#endif
va_copy(aq, ap);
#ifdef WIN32
sz = _vsnprintf(buffer, space - 1, fmt, aq);
buffer[space - 1] = '\0';
#else
sz = vsnprintf(buffer, space, fmt, aq);
#endif
va_end(aq);
if (sz < 0)
return (-1);
if ((size_t)sz < space) {
buf->off += sz;
if (buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (sz);
}
if (evbuffer_expand(buf, sz + 1) == -1)
return (-1);
}
/* NOTREACHED */
}
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
int res = -1;
va_list ap;
va_start(ap, fmt);
res = evbuffer_add_vprintf(buf, fmt, ap);
va_end(ap);
return (res);
}
/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen)
{
size_t nread = datlen;
if (nread >= buf->off)
nread = buf->off;
memcpy(data, buf->buffer, nread);
evbuffer_drain(buf, nread);
return (int)(nread);
}
/*
* Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
* The returned buffer needs to be freed by the called.
*/
char *
evbuffer_readline(struct evbuffer *buffer)
{
u_char *data = EVBUFFER_DATA(buffer);
size_t len = EVBUFFER_LENGTH(buffer);
char *line;
unsigned int i;
for (i = 0; i < len; i++) {
if (data[i] == '\r' || data[i] == '\n')
break;
}
if (i == len)
return (NULL);
if ((line = malloc(i + 1)) == NULL) {
fprintf(stderr, "%s: out of memory\n", __func__);
evbuffer_drain(buffer, i);
return (NULL);
}
memcpy(line, data, i);
line[i] = '\0';
/*
* Some protocols terminate a line with '\r\n', so check for
* that, too.
*/
if ( i < len - 1 ) {
char fch = data[i], sch = data[i+1];
/* Drain one more character if needed */
if ( (sch == '\r' || sch == '\n') && sch != fch )
i += 1;
}
evbuffer_drain(buffer, i + 1);
return (line);
}
/* Adds data to an event buffer */
static void
evbuffer_align(struct evbuffer *buf)
{
memmove(buf->orig_buffer, buf->buffer, buf->off);
buf->buffer = buf->orig_buffer;
buf->misalign = 0;
}
/* Expands the available space in the event buffer to at least datlen */
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
size_t need = buf->misalign + buf->off + datlen;
/* If we can fit all the data, then we don't have to do anything */
if (buf->totallen >= need)
return (0);
/*
* If the misalignment fulfills our data needs, we just force an
* alignment to happen. Afterwards, we have enough space.
*/
if (buf->misalign >= datlen) {
evbuffer_align(buf);
} else {
void *newbuf;
size_t length = buf->totallen;
if (length < 256)
length = 256;
while (length < need)
length <<= 1;
if (buf->orig_buffer != buf->buffer)
evbuffer_align(buf);
if ((newbuf = realloc(buf->buffer, length)) == NULL)
return (-1);
buf->orig_buffer = buf->buffer = newbuf;
buf->totallen = length;
}
return (0);
}
int
evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen)
{
size_t need = buf->misalign + buf->off + datlen;
size_t oldoff = buf->off;
if (buf->totallen < need) {
if (evbuffer_expand(buf, datlen) == -1)
return (-1);
}
memcpy(buf->buffer + buf->off, data, datlen);
buf->off += datlen;
if (datlen && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (0);
}
void
evbuffer_drain(struct evbuffer *buf, size_t len)
{
size_t oldoff = buf->off;
if (len >= buf->off) {
buf->off = 0;
buf->buffer = buf->orig_buffer;
buf->misalign = 0;
goto done;
}
buf->buffer += len;
buf->misalign += len;
buf->off -= len;
done:
/* Tell someone about changes in this buffer */
if (buf->off != oldoff && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
}
/*
* Reads data from a file descriptor into a buffer.
*/
#define EVBUFFER_MAX_READ 4096
int
evbuffer_read(struct evbuffer *buf, int fd, int howmuch)
{
u_char *p;
size_t oldoff = buf->off;
int n = EVBUFFER_MAX_READ;
#if defined(FIONREAD)
#ifdef WIN32
long lng = (long)n;
if (ioctlsocket(fd, FIONREAD, &lng) == -1 || (n=lng) == 0) {
#else
if (ioctl(fd, FIONREAD, &n) == -1 || n == 0) {
#endif
n = EVBUFFER_MAX_READ;
} else if (n > EVBUFFER_MAX_READ && n > howmuch) {
/*
* It's possible that a lot of data is available for
* reading. We do not want to exhaust resources
* before the reader has a chance to do something
* about it. If the reader does not tell us how much
* data we should read, we artifically limit it.
*/
if ((size_t)n > (buf->totallen << 2))
n = (int)(buf->totallen << 2);
if (n < EVBUFFER_MAX_READ)
n = EVBUFFER_MAX_READ;
}
#endif
if (howmuch < 0 || howmuch > n)
howmuch = n;
/* If we don't have FIONREAD, we might waste some space here */
if (evbuffer_expand(buf, howmuch) == -1)
return (-1);
/* We can append new data at this point */
p = buf->buffer + buf->off;
#ifndef WIN32
n = read(fd, p, howmuch);
#else
n = recv(fd, p, howmuch, 0);
#endif
if (n == -1)
return (-1);
if (n == 0)
return (0);
buf->off += n;
/* Tell someone about changes in this buffer */
if (buf->off != oldoff && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (n);
}
int
evbuffer_write(struct evbuffer *buffer, int fd)
{
int n;
#ifndef WIN32
n = write(fd, buffer->buffer, buffer->off);
#else
n = send(fd, buffer->buffer, (int)buffer->off, 0);
#endif
if (n == -1)
return (-1);
if (n == 0)
return (0);
evbuffer_drain(buffer, n);
return (n);
}
u_char *
evbuffer_find(struct evbuffer *buffer, const u_char *what, size_t len)
{
u_char *search = buffer->buffer, *end = search + buffer->off;
u_char *p;
while (search < end &&
(p = memchr(search, *what, end - search)) != NULL) {
if (p + len > end)
break;
if (memcmp(p, what, len) == 0)
return (p);
search = p + 1;
}
return (NULL);
}
void evbuffer_setcb(struct evbuffer *buffer,
void (*cb)(struct evbuffer *, size_t, size_t, void *),
void *cbarg)
{
buffer->cb = cb;
buffer->cbarg = cbarg;
}


@ -0,0 +1,163 @@
/* $OpenBSD: time.h,v 1.11 2000/10/10 13:36:48 itojun Exp $ */
/* $NetBSD: time.h,v 1.18 1996/04/23 10:29:33 mycroft Exp $ */
/*
* Copyright (c) 1982, 1986, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)time.h 8.2 (Berkeley) 7/10/94
*/
#ifndef _SYS_TIME_H_
#define _SYS_TIME_H_
#include <sys/types.h>
/*
* Structure returned by gettimeofday(2) system call,
* and used in other calls.
*/
struct timeval {
long tv_sec; /* seconds */
long tv_usec; /* and microseconds */
};
/*
* Structure defined by POSIX.1b to be like a timeval.
*/
struct timespec {
time_t tv_sec; /* seconds */
long tv_nsec; /* and nanoseconds */
};
#define TIMEVAL_TO_TIMESPEC(tv, ts) { \
(ts)->tv_sec = (tv)->tv_sec; \
(ts)->tv_nsec = (tv)->tv_usec * 1000; \
}
#define TIMESPEC_TO_TIMEVAL(tv, ts) { \
(tv)->tv_sec = (ts)->tv_sec; \
(tv)->tv_usec = (ts)->tv_nsec / 1000; \
}
struct timezone {
int tz_minuteswest; /* minutes west of Greenwich */
int tz_dsttime; /* type of dst correction */
};
#define DST_NONE 0 /* not on dst */
#define DST_USA 1 /* USA style dst */
#define DST_AUST 2 /* Australian style dst */
#define DST_WET 3 /* Western European dst */
#define DST_MET 4 /* Middle European dst */
#define DST_EET 5 /* Eastern European dst */
#define DST_CAN 6 /* Canada */
/* Operations on timevals. */
#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#define timercmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? \
((tvp)->tv_usec cmp (uvp)->tv_usec) : \
((tvp)->tv_sec cmp (uvp)->tv_sec))
#define timeradd(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
if ((vvp)->tv_usec >= 1000000) { \
(vvp)->tv_sec++; \
(vvp)->tv_usec -= 1000000; \
} \
} while (0)
#define timersub(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
if ((vvp)->tv_usec < 0) { \
(vvp)->tv_sec--; \
(vvp)->tv_usec += 1000000; \
} \
} while (0)
/* Operations on timespecs. */
#define timespecclear(tsp) (tsp)->tv_sec = (tsp)->tv_nsec = 0
#define timespecisset(tsp) ((tsp)->tv_sec || (tsp)->tv_nsec)
#define timespeccmp(tsp, usp, cmp) \
(((tsp)->tv_sec == (usp)->tv_sec) ? \
((tsp)->tv_nsec cmp (usp)->tv_nsec) : \
((tsp)->tv_sec cmp (usp)->tv_sec))
#define timespecadd(tsp, usp, vsp) \
do { \
(vsp)->tv_sec = (tsp)->tv_sec + (usp)->tv_sec; \
(vsp)->tv_nsec = (tsp)->tv_nsec + (usp)->tv_nsec; \
if ((vsp)->tv_nsec >= 1000000000L) { \
(vsp)->tv_sec++; \
(vsp)->tv_nsec -= 1000000000L; \
} \
} while (0)
#define timespecsub(tsp, usp, vsp) \
do { \
(vsp)->tv_sec = (tsp)->tv_sec - (usp)->tv_sec; \
(vsp)->tv_nsec = (tsp)->tv_nsec - (usp)->tv_nsec; \
if ((vsp)->tv_nsec < 0) { \
(vsp)->tv_sec--; \
(vsp)->tv_nsec += 1000000000L; \
} \
} while (0)
/*
* Names of the interval timers, and structure
* defining a timer setting.
*/
#define ITIMER_REAL 0
#define ITIMER_VIRTUAL 1
#define ITIMER_PROF 2
struct itimerval {
struct timeval it_interval; /* timer interval */
struct timeval it_value; /* current value */
};
/*
* Getkerninfo clock information structure
*/
struct clockinfo {
int hz; /* clock frequency */
int tick; /* micro-seconds per hz tick */
int tickadj; /* clock skew rate for adjtime() */
int stathz; /* statistics clock frequency */
int profhz; /* profiling clock frequency */
};
#define CLOCK_REALTIME 0
#define CLOCK_VIRTUAL 1
#define CLOCK_PROF 2
#define TIMER_RELTIME 0x0 /* relative timer */
#define TIMER_ABSTIME 0x1 /* absolute timer */
/* --- stuff got cut here - niels --- */
#endif /* !_SYS_TIME_H_ */


@ -0,0 +1,488 @@
/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */
/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
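The five structures described above all follow the same embed-the-linkage pattern. As a concrete illustration, here is a minimal, self-contained sketch (an editor's example, not part of the original header) that exercises the TAILQ_* macros defined further below; the include path is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "compat/sys/queue.h"        /* this header; path is illustrative */

struct item {
	int value;
	TAILQ_ENTRY(item) entries;   /* linkage embedded in each element */
};

TAILQ_HEAD(itemhead, item);          /* declares "struct itemhead" */

int
main(void)
{
	struct itemhead head;
	struct item *it;
	int i;

	TAILQ_INIT(&head);
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		it->value = i;
		TAILQ_INSERT_TAIL(&head, it, entries);
	}
	TAILQ_FOREACH(it, &head, entries)
		printf("%d\n", it->value);       /* prints 0 1 2 */
	while (!TAILQ_EMPTY(&head)) {
		it = TAILQ_FIRST(&head);
		TAILQ_REMOVE(&head, it, entries);
		free(it);
	}
	return (0);
}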
/*
* Singly-linked List definitions.
*/
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#ifndef WIN32
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
#endif
/*
* Singly-linked List access methods.
*/
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_END(head) NULL
#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_FOREACH(var, head, field) \
for((var) = SLIST_FIRST(head); \
(var) != SLIST_END(head); \
(var) = SLIST_NEXT(var, field))
/*
* Singly-linked List functions.
*/
#define SLIST_INIT(head) { \
SLIST_FIRST(head) = SLIST_END(head); \
}
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (0)
/*
* List definitions.
*/
#define LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List access methods
*/
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_END(head) NULL
#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_FOREACH(var, head, field) \
for((var) = LIST_FIRST(head); \
(var)!= LIST_END(head); \
(var) = LIST_NEXT(var, field))
/*
* List functions.
*/
#define LIST_INIT(head) do { \
LIST_FIRST(head) = LIST_END(head); \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (0)
#define LIST_REMOVE(elm, field) do { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (0)
#define LIST_REPLACE(elm, elm2, field) do { \
if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
(elm2)->field.le_next->field.le_prev = \
&(elm2)->field.le_next; \
(elm2)->field.le_prev = (elm)->field.le_prev; \
*(elm2)->field.le_prev = (elm2); \
} while (0)
/*
* Simple queue definitions.
*/
#define SIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define SIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define SIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue access methods.
*/
#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define SIMPLEQ_END(head) NULL
#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
#define SIMPLEQ_FOREACH(var, head, field) \
for((var) = SIMPLEQ_FIRST(head); \
(var) != SIMPLEQ_END(head); \
(var) = SIMPLEQ_NEXT(var, field))
/*
* Simple queue functions.
*/
#define SIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (0)
#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (0)
#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (0)
#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \
if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
/*
* Tail queue definitions.
*/
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
/*
* tail queue access methods
*/
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
/* XXX */
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_EMPTY(head) \
(TAILQ_FIRST(head) == TAILQ_END(head))
#define TAILQ_FOREACH(var, head, field) \
for((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#define TAILQ_FOREACH_REVERSE(var, head, field, headname) \
for((var) = TAILQ_LAST(head, headname); \
(var) != TAILQ_END(head); \
(var) = TAILQ_PREV(var, headname, field))
/*
* Tail queue functions.
*/
#define TAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#define TAILQ_REMOVE(head, elm, field) do { \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
} while (0)
#define TAILQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
(elm2)->field.tqe_next->field.tqe_prev = \
&(elm2)->field.tqe_next; \
else \
(head)->tqh_last = &(elm2)->field.tqe_next; \
(elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
*(elm2)->field.tqe_prev = (elm2); \
} while (0)
/*
* Circular queue definitions.
*/
#define CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define CIRCLEQ_HEAD_INITIALIZER(head) \
{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
#define CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue access methods
*/
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
#define CIRCLEQ_END(head) ((void *)(head))
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define CIRCLEQ_EMPTY(head) \
(CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
#define CIRCLEQ_FOREACH(var, head, field) \
for((var) = CIRCLEQ_FIRST(head); \
(var) != CIRCLEQ_END(head); \
(var) = CIRCLEQ_NEXT(var, field))
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for((var) = CIRCLEQ_LAST(head); \
(var) != CIRCLEQ_END(head); \
(var) = CIRCLEQ_PREV(var, field))
/*
* Circular queue functions.
*/
#define CIRCLEQ_INIT(head) do { \
(head)->cqh_first = CIRCLEQ_END(head); \
(head)->cqh_last = CIRCLEQ_END(head); \
} while (0)
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (0)
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (0)
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = CIRCLEQ_END(head); \
if ((head)->cqh_last == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (0)
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.cqe_next = CIRCLEQ_END(head); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (0)
#define CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
} while (0)
#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
CIRCLEQ_END(head)) \
(head).cqh_last = (elm2); \
else \
(elm2)->field.cqe_next->field.cqe_prev = (elm2); \
if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
CIRCLEQ_END(head)) \
(head).cqh_first = (elm2); \
else \
(elm2)->field.cqe_prev->field.cqe_next = (elm2); \
} while (0)
#endif /* !_SYS_QUEUE_H_ */

extra/libevent/compat/sys/tree.h
@@ -0,0 +1,677 @@
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/*
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SYS_TREE_H_
#define _SYS_TREE_H_
/*
* This file defines data structures for different types of trees:
* splay trees and red-black trees.
*
* A splay tree is a self-organizing data structure. Every operation
* on the tree causes a splay to happen. The splay moves the requested
* node to the root of the tree and partly rebalances it.
*
* This has the benefit that request locality causes faster lookups as
* the requested nodes move to the top of the tree. On the other hand,
* every lookup causes memory writes.
*
* The Balance Theorem bounds the total access time for m operations
* and n inserts on an initially empty tree as O((m + n)lg n). The
* amortized cost for a sequence of m accesses to a splay tree is O(lg n);
*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*/
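As a concrete illustration of the red-black macros defined below, here is a minimal sketch (an editor's example, not part of the original header) that generates a tree keyed on an integer and exercises insertion, ordered traversal and lookup; the include path is illustrative.

#include <stdio.h>
#include "compat/sys/tree.h"         /* this header; path is illustrative */

struct node {
	int key;
	RB_ENTRY(node) entry;        /* linkage embedded in each element */
};

static int
nodecmp(struct node *a, struct node *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(nodetree, node);
RB_GENERATE(nodetree, node, entry, nodecmp)

int
main(void)
{
	struct nodetree head = RB_INITIALIZER(&head);
	struct node nodes[3] = { { 2 }, { 0 }, { 1 } };
	struct node *n, query;
	int i;

	for (i = 0; i < 3; i++)
		RB_INSERT(nodetree, &head, &nodes[i]);
	RB_FOREACH(n, nodetree, &head)
		printf("%d\n", n->key);          /* prints 0 1 2 in key order */
	query.key = 1;
	n = RB_FIND(nodetree, &head, &query);
	return (n != NULL ? 0 : 1);
}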
#define SPLAY_HEAD(name, type) \
struct name { \
struct type *sph_root; /* root of the tree */ \
}
#define SPLAY_INITIALIZER(root) \
{ NULL }
#define SPLAY_INIT(root) do { \
(root)->sph_root = NULL; \
} while (0)
#define SPLAY_ENTRY(type) \
struct { \
struct type *spe_left; /* left element */ \
struct type *spe_right; /* right element */ \
}
#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
#define SPLAY_ROOT(head) (head)->sph_root
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (0)
#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (0)
#define SPLAY_LINKLEFT(head, tmp, field) do { \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
} while (0)
#define SPLAY_LINKRIGHT(head, tmp, field) do { \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} while (0)
#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
} while (0)
/* Generates prototypes and inline functions */
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
void name##_SPLAY(struct name *, struct type *); \
void name##_SPLAY_MINMAX(struct name *, int); \
struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
\
/* Finds the node with the same key as elm */ \
static __inline struct type * \
name##_SPLAY_FIND(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) \
return(NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) \
return (head->sph_root); \
return (NULL); \
} \
\
static __inline struct type * \
name##_SPLAY_NEXT(struct name *head, struct type *elm) \
{ \
name##_SPLAY(head, elm); \
if (SPLAY_RIGHT(elm, field) != NULL) { \
elm = SPLAY_RIGHT(elm, field); \
while (SPLAY_LEFT(elm, field) != NULL) { \
elm = SPLAY_LEFT(elm, field); \
} \
} else \
elm = NULL; \
return (elm); \
} \
\
static __inline struct type * \
name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
return (SPLAY_ROOT(head)); \
}
/* Main splay operation.
* Moves node close to the key of elm to top
*/
#define SPLAY_GENERATE(name, type, field, cmp) \
struct type * \
name##_SPLAY_INSERT(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) { \
SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
} else { \
int __comp; \
name##_SPLAY(head, elm); \
__comp = (cmp)(elm, (head)->sph_root); \
if(__comp < 0) { \
SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
SPLAY_RIGHT(elm, field) = (head)->sph_root; \
SPLAY_LEFT((head)->sph_root, field) = NULL; \
} else if (__comp > 0) { \
SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
SPLAY_LEFT(elm, field) = (head)->sph_root; \
SPLAY_RIGHT((head)->sph_root, field) = NULL; \
} else \
return ((head)->sph_root); \
} \
(head)->sph_root = (elm); \
return (NULL); \
} \
\
struct type * \
name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *__tmp; \
if (SPLAY_EMPTY(head)) \
return (NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) { \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
} else { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
name##_SPLAY(head, elm); \
SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
} \
return (elm); \
} \
return (NULL); \
} \
\
void \
name##_SPLAY(struct name *head, struct type *elm) \
{ \
struct type __node, *__left, *__right, *__tmp; \
int __comp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
__left = __right = &__node; \
\
while ((__comp = (cmp)(elm, (head)->sph_root))) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) > 0){ \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
} \
\
/* Splay with either the minimum or the maximum element \
* Used to find minimum or maximum element in tree. \
*/ \
void name##_SPLAY_MINMAX(struct name *head, int __comp) \
{ \
struct type __node, *__left, *__right, *__tmp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
__left = __right = &__node; \
\
while (1) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp > 0) { \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
}
#define SPLAY_NEGINF -1
#define SPLAY_INF 1
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_INF))
#define SPLAY_FOREACH(x, name, head) \
for ((x) = SPLAY_MIN(name, head); \
(x) != NULL; \
(x) = SPLAY_NEXT(name, head, x))
/* Macros that define a red-black tree */
#define RB_HEAD(name, type) \
struct name { \
struct type *rbh_root; /* root of the tree */ \
}
#define RB_INITIALIZER(root) \
{ NULL }
#define RB_INIT(root) do { \
(root)->rbh_root = NULL; \
} while (0)
#define RB_BLACK 0
#define RB_RED 1
#define RB_ENTRY(type) \
struct { \
struct type *rbe_left; /* left element */ \
struct type *rbe_right; /* right element */ \
struct type *rbe_parent; /* parent element */ \
int rbe_color; /* node color */ \
}
#define RB_LEFT(elm, field) (elm)->field.rbe_left
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
#define RB_COLOR(elm, field) (elm)->field.rbe_color
#define RB_ROOT(head) (head)->rbh_root
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
#define RB_SET(elm, parent, field) do { \
RB_PARENT(elm, field) = parent; \
RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
RB_COLOR(elm, field) = RB_RED; \
} while (0)
#define RB_SET_BLACKRED(black, red, field) do { \
RB_COLOR(black, field) = RB_BLACK; \
RB_COLOR(red, field) = RB_RED; \
} while (0)
#ifndef RB_AUGMENT
#define RB_AUGMENT(x)
#endif
#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
(tmp) = RB_RIGHT(elm, field); \
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_LEFT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (0)
#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
(tmp) = RB_LEFT(elm, field); \
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_RIGHT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (0)
/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp) \
void name##_RB_INSERT_COLOR(struct name *, struct type *); \
void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
struct type *name##_RB_REMOVE(struct name *, struct type *); \
struct type *name##_RB_INSERT(struct name *, struct type *); \
struct type *name##_RB_FIND(struct name *, struct type *); \
struct type *name##_RB_NEXT(struct type *); \
struct type *name##_RB_MINMAX(struct name *, int); \
\
/* Main rb operation.
* Moves node close to the key of elm to top
*/
#define RB_GENERATE(name, type, field, cmp) \
void \
name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
struct type *parent, *gparent, *tmp; \
while ((parent = RB_PARENT(elm, field)) && \
RB_COLOR(parent, field) == RB_RED) { \
gparent = RB_PARENT(parent, field); \
if (parent == RB_LEFT(gparent, field)) { \
tmp = RB_RIGHT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field);\
elm = gparent; \
continue; \
} \
if (RB_RIGHT(parent, field) == elm) { \
RB_ROTATE_LEFT(head, parent, tmp, field);\
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_RIGHT(head, gparent, tmp, field); \
} else { \
tmp = RB_LEFT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field);\
elm = gparent; \
continue; \
} \
if (RB_LEFT(parent, field) == elm) { \
RB_ROTATE_RIGHT(head, parent, tmp, field);\
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_LEFT(head, gparent, tmp, field); \
} \
} \
RB_COLOR(head->rbh_root, field) = RB_BLACK; \
} \
\
void \
name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
{ \
struct type *tmp; \
while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
elm != RB_ROOT(head)) { \
if (RB_LEFT(parent, field) == elm) { \
tmp = RB_RIGHT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_LEFT(head, parent, tmp, field);\
tmp = RB_RIGHT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
struct type *oleft; \
if ((oleft = RB_LEFT(tmp, field)))\
RB_COLOR(oleft, field) = RB_BLACK;\
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_RIGHT(head, tmp, oleft, field);\
tmp = RB_RIGHT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_RIGHT(tmp, field)) \
RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
RB_ROTATE_LEFT(head, parent, tmp, field);\
elm = RB_ROOT(head); \
break; \
} \
} else { \
tmp = RB_LEFT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_RIGHT(head, parent, tmp, field);\
tmp = RB_LEFT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
struct type *oright; \
if ((oright = RB_RIGHT(tmp, field)))\
RB_COLOR(oright, field) = RB_BLACK;\
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_LEFT(head, tmp, oright, field);\
tmp = RB_LEFT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_LEFT(tmp, field)) \
RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
RB_ROTATE_RIGHT(head, parent, tmp, field);\
elm = RB_ROOT(head); \
break; \
} \
} \
} \
if (elm) \
RB_COLOR(elm, field) = RB_BLACK; \
} \
\
struct type * \
name##_RB_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *child, *parent, *old = elm; \
int color; \
if (RB_LEFT(elm, field) == NULL) \
child = RB_RIGHT(elm, field); \
else if (RB_RIGHT(elm, field) == NULL) \
child = RB_LEFT(elm, field); \
else { \
struct type *left; \
elm = RB_RIGHT(elm, field); \
while ((left = RB_LEFT(elm, field))) \
elm = left; \
child = RB_RIGHT(elm, field); \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
if (RB_PARENT(elm, field) == old) \
parent = elm; \
(elm)->field = (old)->field; \
if (RB_PARENT(old, field)) { \
if (RB_LEFT(RB_PARENT(old, field), field) == old)\
RB_LEFT(RB_PARENT(old, field), field) = elm;\
else \
RB_RIGHT(RB_PARENT(old, field), field) = elm;\
RB_AUGMENT(RB_PARENT(old, field)); \
} else \
RB_ROOT(head) = elm; \
RB_PARENT(RB_LEFT(old, field), field) = elm; \
if (RB_RIGHT(old, field)) \
RB_PARENT(RB_RIGHT(old, field), field) = elm; \
if (parent) { \
left = parent; \
do { \
RB_AUGMENT(left); \
} while ((left = RB_PARENT(left, field))); \
} \
goto color; \
} \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
color: \
if (color == RB_BLACK) \
name##_RB_REMOVE_COLOR(head, parent, child); \
return (old); \
} \
\
/* Inserts a node into the RB tree */ \
struct type * \
name##_RB_INSERT(struct name *head, struct type *elm) \
{ \
struct type *tmp; \
struct type *parent = NULL; \
int comp = 0; \
tmp = RB_ROOT(head); \
while (tmp) { \
parent = tmp; \
comp = (cmp)(elm, parent); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
RB_SET(elm, parent, field); \
if (parent != NULL) { \
if (comp < 0) \
RB_LEFT(parent, field) = elm; \
else \
RB_RIGHT(parent, field) = elm; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = elm; \
name##_RB_INSERT_COLOR(head, elm); \
return (NULL); \
} \
\
/* Finds the node with the same key as elm */ \
struct type * \
name##_RB_FIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (NULL); \
} \
\
struct type * \
name##_RB_NEXT(struct type *elm) \
{ \
if (RB_RIGHT(elm, field)) { \
elm = RB_RIGHT(elm, field); \
while (RB_LEFT(elm, field)) \
elm = RB_LEFT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
struct type * \
name##_RB_MINMAX(struct name *head, int val) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *parent = NULL; \
while (tmp) { \
parent = tmp; \
if (val < 0) \
tmp = RB_LEFT(tmp, field); \
else \
tmp = RB_RIGHT(tmp, field); \
} \
return (parent); \
}
#define RB_NEGINF -1
#define RB_INF 1
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
#define RB_FOREACH(x, name, head) \
for ((x) = RB_MIN(name, head); \
(x) != NULL; \
(x) = name##_RB_NEXT(x))
#endif /* _SYS_TREE_H_ */

extra/libevent/devpoll.c
@@ -0,0 +1,421 @@
/*
* Copyright 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_DEVPOLL
#include <sys/types.h>
#include <sys/resource.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <sys/devpoll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
/* Due to limitations in the devpoll interface, we need to keep track of
* all file descriptors ourselves.
*/
struct evdevpoll {
struct event *evread;
struct event *evwrite;
};
struct devpollop {
struct evdevpoll *fds;
int nfds;
struct pollfd *events;
int nevents;
int dpfd;
struct pollfd *changes;
int nchanges;
};
static void *devpoll_init (struct event_base *);
static int devpoll_add (void *, struct event *);
static int devpoll_del (void *, struct event *);
static int devpoll_dispatch (struct event_base *, void *, struct timeval *);
static void devpoll_dealloc (struct event_base *, void *);
struct eventop devpollops = {
"devpoll",
devpoll_init,
devpoll_add,
devpoll_del,
devpoll_dispatch,
devpoll_dealloc,
1 /* need reinit */
};
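Every libevent backend (devpoll, epoll, kqueue, poll, select, ...) is exported through a dispatch table like devpollops above, and the event core drives whichever table it selects through these function pointers. The sketch below is an editor's illustration of that calling pattern using a hypothetical, simplified table type; the real struct eventop layout lives in event-internal.h.

/* Hypothetical, simplified mirror of the dispatch table initialized above. */
struct mini_eventop {
	const char *name;
	void *(*init)(struct event_base *);
	int (*add)(void *, struct event *);
	int (*del)(void *, struct event *);
	int (*dispatch)(struct event_base *, void *, struct timeval *);
	void (*dealloc)(struct event_base *, void *);
	int need_reinit;
};

/* Sketch: register interest in one event, then wait for and fire it. */
static int
backend_run_once(struct event_base *base, const struct mini_eventop *ops,
    void *backend_state, struct event *ev, struct timeval *tv)
{
	if (ops->add(backend_state, ev) == -1)
		return (-1);
	return (ops->dispatch(base, backend_state, tv));
}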
#define NEVENT 32000
static int
devpoll_commit(struct devpollop *devpollop)
{
/*
* Due to a bug in Solaris, we have to use pwrite with an offset of 0.
* Writes are limited to 2GB of data; anything beyond that fails.
*/
if (pwrite(devpollop->dpfd, devpollop->changes,
sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
return(-1);
devpollop->nchanges = 0;
return(0);
}
static int
devpoll_queue(struct devpollop *devpollop, int fd, int events) {
struct pollfd *pfd;
if (devpollop->nchanges >= devpollop->nevents) {
/*
* Change buffer is full, must commit it to /dev/poll before
* adding more
*/
if (devpoll_commit(devpollop) != 0)
return(-1);
}
pfd = &devpollop->changes[devpollop->nchanges++];
pfd->fd = fd;
pfd->events = events;
pfd->revents = 0;
return(0);
}
static void *
devpoll_init(struct event_base *base)
{
int dpfd, nfiles = NEVENT;
struct rlimit rl;
struct devpollop *devpollop;
/* Disable devpoll when this environment variable is set */
if (getenv("EVENT_NODEVPOLL"))
return (NULL);
if (!(devpollop = calloc(1, sizeof(struct devpollop))))
return (NULL);
if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
rl.rlim_cur != RLIM_INFINITY)
nfiles = rl.rlim_cur - 1;
/* Initialize the kernel queue */
if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
event_warn("open: /dev/poll");
free(devpollop);
return (NULL);
}
devpollop->dpfd = dpfd;
/* Initialize fields */
devpollop->events = calloc(nfiles, sizeof(struct pollfd));
if (devpollop->events == NULL) {
free(devpollop);
close(dpfd);
return (NULL);
}
devpollop->nevents = nfiles;
devpollop->fds = calloc(nfiles, sizeof(struct evdevpoll));
if (devpollop->fds == NULL) {
free(devpollop->events);
free(devpollop);
close(dpfd);
return (NULL);
}
devpollop->nfds = nfiles;
devpollop->changes = calloc(nfiles, sizeof(struct pollfd));
if (devpollop->changes == NULL) {
free(devpollop->fds);
free(devpollop->events);
free(devpollop);
close(dpfd);
return (NULL);
}
evsignal_init(base);
return (devpollop);
}
static int
devpoll_recalc(struct event_base *base, void *arg, int max)
{
struct devpollop *devpollop = arg;
if (max > devpollop->nfds) {
struct evdevpoll *fds;
int nfds;
nfds = devpollop->nfds;
while (nfds < max)
nfds <<= 1;
fds = realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
if (fds == NULL) {
event_warn("realloc");
return (-1);
}
devpollop->fds = fds;
memset(fds + devpollop->nfds, 0,
(nfds - devpollop->nfds) * sizeof(struct evdevpoll));
devpollop->nfds = nfds;
}
return (0);
}
static int
devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
struct devpollop *devpollop = arg;
struct pollfd *events = devpollop->events;
struct dvpoll dvp;
struct evdevpoll *evdp;
int i, res, timeout = -1;
if (devpollop->nchanges)
devpoll_commit(devpollop);
if (tv != NULL)
timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
dvp.dp_fds = devpollop->events;
dvp.dp_nfds = devpollop->nevents;
dvp.dp_timeout = timeout;
res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
if (res == -1) {
if (errno != EINTR) {
event_warn("ioctl: DP_POLL");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: devpoll_wait reports %d", __func__, res));
for (i = 0; i < res; i++) {
int which = 0;
int what = events[i].revents;
struct event *evread = NULL, *evwrite = NULL;
assert(events[i].fd < devpollop->nfds);
evdp = &devpollop->fds[events[i].fd];
if (what & POLLHUP)
what |= POLLIN | POLLOUT;
else if (what & POLLERR)
what |= POLLIN | POLLOUT;
if (what & POLLIN) {
evread = evdp->evread;
which |= EV_READ;
}
if (what & POLLOUT) {
evwrite = evdp->evwrite;
which |= EV_WRITE;
}
if (!which)
continue;
if (evread != NULL && !(evread->ev_events & EV_PERSIST))
event_del(evread);
if (evwrite != NULL && evwrite != evread &&
!(evwrite->ev_events & EV_PERSIST))
event_del(evwrite);
if (evread != NULL)
event_active(evread, EV_READ, 1);
if (evwrite != NULL)
event_active(evwrite, EV_WRITE, 1);
}
return (0);
}
static int
devpoll_add(void *arg, struct event *ev)
{
struct devpollop *devpollop = arg;
struct evdevpoll *evdp;
int fd, events;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
fd = ev->ev_fd;
if (fd >= devpollop->nfds) {
/* Extend the file descriptor array as necessary */
if (devpoll_recalc(ev->ev_base, devpollop, fd) == -1)
return (-1);
}
evdp = &devpollop->fds[fd];
/*
* It's not necessary to OR the existing read/write events that we
* are currently interested in with the new event we are adding.
* The /dev/poll driver ORs any new events with the existing events
* that it has cached for the fd.
*/
events = 0;
if (ev->ev_events & EV_READ) {
if (evdp->evread && evdp->evread != ev) {
/* There is already a different read event registered */
return(-1);
}
events |= POLLIN;
}
if (ev->ev_events & EV_WRITE) {
if (evdp->evwrite && evdp->evwrite != ev) {
/* There is already a different write event registered */
return(-1);
}
events |= POLLOUT;
}
if (devpoll_queue(devpollop, fd, events) != 0)
return(-1);
/* Update which events we are responsible for */
if (ev->ev_events & EV_READ)
evdp->evread = ev;
if (ev->ev_events & EV_WRITE)
evdp->evwrite = ev;
return (0);
}
static int
devpoll_del(void *arg, struct event *ev)
{
struct devpollop *devpollop = arg;
struct evdevpoll *evdp;
int fd, events;
int needwritedelete = 1, needreaddelete = 1;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
fd = ev->ev_fd;
if (fd >= devpollop->nfds)
return (0);
evdp = &devpollop->fds[fd];
events = 0;
if (ev->ev_events & EV_READ)
events |= POLLIN;
if (ev->ev_events & EV_WRITE)
events |= POLLOUT;
/*
* The only way to remove an fd from the /dev/poll monitored set is
* to use POLLREMOVE by itself. This removes ALL events for the fd
* provided so if we care about two events and are only removing one
* we must re-add the other event after POLLREMOVE.
*/
if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
return(-1);
if ((events & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
/*
* We're not deleting all events, so we must resubmit the
* event that we are still interested in if one exists.
*/
if ((events & POLLIN) && evdp->evwrite != NULL) {
/* Deleting read, still care about write */
devpoll_queue(devpollop, fd, POLLOUT);
needwritedelete = 0;
} else if ((events & POLLOUT) && evdp->evread != NULL) {
/* Deleting write, still care about read */
devpoll_queue(devpollop, fd, POLLIN);
needreaddelete = 0;
}
}
if (needreaddelete)
evdp->evread = NULL;
if (needwritedelete)
evdp->evwrite = NULL;
return (0);
}
static void
devpoll_dealloc(struct event_base *base, void *arg)
{
struct devpollop *devpollop = arg;
evsignal_dealloc(base);
if (devpollop->fds)
free(devpollop->fds);
if (devpollop->events)
free(devpollop->events);
if (devpollop->changes)
free(devpollop->changes);
if (devpollop->dpfd >= 0)
close(devpollop->dpfd);
memset(devpollop, 0, sizeof(struct devpollop));
free(devpollop);
}
#endif /* HAVE_DEVPOLL */

extra/libevent/epoll.c
@@ -0,0 +1,359 @@
/*
* Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_EPOLL
#include <stdint.h>
#include <sys/types.h>
#include <sys/resource.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <sys/epoll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
/* Due to limitations in the epoll interface, we need to keep track of
* all file descriptors ourselves.
*/
struct evepoll {
struct event *evread;
struct event *evwrite;
};
struct epollop {
struct evepoll *fds;
int nfds;
struct epoll_event *events;
int nevents;
int epfd;
};
static void *epoll_init (struct event_base *);
static int epoll_add (void *, struct event *);
static int epoll_del (void *, struct event *);
static int epoll_dispatch (struct event_base *, void *, struct timeval *);
static void epoll_dealloc (struct event_base *, void *);
struct eventop epollops = {
"epoll",
epoll_init,
epoll_add,
epoll_del,
epoll_dispatch,
epoll_dealloc,
1 /* need reinit */
};
#ifdef HAVE_SETFD
#define FD_CLOSEONEXEC(x) do { \
if (fcntl(x, F_SETFD, 1) == -1) \
event_warn("fcntl(%d, F_SETFD)", x); \
} while (0)
#else
#define FD_CLOSEONEXEC(x)
#endif
#define NEVENT 32000
static void *
epoll_init(struct event_base *base)
{
int epfd, nfiles = NEVENT;
struct rlimit rl;
struct epollop *epollop;
/* Disable epoll when this environment variable is set */
if (getenv("EVENT_NOEPOLL"))
return (NULL);
if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
rl.rlim_cur != RLIM_INFINITY) {
/*
* Solaris made a backwards-incompatible change here, so don't
* use rl.rlim_cur directly.
*/
nfiles = rl.rlim_cur - 1;
}
/* Initialize the kernel queue */
if ((epfd = epoll_create(nfiles)) == -1) {
event_warn("epoll_create");
return (NULL);
}
FD_CLOSEONEXEC(epfd);
if (!(epollop = calloc(1, sizeof(struct epollop))))
return (NULL);
epollop->epfd = epfd;
/* Initialize fields */
epollop->events = malloc(nfiles * sizeof(struct epoll_event));
if (epollop->events == NULL) {
free(epollop);
return (NULL);
}
epollop->nevents = nfiles;
epollop->fds = calloc(nfiles, sizeof(struct evepoll));
if (epollop->fds == NULL) {
free(epollop->events);
free(epollop);
return (NULL);
}
epollop->nfds = nfiles;
evsignal_init(base);
return (epollop);
}
static int
epoll_recalc(struct event_base *base, void *arg, int max)
{
struct epollop *epollop = arg;
if (max > epollop->nfds) {
struct evepoll *fds;
int nfds;
nfds = epollop->nfds;
while (nfds < max)
nfds <<= 1;
fds = realloc(epollop->fds, nfds * sizeof(struct evepoll));
if (fds == NULL) {
event_warn("realloc");
return (-1);
}
epollop->fds = fds;
memset(fds + epollop->nfds, 0,
(nfds - epollop->nfds) * sizeof(struct evepoll));
epollop->nfds = nfds;
}
return (0);
}
static int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
struct epollop *epollop = arg;
struct epoll_event *events = epollop->events;
struct evepoll *evep;
int i, res, timeout = -1;
if (tv != NULL)
timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
if (res == -1) {
if (errno != EINTR) {
event_warn("epoll_wait");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: epoll_wait reports %d", __func__, res));
for (i = 0; i < res; i++) {
int what = events[i].events;
struct event *evread = NULL, *evwrite = NULL;
evep = (struct evepoll *)events[i].data.ptr;
if (what & (EPOLLHUP|EPOLLERR)) {
evread = evep->evread;
evwrite = evep->evwrite;
} else {
if (what & EPOLLIN) {
evread = evep->evread;
}
if (what & EPOLLOUT) {
evwrite = evep->evwrite;
}
}
if (!(evread||evwrite))
continue;
if (evread != NULL)
event_active(evread, EV_READ, 1);
if (evwrite != NULL)
event_active(evwrite, EV_WRITE, 1);
}
return (0);
}
static int
epoll_add(void *arg, struct event *ev)
{
struct epollop *epollop = arg;
struct epoll_event epev = {0, {0}};
struct evepoll *evep;
int fd, op, events;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
fd = ev->ev_fd;
if (fd >= epollop->nfds) {
/* Extend the file descriptor array as necessary */
if (epoll_recalc(ev->ev_base, epollop, fd) == -1)
return (-1);
}
evep = &epollop->fds[fd];
op = EPOLL_CTL_ADD;
events = 0;
if (evep->evread != NULL) {
events |= EPOLLIN;
op = EPOLL_CTL_MOD;
}
if (evep->evwrite != NULL) {
events |= EPOLLOUT;
op = EPOLL_CTL_MOD;
}
if (ev->ev_events & EV_READ)
events |= EPOLLIN;
if (ev->ev_events & EV_WRITE)
events |= EPOLLOUT;
epev.data.ptr = evep;
epev.events = events;
if (epoll_ctl(epollop->epfd, op, ev->ev_fd, &epev) == -1)
return (-1);
/* Update which events we are responsible for */
if (ev->ev_events & EV_READ)
evep->evread = ev;
if (ev->ev_events & EV_WRITE)
evep->evwrite = ev;
return (0);
}
static int
epoll_del(void *arg, struct event *ev)
{
struct epollop *epollop = arg;
struct epoll_event epev = {0, {0}};
struct evepoll *evep;
int fd, events, op;
int needwritedelete = 1, needreaddelete = 1;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
fd = ev->ev_fd;
if (fd >= epollop->nfds)
return (0);
evep = &epollop->fds[fd];
op = EPOLL_CTL_DEL;
events = 0;
if (ev->ev_events & EV_READ)
events |= EPOLLIN;
if (ev->ev_events & EV_WRITE)
events |= EPOLLOUT;
if ((events & (EPOLLIN|EPOLLOUT)) != (EPOLLIN|EPOLLOUT)) {
if ((events & EPOLLIN) && evep->evwrite != NULL) {
needwritedelete = 0;
events = EPOLLOUT;
op = EPOLL_CTL_MOD;
} else if ((events & EPOLLOUT) && evep->evread != NULL) {
needreaddelete = 0;
events = EPOLLIN;
op = EPOLL_CTL_MOD;
}
}
epev.events = events;
epev.data.ptr = evep;
if (needreaddelete)
evep->evread = NULL;
if (needwritedelete)
evep->evwrite = NULL;
if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
return (-1);
return (0);
}
static void
epoll_dealloc(struct event_base *base, void *arg)
{
struct epollop *epollop = arg;
evsignal_dealloc(base);
if (epollop->fds)
free(epollop->fds);
if (epollop->events)
free(epollop->events);
if (epollop->epfd >= 0)
close(epollop->epfd);
memset(epollop, 0, sizeof(struct epollop));
free(epollop);
}
#endif /* HAVE_EPOLL */

extra/libevent/epoll_sub.c
@@ -0,0 +1,60 @@
/*
* Copyright 2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_EPOLL
#include <stdint.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <unistd.h>
int
epoll_create(int size)
{
return (syscall(__NR_epoll_create, size));
}
int
epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
}
int
epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
}
#endif /* HAVE_EPOLL */

extra/libevent/evbuffer.c
@@ -0,0 +1,418 @@
/*
* Copyright (c) 2002-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
#include "evutil.h"
#include "event.h"
/* prototypes */
void bufferevent_setwatermark(struct bufferevent *, short, size_t, size_t);
void bufferevent_read_pressure_cb(struct evbuffer *, size_t, size_t, void *);
static int
bufferevent_add(struct event *ev, int timeout)
{
struct timeval tv, *ptv = NULL;
if (timeout) {
evutil_timerclear(&tv);
tv.tv_sec = timeout;
ptv = &tv;
}
return (event_add(ev, ptv));
}
/*
* This callback is executed when the size of the input buffer changes.
* We use it to apply back pressure on the reading side.
*/
void
bufferevent_read_pressure_cb(struct evbuffer *buf, size_t old, size_t now,
void *arg) {
struct bufferevent *bufev = arg;
/*
* If we are below the watermark then reschedule reading if it's
* still enabled.
*/
if (bufev->wm_read.high == 0 || now < bufev->wm_read.high) {
evbuffer_setcb(buf, NULL, NULL);
if (bufev->enabled & EV_READ)
bufferevent_add(&bufev->ev_read, bufev->timeout_read);
}
}
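The back-pressure path above is driven by the read watermarks. Here is a short editor's sketch of configuring them through the bufferevent_setwatermark() prototype declared earlier in this file; the 128/4096 thresholds are arbitrary example values.

/* Editor's example: deliver reads once at least 128 bytes are buffered,
 * and stop reading from the fd at 4096 pending bytes until the pressure
 * callback above re-enables reading. */
static void
enable_read_backpressure(struct bufferevent *bufev)
{
	bufferevent_setwatermark(bufev, EV_READ, 128, 4096);
	bufferevent_enable(bufev, EV_READ);
}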
static void
bufferevent_readcb(int fd, short event, void *arg)
{
struct bufferevent *bufev = arg;
int res = 0;
short what = EVBUFFER_READ;
size_t len;
int howmuch = -1;
if (event == EV_TIMEOUT) {
what |= EVBUFFER_TIMEOUT;
goto error;
}
/*
* If we have a high watermark configured then we don't want to
* read more data than would make us reach the watermark.
*/
if (bufev->wm_read.high != 0)
howmuch = (int)bufev->wm_read.high;
res = evbuffer_read(bufev->input, fd, howmuch);
if (res == -1) {
if (errno == EAGAIN || errno == EINTR)
goto reschedule;
/* error case */
what |= EVBUFFER_ERROR;
} else if (res == 0) {
/* eof case */
what |= EVBUFFER_EOF;
}
if (res <= 0)
goto error;
bufferevent_add(&bufev->ev_read, bufev->timeout_read);
/* See if this callback meets the watermarks */
len = EVBUFFER_LENGTH(bufev->input);
if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
return;
if (bufev->wm_read.high != 0 && len > bufev->wm_read.high) {
struct evbuffer *buf = bufev->input;
event_del(&bufev->ev_read);
/* Now schedule a callback for us */
evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev);
return;
}
/* Invoke the user callback - must always be called last */
if (bufev->readcb != NULL)
(*bufev->readcb)(bufev, bufev->cbarg);
return;
reschedule:
bufferevent_add(&bufev->ev_read, bufev->timeout_read);
return;
error:
(*bufev->errorcb)(bufev, what, bufev->cbarg);
}
static void
bufferevent_writecb(int fd, short event, void *arg)
{
struct bufferevent *bufev = arg;
int res = 0;
short what = EVBUFFER_WRITE;
if (event == EV_TIMEOUT) {
what |= EVBUFFER_TIMEOUT;
goto error;
}
if (EVBUFFER_LENGTH(bufev->output)) {
res = evbuffer_write(bufev->output, fd);
if (res == -1) {
#ifndef WIN32
/* TODO: evbuffer uses WriteFile when WIN32 is set. WIN32 system calls do not
* set errno, so this error checking is not portable. */
if (errno == EAGAIN ||
errno == EINTR ||
errno == EINPROGRESS)
goto reschedule;
/* error case */
what |= EVBUFFER_ERROR;
#else
goto reschedule;
#endif
} else if (res == 0) {
/* eof case */
what |= EVBUFFER_EOF;
}
if (res <= 0)
goto error;
}
if (EVBUFFER_LENGTH(bufev->output) != 0)
bufferevent_add(&bufev->ev_write, bufev->timeout_write);
/*
* Invoke the user callback if our buffer is drained or below the
* low watermark.
*/
if (bufev->writecb != NULL &&
EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
(*bufev->writecb)(bufev, bufev->cbarg);
return;
reschedule:
if (EVBUFFER_LENGTH(bufev->output) != 0)
bufferevent_add(&bufev->ev_write, bufev->timeout_write);
return;
error:
(*bufev->errorcb)(bufev, what, bufev->cbarg);
}
/*
* Create a new buffered event object.
*
* The read callback is invoked whenever we read new data.
* The write callback is invoked whenever the output buffer is drained.
* The error callback is invoked on a write/read error or on EOF.
*
* Both the read and write callbacks may be NULL. The error callback is not
* allowed to be NULL and must always be provided.
*/
struct bufferevent *
bufferevent_new(int fd, evbuffercb readcb, evbuffercb writecb,
everrorcb errorcb, void *cbarg)
{
struct bufferevent *bufev;
if ((bufev = calloc(1, sizeof(struct bufferevent))) == NULL)
return (NULL);
if ((bufev->input = evbuffer_new()) == NULL) {
free(bufev);
return (NULL);
}
if ((bufev->output = evbuffer_new()) == NULL) {
evbuffer_free(bufev->input);
free(bufev);
return (NULL);
}
event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
bufev->readcb = readcb;
bufev->writecb = writecb;
bufev->errorcb = errorcb;
bufev->cbarg = cbarg;
/*
* Set to EV_WRITE so that using bufferevent_write is going to
* trigger a callback. Reading needs to be explicitly enabled
* because otherwise no data will be available.
*/
bufev->enabled = EV_WRITE;
return (bufev);
}
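As a usage illustration only (not part of the imported file), the following sketch shows how the three callbacks fit together for a simple echo handler; it assumes event_init() has already been called and that fd is a connected, non-blocking socket:

#include "event.h"

/* Read callback: echo whatever arrived back to the peer. */
static void echo_readcb(struct bufferevent *bev, void *arg)
{
    char chunk[512];
    size_t n;
    while ((n = bufferevent_read(bev, chunk, sizeof(chunk))) > 0)
        bufferevent_write(bev, chunk, n);
}

/* Error callback (mandatory): tear the session down on EOF, error or timeout. */
static void echo_errorcb(struct bufferevent *bev, short what, void *arg)
{
    bufferevent_free(bev);
}

static struct bufferevent *start_echo(int fd)
{
    /* the write callback may be NULL; the error callback may not */
    struct bufferevent *bev = bufferevent_new(fd, echo_readcb, NULL,
                                              echo_errorcb, NULL);
    if (bev != NULL)
        bufferevent_enable(bev, EV_READ);
    return bev;
}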
int
bufferevent_priority_set(struct bufferevent *bufev, int priority)
{
if (event_priority_set(&bufev->ev_read, priority) == -1)
return (-1);
if (event_priority_set(&bufev->ev_write, priority) == -1)
return (-1);
return (0);
}
/* Closing the file descriptor is the responsibility of the caller */
void
bufferevent_free(struct bufferevent *bufev)
{
event_del(&bufev->ev_read);
event_del(&bufev->ev_write);
evbuffer_free(bufev->input);
evbuffer_free(bufev->output);
free(bufev);
}
/*
* Returns 0 on success;
* -1 on failure.
*/
int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
int res;
res = evbuffer_add(bufev->output, data, size);
if (res == -1)
return (res);
/* If everything is okay, we need to schedule a write */
if (size > 0 && (bufev->enabled & EV_WRITE))
bufferevent_add(&bufev->ev_write, bufev->timeout_write);
return (res);
}
int
bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
int res;
res = bufferevent_write(bufev, buf->buffer, buf->off);
if (res != -1)
evbuffer_drain(buf, buf->off);
return (res);
}
size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
struct evbuffer *buf = bufev->input;
if (buf->off < size)
size = buf->off;
/* Copy the available data to the user buffer */
memcpy(data, buf->buffer, size);
if (size)
evbuffer_drain(buf, size);
return (size);
}
int
bufferevent_enable(struct bufferevent *bufev, short event)
{
if (event & EV_READ) {
if (bufferevent_add(&bufev->ev_read, bufev->timeout_read) == -1)
return (-1);
}
if (event & EV_WRITE) {
if (bufferevent_add(&bufev->ev_write, bufev->timeout_write) == -1)
return (-1);
}
bufev->enabled |= event;
return (0);
}
int
bufferevent_disable(struct bufferevent *bufev, short event)
{
if (event & EV_READ) {
if (event_del(&bufev->ev_read) == -1)
return (-1);
}
if (event & EV_WRITE) {
if (event_del(&bufev->ev_write) == -1)
return (-1);
}
bufev->enabled &= ~event;
return (0);
}
/*
* Sets the read and write timeout for a buffered event.
*/
void
bufferevent_settimeout(struct bufferevent *bufev,
int timeout_read, int timeout_write) {
bufev->timeout_read = timeout_read;
bufev->timeout_write = timeout_write;
}
/*
* Sets the water marks
*/
void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
size_t lowmark, size_t highmark)
{
if (events & EV_READ) {
bufev->wm_read.low = lowmark;
bufev->wm_read.high = highmark;
}
if (events & EV_WRITE) {
bufev->wm_write.low = lowmark;
bufev->wm_write.high = highmark;
}
/* If the watermarks changed then see if we should call read again */
bufferevent_read_pressure_cb(bufev->input,
0, EVBUFFER_LENGTH(bufev->input), bufev);
}
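Continuing the hedged echo sketch above (bev is the bufferevent created there; the byte counts are arbitrary placeholders), read watermarks are applied like this:

/* Defer the read callback until at least 16 bytes are buffered, and stop
 * reading from the socket once 4096 bytes are pending; the pressure
 * callback above re-enables reading after the input buffer drains. */
bufferevent_setwatermark(bev, EV_READ, 16, 4096);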
int
bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
{
int res;
res = event_base_set(base, &bufev->ev_read);
if (res == -1)
return (res);
res = event_base_set(base, &bufev->ev_write);
return (res);
}

extra/libevent/evdns.c (new file, 3150 lines): diff suppressed because it is too large.

extra/libevent/evdns.h (new file, 528 lines):
@@ -0,0 +1,528 @@
/*
* Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* The original DNS code is due to Adam Langley with heavy
* modifications by Nick Mathewson. Adam put his DNS software in the
* public domain. You can find his original copyright below. Please,
* aware that the code as part of libevent is governed by the 3-clause
* BSD license above.
*
* This software is Public Domain. To view a copy of the public domain dedication,
* visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
* Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
*
* I ask and expect, but do not require, that all derivative works contain an
* attribution similar to:
* Parts developed by Adam Langley <agl@imperialviolet.org>
*
* You may wish to replace the word "Parts" with something else depending on
* the amount of original code.
*
* (Derivative works do not include programs which link against, run or include
* the source verbatim in their source distributions)
*/
/** @file evdns.h
*
* Welcome, gentle reader
*
* Async DNS lookups are really a whole lot harder than they should be,
* mostly stemming from the fact that the libc resolver has never been
* very good at them. Before you use this library you should see if libc
* can do the job for you with the modern async call getaddrinfo_a
* (see http://www.imperialviolet.org/page25.html#e498). Otherwise,
* please continue.
*
* This code is based on libevent and you must call event_init before
* any of the APIs in this file. You must also seed the OpenSSL random
* source if you are using OpenSSL for ids (see below).
*
* This library is designed to be included and shipped with your source
* code. You statically link with it. You should also test for the
* existence of strtok_r and define HAVE_STRTOK_R if you have it.
*
* The DNS protocol requires a good source of id numbers and these
* numbers should be unpredictable for spoofing reasons. There are
* three methods for generating them here and you must define exactly
* one of them. In increasing order of preference:
*
* DNS_USE_GETTIMEOFDAY_FOR_ID:
* Using the bottom 16 bits of the usec result from gettimeofday. This
* is a pretty poor solution but should work anywhere.
* DNS_USE_CPU_CLOCK_FOR_ID:
* Using the bottom 16 bits of the nsec result from the CPU's time
* counter. This is better, but may not work everywhere. Requires
* POSIX realtime support and you'll need to link against -lrt on
* glibc systems at least.
* DNS_USE_OPENSSL_FOR_ID:
* Uses the OpenSSL RAND_bytes call to generate the data. You must
* have seeded the pool before making any calls to this library.
*
* The library keeps track of the state of nameservers and will avoid
* them when they go down. Otherwise it will round robin between them.
*
* Quick start guide:
* #include "evdns.h"
* void callback(int result, char type, int count, int ttl,
* void *addresses, void *arg);
* evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
* evdns_resolve("www.hostname.com", 0, callback, NULL);
*
* When the lookup is complete the callback function is called. The
* first argument will be one of the DNS_ERR_* defines in evdns.h.
* Hopefully it will be DNS_ERR_NONE, in which case type will be
* DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
* which the data can be cached for (in seconds), addresses will point
* to an array of uint32_t's and arg will be whatever you passed to
* evdns_resolve.
*
* Searching:
*
* In order for this library to be a good replacement for glibc's resolver it
* supports searching. This involves setting a list of default domains, in
* which names will be queried for. The number of dots in the query name
* determines the order in which this list is used.
*
* Searching appears to be a single lookup from the point of view of the API,
* although many DNS queries may be generated from a single call to
* evdns_resolve. Searching can also drastically slow down the resolution
* of names.
*
* To disable searching:
* 1. Never set it up. If you never call evdns_resolv_conf_parse or
* evdns_search_add then no searching will occur.
*
* 2. If you do call evdns_resolv_conf_parse then don't pass
* DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it).
*
* 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag.
*
* The order of searches depends on the number of dots in the name. If the
* number is greater than the ndots setting then the name is first tried
* globally. Otherwise each search domain is appended in turn.
*
* The ndots setting can either be set from a resolv.conf, or by calling
* evdns_search_ndots_set.
*
* For example, with ndots set to 1 (the default) and a search domain list of
* ["myhome.net"]:
* Query: www
* Order: www.myhome.net, www.
*
* Query: www.abc
* Order: www.abc., www.abc.myhome.net
*
* Internals:
*
* Requests are kept in two queues. The first is the inflight queue. In
* this queue requests have an allocated transaction id and nameserver.
* They will soon be transmitted if they haven't already been.
*
* The second is the waiting queue. The size of the inflight ring is
* limited and all other requests wait in waiting queue for space. This
* bounds the number of concurrent requests so that we don't flood the
* nameserver. Several algorithms require a full walk of the inflight
* queue and so bounding its size keeps things going nicely under huge
* (many thousands of requests) loads.
*
* If a nameserver loses too many requests it is considered down and we
* try not to use it. After a while we send a probe to that nameserver
* (a lookup for google.com) and, if it replies, we consider it working
* again. If the nameserver fails a probe we wait longer to try again
* with the next probe.
*/
#ifndef EVENTDNS_H
#define EVENTDNS_H
#ifdef __cplusplus
extern "C" {
#endif
/* For integer types. */
#include <evutil.h>
/** Error codes 0-5 are as described in RFC 1035. */
#define DNS_ERR_NONE 0
/** The name server was unable to interpret the query */
#define DNS_ERR_FORMAT 1
/** The name server was unable to process this query due to a problem with the
* name server */
#define DNS_ERR_SERVERFAILED 2
/** The domain name does not exist */
#define DNS_ERR_NOTEXIST 3
/** The name server does not support the requested kind of query */
#define DNS_ERR_NOTIMPL 4
/** The name server refuses to perform the specified operation for policy
* reasons */
#define DNS_ERR_REFUSED 5
/** The reply was truncated or ill-formatted */
#define DNS_ERR_TRUNCATED 65
/** An unknown error occurred */
#define DNS_ERR_UNKNOWN 66
/** Communication with the server timed out */
#define DNS_ERR_TIMEOUT 67
/** The request was canceled because the DNS subsystem was shut down. */
#define DNS_ERR_SHUTDOWN 68
#define DNS_IPv4_A 1
#define DNS_PTR 2
#define DNS_IPv6_AAAA 3
#define DNS_QUERY_NO_SEARCH 1
#define DNS_OPTION_SEARCH 1
#define DNS_OPTION_NAMESERVERS 2
#define DNS_OPTION_MISC 4
#define DNS_OPTIONS_ALL 7
/**
* The callback that contains the results from a lookup.
* - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA
* - count contains the number of addresses of form type
* - ttl is the number of seconds the resolution may be cached for.
* - addresses needs to be cast according to type
*/
typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg);
/**
Initialize the asynchronous DNS library.
This function initializes support for non-blocking name resolution by
calling evdns_resolv_conf_parse() on UNIX and
evdns_config_windows_nameservers() on Windows.
@return 0 if successful, or -1 if an error occurred
@see evdns_shutdown()
*/
int evdns_init(void);
/**
Shut down the asynchronous DNS resolver and terminate all active requests.
If the 'fail_requests' option is enabled, all active requests will return
an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
the requests will be silently discarded.
@param fail_requests if zero, active requests will be aborted; if non-zero,
active requests will return DNS_ERR_SHUTDOWN.
@see evdns_init()
*/
void evdns_shutdown(int fail_requests);
/**
Convert a DNS error code to a string.
@param err the DNS error code
@return a string containing an explanation of the error code
*/
const char *evdns_err_to_string(int err);
/**
Add a nameserver.
The address should be an IPv4 address in network byte order.
The type of address is chosen so that it matches in_addr.s_addr.
@param address an IP address in network byte order
@return 0 if successful, or -1 if an error occurred
@see evdns_nameserver_ip_add()
*/
int evdns_nameserver_add(unsigned long int address);
/**
Get the number of configured nameservers.
This returns the number of configured nameservers (not necessarily the
number of running nameservers). This is useful for double-checking
whether our calls to the various nameserver configuration functions
have been successful.
@return the number of configured nameservers
@see evdns_nameserver_add()
*/
int evdns_count_nameservers(void);
/**
Remove all configured nameservers, and suspend all pending resolves.
Resolves will not necessarily be re-attempted until evdns_resume() is called.
@return 0 if successful, or -1 if an error occurred
@see evdns_resume()
*/
int evdns_clear_nameservers_and_suspend(void);
/**
Resume normal operation and continue any suspended resolve requests.
Re-attempt resolves left in limbo after an earlier call to
evdns_clear_nameservers_and_suspend().
@return 0 if successful, or -1 if an error occurred
@see evdns_clear_nameservers_and_suspend()
*/
int evdns_resume(void);
/**
Add a nameserver.
This wraps the evdns_nameserver_add() function by parsing a string as an IP
address and adds it as a nameserver.
@return 0 if successful, or -1 if an error occurred
@see evdns_nameserver_add()
*/
int evdns_nameserver_ip_add(const char *ip_as_string);
/**
Lookup an A record for a given name.
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup an AAAA record for a given name.
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr);
struct in_addr;
struct in6_addr;
/**
Lookup a PTR record for a given IP address.
@param in an IPv4 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_reverse(struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup a PTR record for a given IPv6 address.
@param in an IPv6 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_reverse_ipv6(struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Set the value of a configuration option.
The currently available configuration options are:
ndots, timeout, max-timeouts, max-inflight, and attempts
@param option the name of the configuration option to be modified
@param val the value to be set
@param flags either 0 | DNS_OPTION_SEARCH | DNS_OPTION_MISC
@return 0 if successful, or -1 if an error occurred
*/
int evdns_set_option(const char *option, const char *val, int flags);
/**
Parse a resolv.conf file.
The 'flags' parameter determines what information is parsed from the
resolv.conf file. See the man page for resolv.conf for the format of this
file.
The following directives are not parsed from the file: sortlist, rotate,
no-check-names, inet6, debug.
If this function encounters an error, the possible return values are: 1 =
failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
memory, 5 = short read from file, 6 = no nameservers listed in the file
@param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
DNS_OPTIONS_ALL
@param filename the path to the resolv.conf file
@return 0 if successful, or various positive error codes if an error
occurred (see above)
@see resolv.conf(3), evdns_config_windows_nameservers()
*/
int evdns_resolv_conf_parse(int flags, const char *const filename);
/**
Obtain nameserver information using the Windows API.
Attempt to configure a set of nameservers based on platform settings on
a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
looks in the registry.
@return 0 if successful, or -1 if an error occurred
@see evdns_resolv_conf_parse()
*/
#ifdef MS_WINDOWS
int evdns_config_windows_nameservers(void);
#endif
/**
Clear the list of search domains.
*/
void evdns_search_clear(void);
/**
Add a domain to the list of search domains
@param domain the domain to be added to the search list
*/
void evdns_search_add(const char *domain);
/**
Set the 'ndots' parameter for searches.
Sets the number of dots which, when found in a name, causes
the first query to be without any search domain.
@param ndots the new ndots parameter
*/
void evdns_search_ndots_set(const int ndots);
/**
A callback that is invoked when a log message is generated
@param is_warning indicates if the log message is a 'warning'
@param msg the content of the log message
*/
typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg);
/**
Set the callback function to handle log messages.
@param fn the callback to be invoked when a log message is generated
*/
void evdns_set_log_fn(evdns_debug_log_fn_type fn);
/**
Set a callback that will be invoked to generate transaction IDs. By
default, we pick transaction IDs based on the current clock time.
@param fn the new callback, or NULL to use the default.
*/
void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void));
#define DNS_NO_SEARCH 1
/*
* Structures and functions used to implement a DNS server.
*/
struct evdns_server_request {
int flags;
int nquestions;
struct evdns_server_question **questions;
};
struct evdns_server_question {
int type;
#ifdef __cplusplus
int dns_question_class;
#else
/* You should refer to this field as "dns_question_class". The
* name "class" works in C for backward compatibility, and will be
* removed in a future version. (1.5 or later). */
int class;
#define dns_question_class class
#endif
char name[1];
};
typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *);
#define EVDNS_ANSWER_SECTION 0
#define EVDNS_AUTHORITY_SECTION 1
#define EVDNS_ADDITIONAL_SECTION 2
#define EVDNS_TYPE_A 1
#define EVDNS_TYPE_NS 2
#define EVDNS_TYPE_CNAME 5
#define EVDNS_TYPE_SOA 6
#define EVDNS_TYPE_PTR 12
#define EVDNS_TYPE_MX 15
#define EVDNS_TYPE_TXT 16
#define EVDNS_TYPE_AAAA 28
#define EVDNS_QTYPE_AXFR 252
#define EVDNS_QTYPE_ALL 255
#define EVDNS_CLASS_INET 1
struct evdns_server_port *evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type callback, void *user_data);
void evdns_close_server_port(struct evdns_server_port *port);
int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data);
int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl);
int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl);
int evdns_server_request_respond(struct evdns_server_request *req, int err);
int evdns_server_request_drop(struct evdns_server_request *req);
struct sockaddr;
int evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len);
#ifdef __cplusplus
}
#endif
#endif /* !EVENTDNS_H */
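To make the quick-start comment at the top of this header concrete, here is a small, hedged sketch of a complete lookup program; it uses only calls declared above plus event_init()/event_dispatch() from event.h, and the hostname is a placeholder:

#include <stdio.h>
#include "event.h"
#include "evdns.h"

static void lookup_done(int result, char type, int count, int ttl,
    void *addresses, void *arg)
{
    int i;
    unsigned char *addr = addresses;   /* DNS_IPv4_A: array of 4-byte addresses */

    if (result != DNS_ERR_NONE || type != DNS_IPv4_A) {
        fprintf(stderr, "lookup failed: %s\n", evdns_err_to_string(result));
        return;
    }
    for (i = 0; i < count; ++i)
        printf("%d.%d.%d.%d (ttl %d)\n", addr[i*4], addr[i*4+1],
               addr[i*4+2], addr[i*4+3], ttl);
}

int main(void)
{
    event_init();                 /* evdns is built on top of libevent */
    evdns_init();                 /* parses resolv.conf on UNIX */
    evdns_resolve_ipv4("www.example.com", 0, lookup_done, NULL);
    return event_dispatch();      /* runs until no events are left */
}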

(file name not shown in this view): diff suppressed because it is too large.

extra/libevent/event-internal.h (new file, 96 lines):
@@ -0,0 +1,96 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "config.h"
#include "min_heap.h"
#include "evsignal.h"
struct eventop {
const char *name;
void *(*init)(struct event_base *);
int (*add)(void *, struct event *);
int (*del)(void *, struct event *);
int (*dispatch)(struct event_base *, void *, struct timeval *);
void (*dealloc)(struct event_base *, void *);
/* set if we need to reinitialize the event base */
int need_reinit;
};
struct event_base {
const struct eventop *evsel;
void *evbase;
int event_count; /* counts number of total events */
int event_count_active; /* counts number of active events */
int event_gotterm; /* Set to terminate loop */
int event_break; /* Set to terminate loop immediately */
/* active event management */
struct event_list **activequeues;
int nactivequeues;
/* signal handling info */
struct evsignal_info sig;
struct event_list eventqueue;
struct timeval event_tv;
struct min_heap timeheap;
};
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#ifndef HAVE_TAILQFOREACH
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_FOREACH(var, head, field) \
for((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif /* TAILQ_FOREACH */
int _evsignal_set_handler(struct event_base *base, int evsignal,
void (*fn)(int));
int _evsignal_restore_handler(struct event_base *base, int evsignal);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT_INTERNAL_H_ */

extra/libevent/event.c (new file, 988 lines):
@@ -0,0 +1,988 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif
/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
&kqops,
#endif
#ifdef HAVE_EPOLL
&epollops,
#endif
#ifdef HAVE_DEVPOLL
&devpollops,
#endif
#ifdef HAVE_POLL
&pollops,
#endif
#ifdef HAVE_SELECT
&selectops,
#endif
#ifdef WIN32
&win32ops,
#endif
NULL
};
/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;
/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void); /* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig; /* Set in signal handler */
/* Prototypes */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int event_haveevents(struct event_base *);
static void event_process_active(struct event_base *);
static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
use_monotonic = 1;
#endif
}
static int
gettime(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
struct timespec ts;
if (use_monotonic) {
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
return (-1);
tp->tv_sec = ts.tv_sec;
tp->tv_usec = ts.tv_nsec / 1000;
return (0);
}
#endif
return (gettimeofday(tp, NULL));
}
struct event_base *
event_init(void)
{
struct event_base *base = event_base_new();
if (base != NULL)
current_base = base;
return (base);
}
struct event_base *
event_base_new(void)
{
int i;
struct event_base *base;
if ((base = calloc(1, sizeof(struct event_base))) == NULL)
event_err(1, "%s: calloc", __func__);
event_sigcb = NULL;
event_gotsig = 0;
detect_monotonic();
gettime(&base->event_tv);
min_heap_ctor(&base->timeheap);
TAILQ_INIT(&base->eventqueue);
TAILQ_INIT(&base->sig.signalqueue);
base->sig.ev_signal_pair[0] = -1;
base->sig.ev_signal_pair[1] = -1;
base->evbase = NULL;
for (i = 0; eventops[i] && !base->evbase; i++) {
base->evsel = eventops[i];
base->evbase = base->evsel->init(base);
}
if (base->evbase == NULL)
event_errx(1, "%s: no event mechanism available", __func__);
if (getenv("EVENT_SHOW_METHOD"))
event_msgx("libevent using: %s\n",
base->evsel->name);
/* allocate a single active event queue */
event_base_priority_init(base, 1);
return (base);
}
void
event_base_free(struct event_base *base)
{
int i, n_deleted=0;
struct event *ev;
if (base == NULL && current_base)
base = current_base;
if (base == current_base)
current_base = NULL;
/* XXX(niels) - check for internal events first */
assert(base);
/* Delete all non-internal events. */
for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
struct event *next = TAILQ_NEXT(ev, ev_next);
if (!(ev->ev_flags & EVLIST_INTERNAL)) {
event_del(ev);
++n_deleted;
}
ev = next;
}
while ((ev = min_heap_top(&base->timeheap)) != NULL) {
event_del(ev);
++n_deleted;
}
if (n_deleted)
event_debug(("%s: %d events were still set in base",
__func__, n_deleted));
if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base, base->evbase);
for (i = 0; i < base->nactivequeues; ++i)
assert(TAILQ_EMPTY(base->activequeues[i]));
assert(min_heap_empty(&base->timeheap));
min_heap_dtor(&base->timeheap);
for (i = 0; i < base->nactivequeues; ++i)
free(base->activequeues[i]);
free(base->activequeues);
assert(TAILQ_EMPTY(&base->eventqueue));
free(base);
}
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
const struct eventop *evsel = base->evsel;
void *evbase = base->evbase;
int res = 0;
struct event *ev;
/* check if this event mechanism requires reinit */
if (!evsel->need_reinit)
return (0);
if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base, base->evbase);
base->evbase = evsel->init(base);
if (base->evbase == NULL)
event_errx(1, "%s: could not reinitialize event mechanism",
__func__);
TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
if (evsel->add(evbase, ev) == -1)
res = -1;
}
return (res);
}
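A hedged usage note, not from the sources: some backends keep per-process state that does not survive fork(), so a child that wants to keep using the parent's event_base should call event_reinit() before dispatching. A minimal sketch:

#include <unistd.h>
#include "event.h"

/* Assumes 'base' was created with event_base_new() before the fork. */
static int continue_in_child(struct event_base *base)
{
    pid_t pid = fork();

    if (pid == 0) {
        /* child: re-register all events with a fresh backend instance */
        if (event_reinit(base) == -1)
            return -1;
    }
    return pid;
}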
int
event_priority_init(int npriorities)
{
return event_base_priority_init(current_base, npriorities);
}
int
event_base_priority_init(struct event_base *base, int npriorities)
{
int i;
if (base->event_count_active)
return (-1);
if (base->nactivequeues && npriorities != base->nactivequeues) {
for (i = 0; i < base->nactivequeues; ++i) {
free(base->activequeues[i]);
}
free(base->activequeues);
}
/* Allocate our priority queues */
base->nactivequeues = npriorities;
base->activequeues = (struct event_list **)calloc(base->nactivequeues,
sizeof(struct event_list *));
if (base->activequeues == NULL)
event_err(1, "%s: calloc", __func__);
for (i = 0; i < base->nactivequeues; ++i) {
base->activequeues[i] = malloc(sizeof(struct event_list));
if (base->activequeues[i] == NULL)
event_err(1, "%s: malloc", __func__);
TAILQ_INIT(base->activequeues[i]);
}
return (0);
}
int
event_haveevents(struct event_base *base)
{
return (base->event_count > 0);
}
/*
* Active events are stored in priority queues. Lower priorities are always
* processed before higher priorities. Low priority events can starve high
* priority ones.
*/
static void
event_process_active(struct event_base *base)
{
struct event *ev;
struct event_list *activeq = NULL;
int i;
short ncalls;
for (i = 0; i < base->nactivequeues; ++i) {
if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
activeq = base->activequeues[i];
break;
}
}
assert(activeq != NULL);
for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
if (ev->ev_events & EV_PERSIST)
event_queue_remove(base, ev, EVLIST_ACTIVE);
else
event_del(ev);
/* Allows deletes to work */
ncalls = ev->ev_ncalls;
ev->ev_pncalls = &ncalls;
while (ncalls) {
ncalls--;
ev->ev_ncalls = ncalls;
(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
if (event_gotsig || base->event_break)
return;
}
}
}
/*
* Wait continuously for events. We exit only if no events are left.
*/
int
event_dispatch(void)
{
return (event_loop(0));
}
int
event_base_dispatch(struct event_base *event_base)
{
return (event_base_loop(event_base, 0));
}
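A short, hedged sketch of the typical calling sequence (not part of event.c): set up a base, register an event, and hand control to the dispatcher.

#include <stdio.h>
#include "event.h"

static void on_readable(int fd, short what, void *arg)
{
    printf("fd %d became readable (flags 0x%x)\n", fd, what);
}

int main(void)
{
    struct event ev;

    event_init();                              /* creates current_base */
    /* EV_PERSIST keeps the event registered after each callback */
    event_set(&ev, 0 /* stdin */, EV_READ | EV_PERSIST, on_readable, NULL);
    event_add(&ev, NULL);                      /* no timeout */
    return event_dispatch();                   /* loops until no events remain */
}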
const char *
event_base_get_method(struct event_base *base)
{
assert(base);
return (base->evsel->name);
}
static void
event_loopexit_cb(int fd, short what, void *arg)
{
struct event_base *base = arg;
base->event_gotterm = 1;
}
/* not thread safe */
int
event_loopexit(struct timeval *tv)
{
return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
current_base, tv));
}
int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
event_base, tv));
}
/* not thread safe */
int
event_loopbreak(void)
{
return (event_base_loopbreak(current_base));
}
int
event_base_loopbreak(struct event_base *event_base)
{
if (event_base == NULL)
return (-1);
event_base->event_break = 1;
return (0);
}
/* not thread safe */
int
event_loop(int flags)
{
return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{
const struct eventop *evsel = base->evsel;
void *evbase = base->evbase;
struct timeval tv;
struct timeval *tv_p;
int res, done;
if(!TAILQ_EMPTY(&base->sig.signalqueue))
evsignal_base = base;
done = 0;
while (!done) {
/* Terminate the loop if we have been asked to */
if (base->event_gotterm) {
base->event_gotterm = 0;
break;
}
if (base->event_break) {
base->event_break = 0;
break;
}
/* You cannot use this interface for multi-threaded apps */
while (event_gotsig) {
event_gotsig = 0;
if (event_sigcb) {
res = (*event_sigcb)();
if (res == -1) {
errno = EINTR;
return (-1);
}
}
}
timeout_correct(base, &tv);
tv_p = &tv;
if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
timeout_next(base, &tv_p);
} else {
/*
* if we have active events, we just poll new events
* without waiting.
*/
evutil_timerclear(&tv);
}
/* If we have no events, we just exit */
if (!event_haveevents(base)) {
event_debug(("%s: no events registered.", __func__));
return (1);
}
res = evsel->dispatch(base, evbase, tv_p);
if (res == -1)
return (-1);
timeout_process(base);
if (base->event_count_active) {
event_process_active(base);
if (!base->event_count_active && (flags & EVLOOP_ONCE))
done = 1;
} else if (flags & EVLOOP_NONBLOCK)
done = 1;
}
event_debug(("%s: asked to terminate loop.", __func__));
return (0);
}
/* Sets up an event for processing once */
struct event_once {
struct event ev;
void (*cb)(int, short, void *);
void *arg;
};
/* One-time callback, it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
struct event_once *eonce = arg;
(*eonce->cb)(fd, events, eonce->arg);
free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
struct event_once *eonce;
struct timeval etv;
int res;
/* We cannot support signals that just fire once */
if (events & EV_SIGNAL)
return (-1);
if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
return (-1);
eonce->cb = callback;
eonce->arg = arg;
if (events == EV_TIMEOUT) {
if (tv == NULL) {
evutil_timerclear(&etv);
tv = &etv;
}
evtimer_set(&eonce->ev, event_once_cb, eonce);
} else if (events & (EV_READ|EV_WRITE)) {
events &= EV_READ|EV_WRITE;
event_set(&eonce->ev, fd, events, event_once_cb, eonce);
} else {
/* Bad event combination */
free(eonce);
return (-1);
}
res = event_base_set(base, &eonce->ev);
if (res == 0)
res = event_add(&eonce->ev, tv);
if (res != 0) {
free(eonce);
return (res);
}
return (0);
}
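A hedged sketch of a one-shot timeout using event_base_once() (the message and delay are placeholders); the internal event_once structure frees itself after the callback runs:

#include <stdio.h>
#include "event.h"

static void fire_once(int fd, short what, void *arg)
{
    printf("timer fired: %s\n", (const char *)arg);
}

/* Schedule a single EV_TIMEOUT callback 5 seconds from now on 'base'. */
static int schedule_demo(struct event_base *base)
{
    struct timeval tv = { 5, 0 };
    return event_base_once(base, -1, EV_TIMEOUT, fire_once,
                           (void *)"hello", &tv);
}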
void
event_set(struct event *ev, int fd, short events,
void (*callback)(int, short, void *), void *arg)
{
/* Take the current base - caller needs to set the real base later */
ev->ev_base = current_base;
ev->ev_callback = callback;
ev->ev_arg = arg;
ev->ev_fd = fd;
ev->ev_events = events;
ev->ev_res = 0;
ev->ev_flags = EVLIST_INIT;
ev->ev_ncalls = 0;
ev->ev_pncalls = NULL;
min_heap_elem_init(ev);
/* by default, we put new events into the middle priority */
if(current_base)
ev->ev_pri = current_base->nactivequeues/2;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
/* Only innocent events may be assigned to a different base */
if (ev->ev_flags != EVLIST_INIT)
return (-1);
ev->ev_base = base;
ev->ev_pri = base->nactivequeues/2;
return (0);
}
/*
* Sets the priority of an event - if an event is already scheduled
* changing the priority is going to fail.
*/
int
event_priority_set(struct event *ev, int pri)
{
if (ev->ev_flags & EVLIST_ACTIVE)
return (-1);
if (pri < 0 || pri >= ev->ev_base->nactivequeues)
return (-1);
ev->ev_pri = pri;
return (0);
}
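A hedged illustration of how the two priority calls combine (names are placeholders):

#include "event.h"

/* Give the base three priority levels (queue 0 is served first) and mark
 * one event as most urgent. event_base_priority_init() must be called
 * while no events are active, and event_priority_set() fails on an event
 * that is currently active. */
static void make_urgent(struct event_base *base, struct event *ev)
{
    event_base_priority_init(base, 3);
    event_priority_set(ev, 0);
}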
/*
* Checks if a specific event is pending or scheduled.
*/
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
struct timeval now, res;
int flags = 0;
if (ev->ev_flags & EVLIST_INSERTED)
flags |= (ev->ev_events & (EV_READ|EV_WRITE));
if (ev->ev_flags & EVLIST_ACTIVE)
flags |= ev->ev_res;
if (ev->ev_flags & EVLIST_TIMEOUT)
flags |= EV_TIMEOUT;
if (ev->ev_flags & EVLIST_SIGNAL)
flags |= EV_SIGNAL;
event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
/* See if there is a timeout that we should report */
if (tv != NULL && (flags & event & EV_TIMEOUT)) {
gettime(&now);
evutil_timersub(&ev->ev_timeout, &now, &res);
/* correctly remap to real time */
gettimeofday(&now, NULL);
evutil_timeradd(&now, &res, tv);
}
return (flags & event);
}
int
event_add(struct event *ev, struct timeval *tv)
{
struct event_base *base = ev->ev_base;
const struct eventop *evsel = base->evsel;
void *evbase = base->evbase;
event_debug((
"event_add: event: %p, %s%s%scall %p",
ev,
ev->ev_events & EV_READ ? "EV_READ " : " ",
ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
tv ? "EV_TIMEOUT " : " ",
ev->ev_callback));
assert(!(ev->ev_flags & ~EVLIST_ALL));
if (tv != NULL) {
struct timeval now;
if (ev->ev_flags & EVLIST_TIMEOUT)
event_queue_remove(base, ev, EVLIST_TIMEOUT);
else if (min_heap_reserve(&base->timeheap,
1 + min_heap_size(&base->timeheap)) == -1)
return (-1); /* ENOMEM == errno */
/* Check if it is active due to a timeout. Rescheduling
* this timeout before the callback can be executed
* removes it from the active list. */
if ((ev->ev_flags & EVLIST_ACTIVE) &&
(ev->ev_res & EV_TIMEOUT)) {
/* See if we are just active executing this
* event in a loop
*/
if (ev->ev_ncalls && ev->ev_pncalls) {
/* Abort loop */
*ev->ev_pncalls = 0;
}
event_queue_remove(base, ev, EVLIST_ACTIVE);
}
gettime(&now);
evutil_timeradd(&now, tv, &ev->ev_timeout);
event_debug((
"event_add: timeout in %d seconds, call %p",
tv->tv_sec, ev->ev_callback));
event_queue_insert(base, ev, EVLIST_TIMEOUT);
}
if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
!(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
int res = evsel->add(evbase, ev);
if (res != -1)
event_queue_insert(base, ev, EVLIST_INSERTED);
return (res);
} else if ((ev->ev_events & EV_SIGNAL) &&
!(ev->ev_flags & EVLIST_SIGNAL)) {
int res = evsel->add(evbase, ev);
if (res != -1)
event_queue_insert(base, ev, EVLIST_SIGNAL);
return (res);
}
return (0);
}
int
event_del(struct event *ev)
{
struct event_base *base;
const struct eventop *evsel;
void *evbase;
event_debug(("event_del: %p, callback %p",
ev, ev->ev_callback));
/* An event without a base has not been added */
if (ev->ev_base == NULL)
return (-1);
base = ev->ev_base;
evsel = base->evsel;
evbase = base->evbase;
assert(!(ev->ev_flags & ~EVLIST_ALL));
/* See if we are just active executing this event in a loop */
if (ev->ev_ncalls && ev->ev_pncalls) {
/* Abort loop */
*ev->ev_pncalls = 0;
}
if (ev->ev_flags & EVLIST_TIMEOUT)
event_queue_remove(base, ev, EVLIST_TIMEOUT);
if (ev->ev_flags & EVLIST_ACTIVE)
event_queue_remove(base, ev, EVLIST_ACTIVE);
if (ev->ev_flags & EVLIST_INSERTED) {
event_queue_remove(base, ev, EVLIST_INSERTED);
return (evsel->del(evbase, ev));
} else if (ev->ev_flags & EVLIST_SIGNAL) {
event_queue_remove(base, ev, EVLIST_SIGNAL);
return (evsel->del(evbase, ev));
}
return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
/* We get different kinds of events, add them together */
if (ev->ev_flags & EVLIST_ACTIVE) {
ev->ev_res |= res;
return;
}
ev->ev_res = res;
ev->ev_ncalls = ncalls;
ev->ev_pncalls = NULL;
event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
struct timeval now;
struct event *ev;
struct timeval *tv = *tv_p;
if ((ev = min_heap_top(&base->timeheap)) == NULL) {
/* if no time-based events are active wait for I/O */
*tv_p = NULL;
return (0);
}
if (gettime(&now) == -1)
return (-1);
if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
evutil_timerclear(tv);
return (0);
}
evutil_timersub(&ev->ev_timeout, &now, tv);
assert(tv->tv_sec >= 0);
assert(tv->tv_usec >= 0);
event_debug(("timeout_next: in %d seconds", tv->tv_sec));
return (0);
}
/*
* Determines if the time is running backwards by comparing the current
* time against the last time we checked. Not needed when using clock
* monotonic.
*/
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
struct event **pev;
unsigned int size;
struct timeval off;
if (use_monotonic)
return;
/* Check if time is running backwards */
gettime(tv);
if (evutil_timercmp(tv, &base->event_tv, >=)) {
base->event_tv = *tv;
return;
}
event_debug(("%s: time is running backwards, corrected",
__func__));
evutil_timersub(&base->event_tv, tv, &off);
/*
* We can modify the key element of the node without destroying
* the key, because we apply it to all in the right order.
*/
pev = base->timeheap.p;
size = base->timeheap.n;
for (; size-- > 0; ++pev) {
struct timeval *ev_tv = &(**pev).ev_timeout;
evutil_timersub(ev_tv, &off, ev_tv);
}
}
void
timeout_process(struct event_base *base)
{
struct timeval now;
struct event *ev;
if (min_heap_empty(&base->timeheap))
return;
gettime(&now);
while ((ev = min_heap_top(&base->timeheap))) {
if (evutil_timercmp(&ev->ev_timeout, &now, >))
break;
/* delete this event from the I/O queues */
event_del(ev);
event_debug(("timeout_process: call %p",
ev->ev_callback));
event_active(ev, EV_TIMEOUT, 1);
}
}
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
if (!(ev->ev_flags & queue))
event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
ev, ev->ev_fd, queue);
if (~ev->ev_flags & EVLIST_INTERNAL)
base->event_count--;
ev->ev_flags &= ~queue;
switch (queue) {
case EVLIST_ACTIVE:
base->event_count_active--;
TAILQ_REMOVE(base->activequeues[ev->ev_pri],
ev, ev_active_next);
break;
case EVLIST_SIGNAL:
TAILQ_REMOVE(&base->sig.signalqueue, ev, ev_signal_next);
break;
case EVLIST_TIMEOUT:
min_heap_erase(&base->timeheap, ev);
break;
case EVLIST_INSERTED:
TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
break;
default:
event_errx(1, "%s: unknown queue %x", __func__, queue);
}
}
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
if (ev->ev_flags & queue) {
/* Double insertion is possible for active events */
if (queue & EVLIST_ACTIVE)
return;
event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
ev, ev->ev_fd, queue);
}
if (~ev->ev_flags & EVLIST_INTERNAL)
base->event_count++;
ev->ev_flags |= queue;
switch (queue) {
case EVLIST_ACTIVE:
base->event_count_active++;
TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
ev,ev_active_next);
break;
case EVLIST_SIGNAL:
TAILQ_INSERT_TAIL(&base->sig.signalqueue, ev, ev_signal_next);
break;
case EVLIST_TIMEOUT: {
min_heap_push(&base->timeheap, ev);
break;
}
case EVLIST_INSERTED:
TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
break;
default:
event_errx(1, "%s: unknown queue %x", __func__, queue);
}
}
/* Functions for debugging */
const char *
event_get_version(void)
{
return (VERSION);
}
/*
* No thread-safe interface needed - the information should be the same
* for all threads.
*/
const char *
event_get_method(void)
{
return (current_base->evsel->name);
}

extra/libevent/event.h (new file, 1127 lines): diff suppressed because it is too large.

extra/libevent/event_tagging.c (new file, 443 lines):
@@ -0,0 +1,443 @@
/*
* Copyright (c) 2003, 2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#else
#include <sys/ioctl.h>
#endif
#include <sys/queue.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef WIN32
#include <syslog.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "event.h"
#include "evutil.h"
#include "log.h"
int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
static struct evbuffer *_buf; /* not thread safe */
void
evtag_init(void)
{
if (_buf != NULL)
return;
if ((_buf = evbuffer_new()) == NULL)
event_err(1, "%s: malloc", __func__);
}
/*
* We encode integers by nibbles; the first nibble contains the number
* of significant nibbles - 1; this allows us to encode up to 64-bit
* integers. This function is byte-order independent.
*/
void
encode_int(struct evbuffer *evbuf, ev_uint32_t number)
{
int off = 1, nibbles = 0;
ev_uint8_t data[5];
memset(data, 0, sizeof(data));
while (number) {
if (off & 0x1)
data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f);
else
data[off/2] = (data[off/2] & 0x0f) |
((number & 0x0f) << 4);
number >>= 4;
off++;
}
if (off > 2)
nibbles = off - 2;
/* Off - 1 is the number of encoded nibbles */
data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4);
evbuffer_add(evbuf, data, (off + 1) / 2);
}
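A worked example may help; this small driver (illustrative only, not part of the file, with the prototype repeated locally) prints the encoding of 0x1234, which comes out as the bytes 34 32 10: the leading nibble 3 says four significant nibbles follow, and the value nibbles 4, 3, 2, 1 are packed least-significant first into the remaining half-bytes.

#include <stdio.h>
#include "event.h"

void encode_int(struct evbuffer *evbuf, ev_uint32_t number); /* defined above */

int main(void)
{
    struct evbuffer *buf = evbuffer_new();
    size_t i;

    encode_int(buf, 0x1234);
    for (i = 0; i < EVBUFFER_LENGTH(buf); i++)
        printf("%02x ", EVBUFFER_DATA(buf)[i]);  /* prints: 34 32 10 */
    printf("\n");
    evbuffer_free(buf);
    return 0;
}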
/*
* Support variable length encoding of tags; we use the high bit in each
* octet as a continuation signal.
*/
int
evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
{
int bytes = 0;
ev_uint8_t data[5];
memset(data, 0, sizeof(data));
do {
ev_uint8_t lower = tag & 0x7f;
tag >>= 7;
if (tag)
lower |= 0x80;
data[bytes++] = lower;
} while (tag);
if (evbuf != NULL)
evbuffer_add(evbuf, data, bytes);
return (bytes);
}
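A brief worked example of the continuation encoding (illustrative, not from the sources), for tag = 300 (0x12c):

/*   first octet : 300 & 0x7f        = 0x2c, OR'd with 0x80 (more follows) -> 0xac
 *   second octet: (300 >> 7) & 0x7f = 0x02 (high bit clear, so we stop)   -> 0x02
 * hence evtag_encode_tag(NULL, 300) returns 2, and with a buffer it would
 * append the bytes 0xac 0x02. */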
static int
decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
{
ev_uint32_t number = 0;
ev_uint8_t *data = EVBUFFER_DATA(evbuf);
int len = EVBUFFER_LENGTH(evbuf);
int count = 0, shift = 0, done = 0;
while (count++ < len) {
ev_uint8_t lower = *data++;
number |= (lower & 0x7f) << shift;
shift += 7;
if (!(lower & 0x80)) {
done = 1;
break;
}
}
if (!done)
return (-1);
if (dodrain)
evbuffer_drain(evbuf, count);
if (ptag != NULL)
*ptag = number;
return (count);
}
int
evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
{
return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
}
/*
* Marshal a data type, the general format is as follows:
*
* tag number: var bytes; length: var bytes; payload: var bytes
*/
void
evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
const void *data, ev_uint32_t len)
{
evtag_encode_tag(evbuf, tag);
encode_int(evbuf, len);
evbuffer_add(evbuf, (void *)data, len);
}
/* Marshaling for integers */
void
evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
{
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
encode_int(_buf, integer);
evtag_encode_tag(evbuf, tag);
encode_int(evbuf, EVBUFFER_LENGTH(_buf));
evbuffer_add_buffer(evbuf, _buf);
}
void
evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
{
evtag_marshal(buf, tag, string, strlen(string));
}
void
evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
{
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
encode_int(_buf, tv->tv_sec);
encode_int(_buf, tv->tv_usec);
evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf),
EVBUFFER_LENGTH(_buf));
}
static int
decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int dodrain)
{
ev_uint32_t number = 0;
ev_uint8_t *data = EVBUFFER_DATA(evbuf);
int len = EVBUFFER_LENGTH(evbuf);
int nibbles = 0;
if (!len)
return (-1);
nibbles = ((data[0] & 0xf0) >> 4) + 1;
if (nibbles > 8 || (nibbles >> 1) + 1 > len)
return (-1);
len = (nibbles >> 1) + 1;
while (nibbles > 0) {
number <<= 4;
if (nibbles & 0x1)
number |= data[nibbles >> 1] & 0x0f;
else
number |= (data[nibbles >> 1] & 0xf0) >> 4;
nibbles--;
}
if (dodrain)
evbuffer_drain(evbuf, len);
*pnumber = number;
return (len);
}
int
evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
{
return (decode_int_internal(pnumber, evbuf, 1) == -1 ? -1 : 0);
}
int
evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
}
int
evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
struct evbuffer tmp;
int res, len;
len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
if (len == -1)
return (-1);
tmp = *evbuf;
tmp.buffer += len;
tmp.off -= len;
res = decode_int_internal(plength, &tmp, 0);
if (res == -1)
return (-1);
*plength += res + len;
return (0);
}
int
evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
struct evbuffer tmp;
int res, len;
len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
if (len == -1)
return (-1);
tmp = *evbuf;
tmp.buffer += len;
tmp.off -= len;
res = decode_int_internal(plength, &tmp, 0);
if (res == -1)
return (-1);
return (0);
}
int
evtag_consume(struct evbuffer *evbuf)
{
ev_uint32_t len;
if (decode_tag_internal(NULL, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (evtag_decode_int(&len, evbuf) == -1)
return (-1);
evbuffer_drain(evbuf, len);
return (0);
}
/* Reads the data type from an event buffer */
int
evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
{
ev_uint32_t len;
ev_uint32_t integer;
if (decode_tag_internal(ptag, src, 1 /* dodrain */) == -1)
return (-1);
if (evtag_decode_int(&integer, src) == -1)
return (-1);
len = integer;
if (EVBUFFER_LENGTH(src) < len)
return (-1);
if (evbuffer_add(dst, EVBUFFER_DATA(src), len) == -1)
return (-1);
evbuffer_drain(src, len);
return (len);
}
/* Marshaling for integers */
int
evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
ev_uint32_t *pinteger)
{
ev_uint32_t tag;
ev_uint32_t len;
ev_uint32_t integer;
if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (need_tag != tag)
return (-1);
if (evtag_decode_int(&integer, evbuf) == -1)
return (-1);
len = integer;
if (EVBUFFER_LENGTH(evbuf) < len)
return (-1);
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
if (evbuffer_add(_buf, EVBUFFER_DATA(evbuf), len) == -1)
return (-1);
evbuffer_drain(evbuf, len);
return (evtag_decode_int(pinteger, _buf));
}
/* Unmarshal a fixed length tag */
int
evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
size_t len)
{
ev_uint32_t tag;
/* Initialize this event buffer so that we can read into it */
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
/* Now unmarshal a tag and check that it matches the tag we want */
if (evtag_unmarshal(src, &tag, _buf) == -1 || tag != need_tag)
return (-1);
if (EVBUFFER_LENGTH(_buf) != len)
return (-1);
memcpy(data, EVBUFFER_DATA(_buf), len);
return (0);
}
int
evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
char **pstring)
{
ev_uint32_t tag;
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
return (-1);
*pstring = calloc(EVBUFFER_LENGTH(_buf) + 1, 1);
if (*pstring == NULL)
event_err(1, "%s: calloc", __func__);
evbuffer_remove(_buf, *pstring, EVBUFFER_LENGTH(_buf));
return (0);
}
int
evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
struct timeval *ptv)
{
ev_uint32_t tag;
ev_uint32_t integer;
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
return (-1);
if (evtag_decode_int(&integer, _buf) == -1)
return (-1);
ptv->tv_sec = integer;
if (evtag_decode_int(&integer, _buf) == -1)
return (-1);
ptv->tv_usec = integer;
return (0);
}

extra/libevent/evhttp.h (new file, 340 lines):
@@ -0,0 +1,340 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVHTTP_H_
#define _EVHTTP_H_
#include <event.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <winsock2.h>
#undef WIN32_LEAN_AND_MEAN
#endif
/** @file evhttp.h
*
* Basic support for HTTP serving.
*
* As libevent is a library for dealing with event notification and most
* interesting applications are networked today, I have often found the
* need to write HTTP code. The following prototypes and definitions provide
* an application with a minimal interface for making HTTP requests and for
* creating a very simple HTTP server.
*/
/* Response codes */
#define HTTP_OK 200
#define HTTP_NOCONTENT 204
#define HTTP_MOVEPERM 301
#define HTTP_MOVETEMP 302
#define HTTP_NOTMODIFIED 304
#define HTTP_BADREQUEST 400
#define HTTP_NOTFOUND 404
#define HTTP_SERVUNAVAIL 503
struct evhttp;
struct evhttp_request;
struct evkeyvalq;
/** Create a new HTTP server
*
* @param base (optional) the event base to receive the HTTP events
* @return a pointer to a newly initialized evhttp server structure
*/
struct evhttp *evhttp_new(struct event_base *base);
/**
* Binds an HTTP server on the specified address and port.
*
* Can be called multiple times to bind the same http server
* to multiple different ports.
*
* @param http a pointer to an evhttp object
* @param address a string containing the IP address to listen(2) on
* @param port the port number to listen on
* @return 0 on success, -1 on failure
* @see evhttp_free()
*/
int evhttp_bind_socket(struct evhttp *http, const char *address, u_short port);
/**
* Free the previously created HTTP server.
*
* Works only if no requests are currently being served.
*
* @param http the evhttp server object to be freed
* @see evhttp_start()
*/
void evhttp_free(struct evhttp* http);
/** Set a callback for a specified URI */
void evhttp_set_cb(struct evhttp *, const char *,
void (*)(struct evhttp_request *, void *), void *);
/** Removes the callback for a specified URI */
int evhttp_del_cb(struct evhttp *, const char *);
/** Set a callback for all requests that are not caught by specific callbacks
*/
void evhttp_set_gencb(struct evhttp *,
void (*)(struct evhttp_request *, void *), void *);
/**
* Set the timeout for an HTTP request.
*
* @param http an evhttp object
* @param timeout_in_secs the timeout, in seconds
*/
void evhttp_set_timeout(struct evhttp *, int timeout_in_secs);
/* Request/Response functionality */
/**
* Send an HTML error message to the client.
*
* @param req a request object
* @param error the HTTP error code
* @param reason a brief explanation of the error
*/
void evhttp_send_error(struct evhttp_request *req, int error,
const char *reason);
/**
* Send an HTML reply to the client.
*
* @param req a request object
* @param code the HTTP response code to send
* @param reason a brief message to send with the response code
* @param databuf the body of the response
*/
void evhttp_send_reply(struct evhttp_request *req, int code,
const char *reason, struct evbuffer *databuf);
/* Low-level response interface, for streaming/chunked replies */
void evhttp_send_reply_start(struct evhttp_request *, int, const char *);
void evhttp_send_reply_chunk(struct evhttp_request *, struct evbuffer *);
void evhttp_send_reply_end(struct evhttp_request *);
/**
* Start an HTTP server on the specified address and port
*
* DEPRECATED: it does not allow an event base to be specified
*
* @param address the address to which the HTTP server should be bound
* @param port the port number on which the HTTP server should listen
* @return a struct evhttp object
*/
struct evhttp *evhttp_start(const char *address, u_short port);
/*
* Interfaces for making requests
*/
enum evhttp_cmd_type { EVHTTP_REQ_GET, EVHTTP_REQ_POST, EVHTTP_REQ_HEAD };
enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE };
/**
* the request structure that a server receives.
* WARNING: expect this structure to change. I will try to provide
* reasonable accessors.
*/
struct evhttp_request {
#if defined(TAILQ_ENTRY)
TAILQ_ENTRY(evhttp_request) next;
#else
struct {
struct evhttp_request *tqe_next;
struct evhttp_request **tqe_prev;
} next;
#endif
/* the connection object that this request belongs to */
struct evhttp_connection *evcon;
int flags;
#define EVHTTP_REQ_OWN_CONNECTION 0x0001
#define EVHTTP_PROXY_REQUEST 0x0002
struct evkeyvalq *input_headers;
struct evkeyvalq *output_headers;
/* address of the remote host and the port connection came from */
char *remote_host;
u_short remote_port;
enum evhttp_request_kind kind;
enum evhttp_cmd_type type;
char *uri; /* uri after HTTP request was parsed */
char major; /* HTTP Major number */
char minor; /* HTTP Minor number */
int got_firstline;
int response_code; /* HTTP Response code */
char *response_code_line; /* Readable response */
struct evbuffer *input_buffer; /* read data */
ev_int64_t ntoread;
int chunked;
struct evbuffer *output_buffer; /* outgoing post or data */
/* Callback */
void (*cb)(struct evhttp_request *, void *);
void *cb_arg;
/*
* Chunked data callback - call for each completed chunk if
* specified. If not specified, all the data is delivered via
* the regular callback.
*/
void (*chunk_cb)(struct evhttp_request *, void *);
};
/**
* Creates a new request object that needs to be filled in with the request
* parameters. The callback is executed when the request completes or an
* error occurs.
*/
struct evhttp_request *evhttp_request_new(
void (*cb)(struct evhttp_request *, void *), void *arg);
/** enable delivery of chunks to requestor */
void evhttp_request_set_chunked_cb(struct evhttp_request *,
void (*cb)(struct evhttp_request *, void *));
/** Frees the request object and removes associated events. */
void evhttp_request_free(struct evhttp_request *req);
/**
* A connection object that can be used for making HTTP requests. The
* connection object tries to establish the connection when it is given an
* http request object.
*/
struct evhttp_connection *evhttp_connection_new(
const char *address, unsigned short port);
/** Frees an http connection */
void evhttp_connection_free(struct evhttp_connection *evcon);
/** sets the ip address from which http connections are made */
void evhttp_connection_set_local_address(struct evhttp_connection *evcon,
const char *address);
/** Sets the timeout for events related to this connection */
void evhttp_connection_set_timeout(struct evhttp_connection *evcon,
int timeout_in_secs);
/** Sets the retry limit for this connection; -1 repeats indefinitely */
void evhttp_connection_set_retries(struct evhttp_connection *evcon,
int retry_max);
/** Set a callback for connection close. */
void evhttp_connection_set_closecb(struct evhttp_connection *evcon,
void (*)(struct evhttp_connection *, void *), void *);
/**
* Associates an event base with the connection - can only be called
* on a freshly created connection object that has not been used yet.
*/
void evhttp_connection_set_base(struct evhttp_connection *evcon,
struct event_base *base);
/** Get the remote address and port associated with this connection. */
void evhttp_connection_get_peer(struct evhttp_connection *evcon,
char **address, u_short *port);
/** The connection gets ownership of the request */
int evhttp_make_request(struct evhttp_connection *evcon,
struct evhttp_request *req,
enum evhttp_cmd_type type, const char *uri);
const char *evhttp_request_uri(struct evhttp_request *req);
/* Interfaces for dealing with HTTP headers */
const char *evhttp_find_header(const struct evkeyvalq *, const char *);
int evhttp_remove_header(struct evkeyvalq *, const char *);
int evhttp_add_header(struct evkeyvalq *, const char *, const char *);
void evhttp_clear_headers(struct evkeyvalq *);
/* Miscellaneous utility functions */
/**
Helper function to encode a URI.
The returned string must be freed by the caller.
@param uri an unencoded URI
@return a newly allocated URI-encoded string
*/
char *evhttp_encode_uri(const char *uri);
/**
Helper function to decode a URI.
The returned string must be freed by the caller.
@param uri an encoded URI
@return a newly allocated unencoded URI
*/
char *evhttp_decode_uri(const char *uri);
/**
* Helper function to parse out arguments in a query.
* The arguments are separated by key and value.
* URI should already be decoded.
*/
void evhttp_parse_query(const char *uri, struct evkeyvalq *);
/**
* Escape HTML character entities in a string.
*
* Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
* &#039; and &amp; correspondingly.
*
* The returned string needs to be freed by the caller.
*
* @param html an unescaped HTML string
* @return an escaped HTML string
*/
char *evhttp_htmlescape(const char *html);
#ifdef __cplusplus
}
#endif
#endif /* _EVHTTP_H_ */
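
A minimal sketch of how this interface fits together, assuming the bundled event.h entry points (event_init(), event_base_dispatch(), evbuffer_add_printf()); the listen address and port below are arbitrary.

#include <event.h>
#include <evhttp.h>

/* Generic handler: runs for every URI without a specific callback. */
static void
gencb(struct evhttp_request *req, void *arg)
{
	struct evbuffer *body = evbuffer_new();
	evbuffer_add_printf(body, "you asked for %s\n", evhttp_request_uri(req));
	evhttp_send_reply(req, HTTP_OK, "OK", body);
	evbuffer_free(body);
}

int
main(void)
{
	struct event_base *base = event_init();
	struct evhttp *http = evhttp_new(base);

	if (http == NULL || evhttp_bind_socket(http, "0.0.0.0", 8080) == -1)
		return (1);
	evhttp_set_gencb(http, gencb, NULL);
	event_base_dispatch(base);           /* serve until the loop is broken */

	evhttp_free(http);
	return (0);
}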

extra/libevent/evport.c Normal file

@ -0,0 +1,517 @@
/*
* Submitted by David Pacheco (dp.spambait@gmail.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2007 Sun Microsystems. All rights reserved.
* Use is subject to license terms.
*/
/*
* evport.c: event backend using Solaris 10 event ports. See port_create(3C).
* This implementation is loosely modeled after the one used for select(2) (in
* select.c).
*
* The outstanding events are tracked in a data structure called evport_data.
* Each entry in the ed_fds array corresponds to a file descriptor, and contains
* pointers to the read and write events that correspond to that fd. (That is,
* when the file is readable, the "read" event should handle it, etc.)
*
* evport_add and evport_del update this data structure. evport_dispatch uses it
* to determine where to callback when an event occurs (which it gets from
* port_getn).
*
* Helper functions are used: grow() grows the file descriptor array as
* necessary when large fd's come in. reassociate() takes care of maintaining
* the proper file-descriptor/event-port associations.
*
* As in the select(2) implementation, signals are handled by evsignal.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_EVENT_PORTS
#include <sys/time.h>
#include <assert.h>
#include <sys/queue.h>
#include <errno.h>
#include <poll.h>
#include <port.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "log.h"
#include "evsignal.h"
/*
* Default value for ed_nevents, which is the maximum file descriptor number we
* can handle. If an event comes in for a file descriptor F > nevents, we will
* grow the array of file descriptors, doubling its size.
*/
#define DEFAULT_NFDS 16
/*
* EVENTS_PER_GETN is the maximum number of events to retrieve from port_getn on
* any particular call. You can speed things up by increasing this, but it will
* (obviously) require more memory.
*/
#define EVENTS_PER_GETN 8
/*
* Per-file-descriptor information about what events we're subscribed to. These
* fields are NULL if no event is subscribed to either of them.
*/
struct fd_info {
struct event* fdi_revt; /* the event responsible for the "read" */
struct event* fdi_wevt; /* the event responsible for the "write" */
};
#define FDI_HAS_READ(fdi) ((fdi)->fdi_revt != NULL)
#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_wevt != NULL)
#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
(FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
struct evport_data {
int ed_port; /* event port for system events */
int ed_nevents; /* number of allocated fdi's */
struct fd_info *ed_fds; /* allocated fdi table */
/* fdi's that we need to reassoc */
int ed_pending[EVENTS_PER_GETN]; /* fd's with pending events */
};
static void* evport_init (struct event_base *);
static int evport_add (void *, struct event *);
static int evport_del (void *, struct event *);
static int evport_dispatch (struct event_base *, void *, struct timeval *);
static void evport_dealloc (struct event_base *, void *);
const struct eventop evportops = {
"event ports",
evport_init,
evport_add,
evport_del,
evport_dispatch,
evport_dealloc,
1 /* need reinit */
};
/*
* Initialize the event port implementation.
*/
static void*
evport_init(struct event_base *base)
{
struct evport_data *evpd;
int i;
/*
* Disable event ports when this environment variable is set
*/
if (getenv("EVENT_NOEVPORT"))
return (NULL);
if (!(evpd = calloc(1, sizeof(struct evport_data))))
return (NULL);
if ((evpd->ed_port = port_create()) == -1) {
free(evpd);
return (NULL);
}
/*
* Initialize file descriptor structure
*/
evpd->ed_fds = calloc(DEFAULT_NFDS, sizeof(struct fd_info));
if (evpd->ed_fds == NULL) {
close(evpd->ed_port);
free(evpd);
return (NULL);
}
evpd->ed_nevents = DEFAULT_NFDS;
for (i = 0; i < EVENTS_PER_GETN; i++)
evpd->ed_pending[i] = -1;
evsignal_init(base);
return (evpd);
}
#ifdef CHECK_INVARIANTS
/*
* Checks some basic properties about the evport_data structure. Because it
* checks all file descriptors, this function can be expensive when the maximum
* file descriptor ever used is rather large.
*/
static void
check_evportop(struct evport_data *evpd)
{
assert(evpd);
assert(evpd->ed_nevents > 0);
assert(evpd->ed_port > 0);
assert(evpd->ed_fds > 0);
/*
* Verify the integrity of the fd_info struct as well as the events to
* which it points (at least, that they're valid references and correct
* for their position in the structure).
*/
int i;
for (i = 0; i < evpd->ed_nevents; ++i) {
struct event *ev;
struct fd_info *fdi;
fdi = &evpd->ed_fds[i];
if ((ev = fdi->fdi_revt) != NULL) {
assert(ev->ev_fd == i);
}
if ((ev = fdi->fdi_wevt) != NULL) {
assert(ev->ev_fd == i);
}
}
}
/*
* Verifies very basic integrity of a given port_event.
*/
static void
check_event(port_event_t* pevt)
{
/*
* We've only registered for PORT_SOURCE_FD events. The only
* other thing we can legitimately receive is PORT_SOURCE_ALERT,
* but since we're not using port_alert either, we can assume
* PORT_SOURCE_FD.
*/
assert(pevt->portev_source == PORT_SOURCE_FD);
assert(pevt->portev_user == NULL);
}
#else
#define check_evportop(epop)
#define check_event(pevt)
#endif /* CHECK_INVARIANTS */
/*
* Doubles the size of the allocated file descriptor array.
*/
static int
grow(struct evport_data *epdp, int factor)
{
struct fd_info *tmp;
int oldsize = epdp->ed_nevents;
int newsize = factor * oldsize;
assert(factor > 1);
check_evportop(epdp);
tmp = realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
if (NULL == tmp)
return -1;
epdp->ed_fds = tmp;
memset((char*) (epdp->ed_fds + oldsize), 0,
(newsize - oldsize)*sizeof(struct fd_info));
epdp->ed_nevents = newsize;
check_evportop(epdp);
return 0;
}
/*
* (Re)associates the given file descriptor with the event port. The OS events
* are specified (implicitly) from the fd_info struct.
*/
static int
reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
{
int sysevents = FDI_TO_SYSEVENTS(fdip);
if (sysevents != 0) {
if (port_associate(epdp->ed_port, PORT_SOURCE_FD,
fd, sysevents, NULL) == -1) {
event_warn("port_associate");
return (-1);
}
}
check_evportop(epdp);
return (0);
}
/*
* Main event loop - polls port_getn for some number of events, and processes
* them.
*/
static int
evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
int i, res;
struct evport_data *epdp = arg;
port_event_t pevtlist[EVENTS_PER_GETN];
/*
* port_getn will block until it has at least nevents events. It will
* also return how many it's given us (which may be more than we asked
* for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
* nevents.
*/
int nevents = 1;
/*
* We have to convert a struct timeval to a struct timespec
* (only difference is nanoseconds vs. microseconds). If no time-based
* events are active, we should wait for I/O (and tv == NULL).
*/
struct timespec ts;
struct timespec *ts_p = NULL;
if (tv != NULL) {
ts.tv_sec = tv->tv_sec;
ts.tv_nsec = tv->tv_usec * 1000;
ts_p = &ts;
}
/*
* Before doing anything else, we need to reassociate the events we hit
* last time which need reassociation. See comment at the end of the
* loop below.
*/
for (i = 0; i < EVENTS_PER_GETN; ++i) {
struct fd_info *fdi = NULL;
if (epdp->ed_pending[i] != -1) {
fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
}
if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
int fd = FDI_HAS_READ(fdi) ? fdi->fdi_revt->ev_fd :
fdi->fdi_wevt->ev_fd;
reassociate(epdp, fdi, fd);
epdp->ed_pending[i] = -1;
}
}
if ((res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
(unsigned int *) &nevents, ts_p)) == -1) {
if (errno == EINTR || errno == EAGAIN) {
evsignal_process(base);
return (0);
} else if (errno == ETIME) {
if (nevents == 0)
return (0);
} else {
event_warn("port_getn");
return (-1);
}
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: port_getn reports %d events", __func__, nevents));
for (i = 0; i < nevents; ++i) {
struct event *ev;
struct fd_info *fdi;
port_event_t *pevt = &pevtlist[i];
int fd = (int) pevt->portev_object;
check_evportop(epdp);
check_event(pevt);
epdp->ed_pending[i] = fd;
/*
* Figure out what kind of event it was
* (because we have to pass this to the callback)
*/
res = 0;
if (pevt->portev_events & POLLIN)
res |= EV_READ;
if (pevt->portev_events & POLLOUT)
res |= EV_WRITE;
assert(epdp->ed_nevents > fd);
fdi = &(epdp->ed_fds[fd]);
/*
* We now check for each of the possible events (READ
* or WRITE). Then, we activate the event (which will
* cause its callback to be executed).
*/
if ((res & EV_READ) && ((ev = fdi->fdi_revt) != NULL)) {
event_active(ev, res, 1);
}
if ((res & EV_WRITE) && ((ev = fdi->fdi_wevt) != NULL)) {
event_active(ev, res, 1);
}
} /* end of all events gotten */
check_evportop(epdp);
return (0);
}
/*
* Adds the given event (so that you will be notified when it happens via
* the callback function).
*/
static int
evport_add(void *arg, struct event *ev)
{
struct evport_data *evpd = arg;
struct fd_info *fdi;
int factor;
check_evportop(evpd);
/*
* Delegate, if it's not ours to handle.
*/
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
/*
* If necessary, grow the file descriptor info table
*/
factor = 1;
while (ev->ev_fd >= factor * evpd->ed_nevents)
factor *= 2;
if (factor > 1) {
if (-1 == grow(evpd, factor)) {
return (-1);
}
}
fdi = &evpd->ed_fds[ev->ev_fd];
if (ev->ev_events & EV_READ)
fdi->fdi_revt = ev;
if (ev->ev_events & EV_WRITE)
fdi->fdi_wevt = ev;
return reassociate(evpd, fdi, ev->ev_fd);
}
/*
* Removes the given event from the list of events to wait for.
*/
static int
evport_del(void *arg, struct event *ev)
{
struct evport_data *evpd = arg;
struct fd_info *fdi;
int i;
int associated = 1;
check_evportop(evpd);
/*
* Delegate, if it's not ours to handle
*/
if (ev->ev_events & EV_SIGNAL) {
return (evsignal_del(ev));
}
if (evpd->ed_nevents < ev->ev_fd) {
return (-1);
}
for (i = 0; i < EVENTS_PER_GETN; ++i) {
if (evpd->ed_pending[i] == ev->ev_fd) {
associated = 0;
break;
}
}
fdi = &evpd->ed_fds[ev->ev_fd];
if (ev->ev_events & EV_READ)
fdi->fdi_revt = NULL;
if (ev->ev_events & EV_WRITE)
fdi->fdi_wevt = NULL;
if (associated) {
if (!FDI_HAS_EVENTS(fdi) &&
port_dissociate(evpd->ed_port, PORT_SOURCE_FD,
ev->ev_fd) == -1) {
/*
* Ignore EBADFD error; the fd could have been closed
* before event_del() was called.
*/
if (errno != EBADFD) {
event_warn("port_dissociate");
return (-1);
}
} else {
if (FDI_HAS_EVENTS(fdi)) {
return (reassociate(evpd, fdi, ev->ev_fd));
}
}
} else {
if (fdi->fdi_revt == NULL && fdi->fdi_wevt == NULL) {
evpd->ed_pending[i] = -1;
}
}
return 0;
}
static void
evport_dealloc(struct event_base *base, void *arg)
{
struct evport_data *evpd = arg;
evsignal_dealloc(base);
close(evpd->ed_port);
if (evpd->ed_fds)
free(evpd->ed_fds);
free(evpd);
}
#endif /* HAVE_EVENT_PORTS */

extra/libevent/evrpc-internal.h Normal file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVRPC_INTERNAL_H_
#define _EVRPC_INTERNAL_H_
#include "http-internal.h"
struct evrpc;
#define EVRPC_URI_PREFIX "/.rpc."
struct evrpc_hook {
TAILQ_ENTRY(evrpc_hook) (next);
/* returns -1; if the rpc should be aborted, is allowed to rewrite */
int (*process)(struct evhttp_request *, struct evbuffer *, void *);
void *process_arg;
};
TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
/*
* this is shared between the base and the pool, so that we can reuse
* the hook adding functions; we alias both evrpc_pool and evrpc_base
* to this common structure.
*/
struct _evrpc_hooks {
/* hooks for processing outbound and inbound rpcs */
struct evrpc_hook_list in_hooks;
struct evrpc_hook_list out_hooks;
};
#define input_hooks common.in_hooks
#define output_hooks common.out_hooks
struct evrpc_base {
struct _evrpc_hooks common;
/* the HTTP server under which we register our RPC calls */
struct evhttp* http_server;
/* a list of all RPCs registered with us */
TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
};
struct evrpc_req_generic;
void evrpc_reqstate_free(struct evrpc_req_generic* rpc_state);
/* A pool for holding evhttp_connection objects */
struct evrpc_pool {
struct _evrpc_hooks common;
struct event_base *base;
struct evconq connections;
int timeout;
TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) requests;
};
#endif /* _EVRPC_INTERNAL_H_ */

extra/libevent/evrpc.c Normal file

@ -0,0 +1,658 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <winsock2.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif
#include <sys/types.h>
#ifndef WIN32
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include "event.h"
#include "evrpc.h"
#include "evrpc-internal.h"
#include "evhttp.h"
#include "evutil.h"
#include "log.h"
struct evrpc_base *
evrpc_init(struct evhttp *http_server)
{
struct evrpc_base* base = calloc(1, sizeof(struct evrpc_base));
if (base == NULL)
return (NULL);
/* we rely on the tagging sub system */
evtag_init();
TAILQ_INIT(&base->registered_rpcs);
TAILQ_INIT(&base->input_hooks);
TAILQ_INIT(&base->output_hooks);
base->http_server = http_server;
return (base);
}
void
evrpc_free(struct evrpc_base *base)
{
struct evrpc *rpc;
struct evrpc_hook *hook;
while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) {
assert(evrpc_unregister_rpc(base, rpc->uri));
}
while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) {
assert(evrpc_remove_hook(base, INPUT, hook));
}
while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) {
assert(evrpc_remove_hook(base, OUTPUT, hook));
}
free(base);
}
void *
evrpc_add_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hook_list *head = NULL;
struct evrpc_hook *hook = NULL;
switch (hook_type) {
case INPUT:
head = &base->in_hooks;
break;
case OUTPUT:
head = &base->out_hooks;
break;
default:
assert(hook_type == INPUT || hook_type == OUTPUT);
}
hook = calloc(1, sizeof(struct evrpc_hook));
assert(hook != NULL);
hook->process = cb;
hook->process_arg = cb_arg;
TAILQ_INSERT_TAIL(head, hook, next);
return (hook);
}
static int
evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
{
struct evrpc_hook *hook = NULL;
TAILQ_FOREACH(hook, head, next) {
if (hook == handle) {
TAILQ_REMOVE(head, hook, next);
free(hook);
return (1);
}
}
return (0);
}
/*
* remove the hook specified by the handle
*/
int
evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hook_list *head = NULL;
switch (hook_type) {
case INPUT:
head = &base->in_hooks;
break;
case OUTPUT:
head = &base->out_hooks;
break;
default:
assert(hook_type == INPUT || hook_type == OUTPUT);
}
return (evrpc_remove_hook_internal(head, handle));
}
static int
evrpc_process_hooks(struct evrpc_hook_list *head,
struct evhttp_request *req, struct evbuffer *evbuf)
{
struct evrpc_hook *hook;
TAILQ_FOREACH(hook, head, next) {
if (hook->process(req, evbuf, hook->process_arg) == -1)
return (-1);
}
return (0);
}
static void evrpc_pool_schedule(struct evrpc_pool *pool);
static void evrpc_request_cb(struct evhttp_request *, void *);
void evrpc_request_done(struct evrpc_req_generic*);
/*
* Registers a new RPC with the HTTP server. The evrpc object is expected
* to have been filled in via the EVRPC_REGISTER_OBJECT macro which in turn
* calls this function.
*/
static char *
evrpc_construct_uri(const char *uri)
{
char *constructed_uri;
int constructed_uri_len;
constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1;
if ((constructed_uri = malloc(constructed_uri_len)) == NULL)
event_err(1, "%s: failed to register rpc at %s",
__func__, uri);
memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX));
memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri));
constructed_uri[constructed_uri_len - 1] = '\0';
return (constructed_uri);
}
int
evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc,
void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg)
{
char *constructed_uri = evrpc_construct_uri(rpc->uri);
rpc->base = base;
rpc->cb = cb;
rpc->cb_arg = cb_arg;
TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next);
evhttp_set_cb(base->http_server,
constructed_uri,
evrpc_request_cb,
rpc);
free(constructed_uri);
return (0);
}
int
evrpc_unregister_rpc(struct evrpc_base *base, const char *name)
{
char *registered_uri = NULL;
struct evrpc *rpc;
/* find the right rpc; linear search might be slow */
TAILQ_FOREACH(rpc, &base->registered_rpcs, next) {
if (strcmp(rpc->uri, name) == 0)
break;
}
if (rpc == NULL) {
/* We did not find an RPC with this name */
return (-1);
}
TAILQ_REMOVE(&base->registered_rpcs, rpc, next);
free((char *)rpc->uri);
free(rpc);
registered_uri = evrpc_construct_uri(name);
/* remove the http server callback */
assert(evhttp_del_cb(base->http_server, registered_uri) == 0);
free(registered_uri);
return (0);
}
static void
evrpc_request_cb(struct evhttp_request *req, void *arg)
{
struct evrpc *rpc = arg;
struct evrpc_req_generic *rpc_state = NULL;
/* let's verify the outside parameters */
if (req->type != EVHTTP_REQ_POST ||
EVBUFFER_LENGTH(req->input_buffer) <= 0)
goto error;
/*
* we might want to allow hooks to suspend the processing,
* but at the moment, we assume that they just act as simple
* filters.
*/
if (evrpc_process_hooks(&rpc->base->input_hooks,
req, req->input_buffer) == -1)
goto error;
rpc_state = calloc(1, sizeof(struct evrpc_req_generic));
if (rpc_state == NULL)
goto error;
/* let's check that we can parse the request */
rpc_state->request = rpc->request_new();
if (rpc_state->request == NULL)
goto error;
rpc_state->rpc = rpc;
if (rpc->request_unmarshal(
rpc_state->request, req->input_buffer) == -1) {
/* we failed to parse the request; that's a bummer */
goto error;
}
/* at this point, we have a well formed request, prepare the reply */
rpc_state->reply = rpc->reply_new();
if (rpc_state->reply == NULL)
goto error;
rpc_state->http_req = req;
rpc_state->done = evrpc_request_done;
/* give the rpc to the user; they can deal with it */
rpc->cb(rpc_state, rpc->cb_arg);
return;
error:
evrpc_reqstate_free(rpc_state);
evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
return;
}
void
evrpc_reqstate_free(struct evrpc_req_generic* rpc_state)
{
/* clean up all memory */
if (rpc_state != NULL) {
struct evrpc *rpc = rpc_state->rpc;
if (rpc_state->request != NULL)
rpc->request_free(rpc_state->request);
if (rpc_state->reply != NULL)
rpc->reply_free(rpc_state->reply);
free(rpc_state);
}
}
void
evrpc_request_done(struct evrpc_req_generic* rpc_state)
{
struct evhttp_request *req = rpc_state->http_req;
struct evrpc *rpc = rpc_state->rpc;
struct evbuffer* data = NULL;
if (rpc->reply_complete(rpc_state->reply) == -1) {
/* the reply was not completely filled in. error out */
goto error;
}
if ((data = evbuffer_new()) == NULL) {
/* out of memory */
goto error;
}
/* serialize the reply */
rpc->reply_marshal(data, rpc_state->reply);
/* do hook based tweaks to the request */
if (evrpc_process_hooks(&rpc->base->output_hooks,
req, data) == -1)
goto error;
/* on success, we are going to transmit marshaled binary data */
if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) {
evhttp_add_header(req->output_headers,
"Content-Type", "application/octet-stream");
}
evhttp_send_reply(req, HTTP_OK, "OK", data);
evbuffer_free(data);
evrpc_reqstate_free(rpc_state);
return;
error:
if (data != NULL)
evbuffer_free(data);
evrpc_reqstate_free(rpc_state);
evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
return;
}
/* Client implementation of RPC site */
static int evrpc_schedule_request(struct evhttp_connection *connection,
struct evrpc_request_wrapper *ctx);
struct evrpc_pool *
evrpc_pool_new(struct event_base *base)
{
struct evrpc_pool *pool = calloc(1, sizeof(struct evrpc_pool));
if (pool == NULL)
return (NULL);
TAILQ_INIT(&pool->connections);
TAILQ_INIT(&pool->requests);
TAILQ_INIT(&pool->input_hooks);
TAILQ_INIT(&pool->output_hooks);
pool->base = base;
pool->timeout = -1;
return (pool);
}
static void
evrpc_request_wrapper_free(struct evrpc_request_wrapper *request)
{
free(request->name);
free(request);
}
void
evrpc_pool_free(struct evrpc_pool *pool)
{
struct evhttp_connection *connection;
struct evrpc_request_wrapper *request;
struct evrpc_hook *hook;
while ((request = TAILQ_FIRST(&pool->requests)) != NULL) {
TAILQ_REMOVE(&pool->requests, request, next);
/* if this gets more complicated we need our own function */
evrpc_request_wrapper_free(request);
}
while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) {
TAILQ_REMOVE(&pool->connections, connection, next);
evhttp_connection_free(connection);
}
while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) {
assert(evrpc_remove_hook(pool, INPUT, hook));
}
while ((hook = TAILQ_FIRST(&pool->output_hooks)) != NULL) {
assert(evrpc_remove_hook(pool, OUTPUT, hook));
}
free(pool);
}
/*
* Add a connection to the RPC pool. A request scheduled on the pool
* may use any available connection.
*/
void
evrpc_pool_add_connection(struct evrpc_pool *pool,
struct evhttp_connection *connection) {
assert(connection->http_server == NULL);
TAILQ_INSERT_TAIL(&pool->connections, connection, next);
/*
* associate an event base with this connection
*/
if (pool->base != NULL)
evhttp_connection_set_base(connection, pool->base);
/*
* unless a timeout was specifically set for a connection,
* the connection inherits the timeout from the pool.
*/
if (connection->timeout == -1)
connection->timeout = pool->timeout;
/*
* if we have any requests pending, schedule them with the new
* connections.
*/
if (TAILQ_FIRST(&pool->requests) != NULL) {
struct evrpc_request_wrapper *request =
TAILQ_FIRST(&pool->requests);
TAILQ_REMOVE(&pool->requests, request, next);
evrpc_schedule_request(connection, request);
}
}
void
evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs)
{
struct evhttp_connection *evcon;
TAILQ_FOREACH(evcon, &pool->connections, next) {
evcon->timeout = timeout_in_secs;
}
pool->timeout = timeout_in_secs;
}
static void evrpc_reply_done(struct evhttp_request *, void *);
static void evrpc_request_timeout(int, short, void *);
/*
* Finds a connection object associated with the pool that is currently
* idle and can be used to make a request.
*/
static struct evhttp_connection *
evrpc_pool_find_connection(struct evrpc_pool *pool)
{
struct evhttp_connection *connection;
TAILQ_FOREACH(connection, &pool->connections, next) {
if (TAILQ_FIRST(&connection->requests) == NULL)
return (connection);
}
return (NULL);
}
/*
* We assume that the ctx is no longer queued on the pool.
*/
static int
evrpc_schedule_request(struct evhttp_connection *connection,
struct evrpc_request_wrapper *ctx)
{
struct evhttp_request *req = NULL;
struct evrpc_pool *pool = ctx->pool;
struct evrpc_status status;
char *uri = NULL;
int res = 0;
if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL)
goto error;
/* serialize the request data into the output buffer */
ctx->request_marshal(req->output_buffer, ctx->request);
uri = evrpc_construct_uri(ctx->name);
if (uri == NULL)
goto error;
/* we need to know the connection that we might have to abort */
ctx->evcon = connection;
/* apply hooks to the outgoing request */
if (evrpc_process_hooks(&pool->output_hooks,
req, req->output_buffer) == -1)
goto error;
if (pool->timeout > 0) {
/*
* a timeout after which the whole rpc is going to be aborted.
*/
struct timeval tv;
evutil_timerclear(&tv);
tv.tv_sec = pool->timeout;
evtimer_add(&ctx->ev_timeout, &tv);
}
/* start the request over the connection */
res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri);
free(uri);
if (res == -1)
goto error;
return (0);
error:
memset(&status, 0, sizeof(status));
status.error = EVRPC_STATUS_ERR_UNSTARTED;
(*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
evrpc_request_wrapper_free(ctx);
return (-1);
}
int
evrpc_make_request(struct evrpc_request_wrapper *ctx)
{
struct evrpc_pool *pool = ctx->pool;
/* initialize the event structure for this rpc */
evtimer_set(&ctx->ev_timeout, evrpc_request_timeout, ctx);
if (pool->base != NULL)
event_base_set(pool->base, &ctx->ev_timeout);
/* we better have some available connections on the pool */
assert(TAILQ_FIRST(&pool->connections) != NULL);
/*
* if no connection is available, we queue the request on the pool;
* the next time a connection is free, the rpc will be sent on it.
*/
TAILQ_INSERT_TAIL(&pool->requests, ctx, next);
evrpc_pool_schedule(pool);
return (0);
}
static void
evrpc_reply_done(struct evhttp_request *req, void *arg)
{
struct evrpc_request_wrapper *ctx = arg;
struct evrpc_pool *pool = ctx->pool;
struct evrpc_status status;
int res = -1;
/* cancel any timeout we might have scheduled */
event_del(&ctx->ev_timeout);
memset(&status, 0, sizeof(status));
status.http_req = req;
/* we need to get the reply now */
if (req != NULL) {
/* apply hooks to the incoming request */
if (evrpc_process_hooks(&pool->input_hooks,
req, req->input_buffer) == -1) {
status.error = EVRPC_STATUS_ERR_HOOKABORTED;
res = -1;
} else {
res = ctx->reply_unmarshal(ctx->reply,
req->input_buffer);
if (res == -1) {
status.error = EVRPC_STATUS_ERR_BADPAYLOAD;
}
}
} else {
status.error = EVRPC_STATUS_ERR_TIMEOUT;
}
if (res == -1) {
/* clear everything that we might have written previously */
ctx->reply_clear(ctx->reply);
}
(*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
evrpc_request_wrapper_free(ctx);
/* the http layer owns the request structure */
/* see if we can schedule another request */
evrpc_pool_schedule(pool);
}
static void
evrpc_pool_schedule(struct evrpc_pool *pool)
{
struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests);
struct evhttp_connection *evcon;
/* if no requests are pending, we have no work */
if (ctx == NULL)
return;
if ((evcon = evrpc_pool_find_connection(pool)) != NULL) {
TAILQ_REMOVE(&pool->requests, ctx, next);
evrpc_schedule_request(evcon, ctx);
}
}
static void
evrpc_request_timeout(int fd, short what, void *arg)
{
struct evrpc_request_wrapper *ctx = arg;
struct evhttp_connection *evcon = ctx->evcon;
assert(evcon != NULL);
evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
}
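
A minimal server-side sketch of the hook machinery implemented above: the input hook runs from evrpc_request_cb(), and returning -1 from it makes the request fail with HTTP_SERVUNAVAIL before any unmarshaling happens. The address and port are arbitrary, and the sketch assumes the bundled event.h and evhttp.h entry points.

#include <event.h>
#include <evhttp.h>
#include <evrpc.h>

/* Reject RPC payloads larger than 64 kB; returning -1 aborts processing. */
static int
limit_size(struct evhttp_request *req, struct evbuffer *buf, void *arg)
{
	return (EVBUFFER_LENGTH(buf) > 65536 ? -1 : 0);
}

int
main(void)
{
	struct event_base *base = event_init();
	struct evhttp *http = evhttp_new(base);
	struct evrpc_base *rpc_base;

	evhttp_bind_socket(http, "127.0.0.1", 8080);
	rpc_base = evrpc_init(http);                 /* wrap the HTTP server */
	evrpc_add_hook(rpc_base, INPUT, limit_size, NULL);

	/* RPC handlers themselves are registered via EVRPC_REGISTER(). */
	event_base_dispatch(base);
	return (0);
}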

extra/libevent/evrpc.h Normal file

@ -0,0 +1,477 @@
/*
* Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVRPC_H_
#define _EVRPC_H_
#ifdef __cplusplus
extern "C" {
#endif
/** @file evrpc.h
*
* This header file provides basic support for an RPC server and client.
*
* To support RPCs in a server, every supported RPC command needs to be
* defined and registered.
*
* EVRPC_HEADER(SendCommand, Request, Reply);
*
* SendCommand is the name of the RPC command.
* Request is the name of a structure generated by event_rpcgen.py.
* It contains all parameters relating to the SendCommand RPC. The
* server needs to fill in the Reply structure.
* Reply is the name of a structure generated by event_rpcgen.py. It
* contains the answer to the RPC.
*
* To register an RPC with an HTTP server, you need to first create an RPC
* base with:
*
* struct evrpc_base *base = evrpc_init(http);
*
* A specific RPC can then be registered with
*
* EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg);
*
* when the server receives an appropriately formatted RPC, the user callback
* is invoked. The callback needs to fill in the reply structure.
*
* void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg);
*
* To send the reply, call EVRPC_REQUEST_DONE(rpc);
*
* See the regression test for an example.
*/
struct evbuffer;
struct event_base;
struct evrpc_req_generic;
/* Encapsulates a request */
struct evrpc {
TAILQ_ENTRY(evrpc) next;
/* the URI at which the request handler lives */
const char* uri;
/* creates a new request structure */
void *(*request_new)(void);
/* frees the request structure */
void (*request_free)(void *);
/* unmarshals the buffer into the proper request structure */
int (*request_unmarshal)(void *, struct evbuffer *);
/* creates a new reply structure */
void *(*reply_new)(void);
/* frees the reply structure */
void (*reply_free)(void *);
/* verifies that the reply is valid */
int (*reply_complete)(void *);
/* marshals the reply into a buffer */
void (*reply_marshal)(struct evbuffer*, void *);
/* the callback invoked for each received rpc */
void (*cb)(struct evrpc_req_generic *, void *);
void *cb_arg;
/* reference for further configuration */
struct evrpc_base *base;
};
/** The type of a specific RPC Message
*
* @param rpcname the name of the RPC message
*/
#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname
struct evhttp_request;
struct evrpc_status;
/* We alias the RPC specific structs to this voided one */
struct evrpc_req_generic {
/* the unmarshaled request object */
void *request;
/* the empty reply object that needs to be filled in */
void *reply;
/*
* the static structure for this rpc; that can be used to
* automatically unmarshal and marshal the http buffers.
*/
struct evrpc *rpc;
/*
* the http request structure on which we need to answer.
*/
struct evhttp_request* http_req;
/*
* callback to reply and finish answering this rpc
*/
void (*done)(struct evrpc_req_generic* rpc);
};
/** Creates the definitions and prototypes for an RPC
*
* You need to use EVRPC_HEADER to create structures and function prototypes
* needed by the server and client implementation. The structures have to be
* defined in an .rpc file and converted to source code via event_rpcgen.py
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @see EVRPC_GENERATE()
*/
#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \
EVRPC_STRUCT(rpcname) { \
struct reqstruct* request; \
struct rplystruct* reply; \
struct evrpc* rpc; \
struct evhttp_request* http_req; \
void (*done)(struct evrpc_status *, \
struct evrpc* rpc, void *request, void *reply); \
}; \
int evrpc_send_request_##rpcname(struct evrpc_pool *, \
struct reqstruct *, struct rplystruct *, \
void (*)(struct evrpc_status *, \
struct reqstruct *, struct rplystruct *, void *cbarg), \
void *);
/** Generates the code for receiving and sending an RPC message
*
* EVRPC_GENERATE is used to create the code corresponding to sending
* and receiving a particular RPC message
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @see EVRPC_HEADER()
*/
#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \
int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \
struct reqstruct *request, struct rplystruct *reply, \
void (*cb)(struct evrpc_status *, \
struct reqstruct *, struct rplystruct *, void *cbarg), \
void *cbarg) { \
struct evrpc_status status; \
struct evrpc_request_wrapper *ctx; \
ctx = (struct evrpc_request_wrapper *) \
malloc(sizeof(struct evrpc_request_wrapper)); \
if (ctx == NULL) \
goto error; \
ctx->pool = pool; \
ctx->evcon = NULL; \
ctx->name = strdup(#rpcname); \
if (ctx->name == NULL) { \
free(ctx); \
goto error; \
} \
ctx->cb = (void (*)(struct evrpc_status *, \
void *, void *, void *))cb; \
ctx->cb_arg = cbarg; \
ctx->request = (void *)request; \
ctx->reply = (void *)reply; \
ctx->request_marshal = (void (*)(struct evbuffer *, void *))reqstruct##_marshal; \
ctx->reply_clear = (void (*)(void *))rplystruct##_clear; \
ctx->reply_unmarshal = (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal; \
return (evrpc_make_request(ctx)); \
error: \
memset(&status, 0, sizeof(status)); \
status.error = EVRPC_STATUS_ERR_UNSTARTED; \
(*(cb))(&status, request, reply, cbarg); \
return (-1); \
}
/** Provides access to the HTTP request object underlying an RPC
*
* Access to the underlying http object; can be used to look at headers or
* for getting the remote ip address
*
* @param rpc_req the rpc request structure provided to the server callback
* @return a struct evhttp_request object that can be inspected for
* HTTP headers or sender information.
*/
#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req
/** Creates the reply to an RPC request
*
* EVRPC_REQUEST_DONE is used to answer a request; the reply is expected
* to have been filled in. The request and reply pointers become invalid
* after this call has finished.
*
* @param rpc_req the rpc request structure provided to the server callback
*/
#define EVRPC_REQUEST_DONE(rpc_req) do { \
struct evrpc_req_generic *_req = (struct evrpc_req_generic *)(rpc_req); \
_req->done(_req); \
} while (0)
/* Takes a request object and fills it in with the right magic */
#define EVRPC_REGISTER_OBJECT(rpc, name, request, reply) \
do { \
(rpc)->uri = strdup(#name); \
if ((rpc)->uri == NULL) { \
fprintf(stderr, "failed to register object\n"); \
exit(1); \
} \
(rpc)->request_new = (void *(*)(void))request##_new; \
(rpc)->request_free = (void (*)(void *))request##_free; \
(rpc)->request_unmarshal = (int (*)(void *, struct evbuffer *))request##_unmarshal; \
(rpc)->reply_new = (void *(*)(void))reply##_new; \
(rpc)->reply_free = (void (*)(void *))reply##_free; \
(rpc)->reply_complete = (int (*)(void *))reply##_complete; \
(rpc)->reply_marshal = (void (*)(struct evbuffer*, void *))reply##_marshal; \
} while (0)
struct evrpc_base;
struct evhttp;
/* functions to start up the rpc system */
/** Creates a new rpc base from which RPC requests can be received
*
* @param server a pointer to an existing HTTP server
* @return a newly allocated evrpc_base struct
* @see evrpc_free()
*/
struct evrpc_base *evrpc_init(struct evhttp *server);
/**
* Frees the evrpc base
*
* For now, you are responsible for making sure that no rpcs are ongoing.
*
* @param base the evrpc_base object to be freed
* @see evrpc_init
*/
void evrpc_free(struct evrpc_base *base);
/** register RPCs with the HTTP Server
*
* registers a new RPC with the HTTP server, each RPC needs to have
* a unique name under which it can be identified.
*
* @param base the evrpc_base structure in which the RPC should be
* registered.
* @param name the name of the RPC
* @param request the name of the RPC request structure
* @param reply the name of the RPC reply structure
* @param callback the callback that should be invoked when the RPC
* is received. The callback has the following prototype
* void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg)
* @param cbarg an additional parameter that can be passed to the callback.
* The parameter can be used to carry around state.
*/
#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \
do { \
struct evrpc* rpc = (struct evrpc *)calloc(1, sizeof(struct evrpc)); \
EVRPC_REGISTER_OBJECT(rpc, name, request, reply); \
evrpc_register_rpc(base, rpc, \
(void (*)(struct evrpc_req_generic*, void *))callback, cbarg); \
} while (0)
int evrpc_register_rpc(struct evrpc_base *, struct evrpc *,
void (*)(struct evrpc_req_generic*, void *), void *);
/**
* Unregisters an already registered RPC
*
* @param base the evrpc_base object from which to unregister an RPC
* @param name the name of the rpc to unregister
* @return -1 on error or 0 when successful.
* @see EVRPC_REGISTER()
*/
#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc(base, #name)
int evrpc_unregister_rpc(struct evrpc_base *base, const char *name);
/*
* Client-side RPC support
*/
struct evrpc_pool;
struct evhttp_connection;
/**
* provides information about the completed RPC request.
*/
struct evrpc_status {
#define EVRPC_STATUS_ERR_NONE 0
#define EVRPC_STATUS_ERR_TIMEOUT 1
#define EVRPC_STATUS_ERR_BADPAYLOAD 2
#define EVRPC_STATUS_ERR_UNSTARTED 3
#define EVRPC_STATUS_ERR_HOOKABORTED 4
int error;
/* for looking at headers or other information */
struct evhttp_request *http_req;
};
struct evrpc_request_wrapper {
TAILQ_ENTRY(evrpc_request_wrapper) next;
/* pool on which this rpc request is being made */
struct evrpc_pool *pool;
/* connection on which the request is being sent */
struct evhttp_connection *evcon;
/* event for implementing request timeouts */
struct event ev_timeout;
/* the name of the rpc */
char *name;
/* callback */
void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
void *cb_arg;
void *request;
void *reply;
/* marshals the request structure into a buffer */
void (*request_marshal)(struct evbuffer *, void *);
/* removes all stored state in the reply */
void (*reply_clear)(void *);
/* unmarshals the reply from a buffer */
int (*reply_unmarshal)(void *, struct evbuffer*);
};
/** launches an RPC and sends it to the server
*
* EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server.
*
* @param name the name of the RPC
* @param pool the evrpc_pool that contains the connection objects over which
* the request should be sent.
* @param request a pointer to the RPC request structure - it contains the
* data to be sent to the server.
* @param reply a pointer to the RPC reply structure. It is going to be filled
* if the request was answered successfully
* @param cb the callback to invoke when the RPC request has been answered
* @param cbarg an additional argument to be passed to the client
* @return 0 on success, -1 on failure
*/
#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg) \
evrpc_send_request_##name(pool, request, reply, cb, cbarg)
int evrpc_make_request(struct evrpc_request_wrapper *);
/** creates an rpc connection pool
*
* a pool has a number of connections associated with it.
* rpc requests are always made via a pool.
*
* @param base a pointer to a struct event_base object; can be left NULL
* in single-threaded applications
* @return a newly allocated struct evrpc_pool object
* @see evrpc_pool_free()
*/
struct evrpc_pool *evrpc_pool_new(struct event_base *base);
/** frees an rpc connection pool
*
* @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new()
* @see evrpc_pool_new()
*/
void evrpc_pool_free(struct evrpc_pool *pool);
/*
* adds a connection over which rpc can be dispatched. the connection
* object must have been newly created.
*/
void evrpc_pool_add_connection(struct evrpc_pool *,
struct evhttp_connection *);
/**
* Sets the timeout in secs after which a request has to complete. The
* RPC is completely aborted if it does not complete by then. Setting
* the timeout to 0 means that it never times out and can be used to
* implement callback type RPCs.
*
* Any connection already in the pool will be updated with the new
* timeout. Connections added to the pool after set_timeout has been
* called receive the pool timeout only if no timeout has been set
* for the connection itself.
*
* @param pool a pointer to a struct evrpc_pool object
* @param timeout_in_secs the number of seconds after which a request should
* timeout and a failure be returned to the callback.
*/
void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs);
/**
* Hooks for changing the input and output of RPCs; this can be used to
* implement compression, authentication, encryption, ...
*/
enum EVRPC_HOOK_TYPE {
INPUT, /**< apply the function to an input hook */
OUTPUT /**< apply the function to an output hook */
};
/** adds a processing hook to either an rpc base or rpc pool
*
* If a hook returns -1, the processing is aborted.
*
* The add functions return handles that can be used for removing hooks.
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param hook_type either INPUT or OUTPUT
* @param cb the callback to call when the hook is activated
* @param cb_arg an additional argument for the callback
* @return a handle to the hook so it can be removed later
* @see evrpc_remove_hook()
*/
void *evrpc_add_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg);
/** removes a previously added hook
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param hook_type either INPUT or OUTPUT
* @param handle a handle returned by evrpc_add_hook()
* @return 1 on success or 0 on failure
* @see evrpc_add_hook()
*/
int evrpc_remove_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
void *handle);
#ifdef __cplusplus
}
#endif
#endif /* _EVRPC_H_ */
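
A minimal client-side sketch of the pool interface declared above; the address, port, and timeout are arbitrary, and issuing a concrete RPC still requires the message types generated by event_rpcgen.py.

#include <event.h>
#include <evhttp.h>
#include <evrpc.h>

int
main(void)
{
	struct event_base *base = event_init();
	struct evrpc_pool *pool = evrpc_pool_new(base);

	/* one connection; requests queued on the pool are sent over it */
	evrpc_pool_add_connection(pool,
	    evhttp_connection_new("127.0.0.1", 8080));
	evrpc_pool_set_timeout(pool, 30);    /* abort requests after 30 seconds */

	/*
	 * With generated message types in place, a request would be issued as
	 *   EVRPC_MAKE_REQUEST(SendCommand, pool, request, reply, done_cb, NULL);
	 * and done_cb receives a struct evrpc_status describing the outcome.
	 */
	event_base_dispatch(base);

	evrpc_pool_free(pool);
	return (0);
}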

extra/libevent/evsignal.h Normal file

@ -0,0 +1,52 @@
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVSIGNAL_H_
#define _EVSIGNAL_H_
typedef void (*ev_sighandler_t)(int);
struct evsignal_info {
struct event_list signalqueue;
struct event ev_signal;
int ev_signal_pair[2];
int ev_signal_added;
volatile sig_atomic_t evsignal_caught;
sig_atomic_t evsigcaught[NSIG];
#ifdef HAVE_SIGACTION
struct sigaction **sh_old;
#else
ev_sighandler_t **sh_old;
#endif
int sh_old_max;
};
void evsignal_init(struct event_base *);
void evsignal_process(struct event_base *);
int evsignal_add(struct event *);
int evsignal_del(struct event *);
void evsignal_dealloc(struct event_base *);
#endif /* _EVSIGNAL_H_ */

198
extra/libevent/evutil.c Normal file

@ -0,0 +1,198 @@
/*
* Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include "misc.h"
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include "evutil.h"
#include "log.h"
int
evutil_socketpair(int family, int type, int protocol, int fd[2])
{
#ifndef WIN32
return socketpair(family, type, protocol, fd);
#else
/* This code is originally from Tor. Used with permission. */
/* This socketpair does not work when localhost is down. So
* it's really not the same thing at all. But it's close enough
* for now, and really, when localhost is down sometimes, we
* have other problems too.
*/
int listener = -1;
int connector = -1;
int acceptor = -1;
struct sockaddr_in listen_addr;
struct sockaddr_in connect_addr;
int size;
int saved_errno = -1;
if (protocol
#ifdef AF_UNIX
|| family != AF_UNIX
#endif
) {
EVUTIL_SET_SOCKET_ERROR(WSAEAFNOSUPPORT);
return -1;
}
if (!fd) {
EVUTIL_SET_SOCKET_ERROR(WSAEINVAL);
return -1;
}
listener = (int)socket(AF_INET, type, 0);
if (listener < 0)
return -1;
memset(&listen_addr, 0, sizeof(listen_addr));
listen_addr.sin_family = AF_INET;
listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
listen_addr.sin_port = 0; /* kernel chooses port. */
if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr))
== -1)
goto tidy_up_and_fail;
if (listen(listener, 1) == -1)
goto tidy_up_and_fail;
connector = (int)socket(AF_INET, type, 0);
if (connector < 0)
goto tidy_up_and_fail;
/* We want to find out the port number to connect to. */
size = sizeof(connect_addr);
if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
goto tidy_up_and_fail;
if (size != sizeof (connect_addr))
goto abort_tidy_up_and_fail;
if (connect(connector, (struct sockaddr *) &connect_addr,
sizeof(connect_addr)) == -1)
goto tidy_up_and_fail;
size = sizeof(listen_addr);
acceptor = (int)accept(listener, (struct sockaddr *) &listen_addr, &size);
if (acceptor < 0)
goto tidy_up_and_fail;
if (size != sizeof(listen_addr))
goto abort_tidy_up_and_fail;
EVUTIL_CLOSESOCKET(listener);
/* Now check we are talking to ourselves by matching port and host on the
two sockets. */
if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
goto tidy_up_and_fail;
if (size != sizeof (connect_addr)
|| listen_addr.sin_family != connect_addr.sin_family
|| listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
|| listen_addr.sin_port != connect_addr.sin_port)
goto abort_tidy_up_and_fail;
fd[0] = connector;
fd[1] = acceptor;
return 0;
abort_tidy_up_and_fail:
saved_errno = WSAECONNABORTED;
tidy_up_and_fail:
if (saved_errno < 0)
saved_errno = WSAGetLastError();
if (listener != -1)
EVUTIL_CLOSESOCKET(listener);
if (connector != -1)
EVUTIL_CLOSESOCKET(connector);
if (acceptor != -1)
EVUTIL_CLOSESOCKET(acceptor);
EVUTIL_SET_SOCKET_ERROR(saved_errno);
return -1;
#endif
}
int
evutil_make_socket_nonblocking(int fd)
{
#ifdef WIN32
{
unsigned long nonblocking = 1;
ioctlsocket(fd, FIONBIO, (unsigned long*) &nonblocking);
}
#else
if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
event_warn("fcntl(O_NONBLOCK)");
return -1;
}
#endif
return 0;
}
ev_int64_t
evutil_strtoll(const char *s, char **endptr, int base)
{
#ifdef HAVE_STRTOLL
return (ev_int64_t)strtoll(s, endptr, base);
#elif SIZEOF_LONG == 8
return (ev_int64_t)strtol(s, endptr, base);
#elif defined(WIN32) && defined(_MSC_VER) && _MSC_VER < 1300
/* XXXX on old versions of MS APIs, we only support base
* 10. */
ev_int64_t r;
if (base != 10)
return 0;
r = (ev_int64_t) _atoi64(s);
while (isspace(*s))
++s;
while (isdigit(*s))
++s;
if (endptr)
*endptr = (char*) s;
return r;
#elif defined(WIN32)
return (ev_int64_t) _strtoi64(s, endptr, base);
#else
#error "I don't know how to parse 64-bit integers."
#endif
}
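
Taken together, these helpers are how the rest of the tree (for example signal.c further down) obtains a portable non-blocking socket pair. A sketch of that pattern on a Unix-like system (hypothetical helper, not from this commit):

#include <sys/types.h>
#include <sys/socket.h>		/* AF_UNIX, SOCK_STREAM */
#include "evutil.h"

/* Create a socket pair and make both ends non-blocking; returns 0 or -1. */
static int
make_nonblocking_pair(int fd[2])
{
	if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, fd) == -1)
		return (-1);
	if (evutil_make_socket_nonblocking(fd[0]) == -1 ||
	    evutil_make_socket_nonblocking(fd[1]) == -1) {
		EVUTIL_CLOSESOCKET(fd[0]);
		EVUTIL_CLOSESOCKET(fd[1]);
		return (-1);
	}
	return (0);
}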

174
extra/libevent/evutil.h Normal file

@ -0,0 +1,174 @@
/*
* Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVUTIL_H_
#define _EVUTIL_H_
/** @file evutil.h
Common convenience functions for cross-platform portability and
related socket manipulations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event-config.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef _EVENT_HAVE_STDINT_H
#include <stdint.h>
#elif defined(_EVENT_HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_UINT64_T
#define ev_uint64_t uint64_t
#define ev_int64_t int64_t
#elif defined(WIN32)
#define ev_uint64_t unsigned __int64
#define ev_int64_t __int64
#elif _EVENT_SIZEOF_LONG_LONG == 8
#define ev_uint64_t unsigned long long
#define ev_int64_t long long
#elif _EVENT_SIZEOF_LONG == 8
#define ev_uint64_t unsigned long
#define ev_int64_t long
#else
#error "No way to define ev_uint64_t"
#endif
#ifdef _EVENT_HAVE_UINT32_T
#define ev_uint32_t uint32_t
#elif defined(WIN32)
#define ev_uint32_t unsigned int
#elif _EVENT_SIZEOF_LONG == 4
#define ev_uint32_t unsigned long
#elif _EVENT_SIZEOF_INT == 4
#define ev_uint32_t unsigned int
#else
#error "No way to define ev_uint32_t"
#endif
#ifdef _EVENT_HAVE_UINT16_T
#define ev_uint16_t uint16_t
#elif defined(WIN32)
#define ev_uint16_t unsigned short
#elif _EVENT_SIZEOF_INT == 2
#define ev_uint16_t unsigned int
#elif _EVENT_SIZEOF_SHORT == 2
#define ev_uint16_t unsigned short
#else
#error "No way to define ev_uint16_t"
#endif
#ifdef _EVENT_HAVE_UINT8_T
#define ev_uint8_t uint8_t
#else
#define ev_uint8_t unsigned char
#endif
int evutil_socketpair(int d, int type, int protocol, int sv[2]);
int evutil_make_socket_nonblocking(int sock);
#ifdef WIN32
#define EVUTIL_CLOSESOCKET(s) closesocket(s)
#else
#define EVUTIL_CLOSESOCKET(s) close(s)
#endif
#ifdef WIN32
#define EVUTIL_SOCKET_ERROR() WSAGetLastError()
#define EVUTIL_SET_SOCKET_ERROR(errcode) \
do { WSASetLastError(errcode); } while (0)
#else
#define EVUTIL_SOCKET_ERROR() (errno)
#define EVUTIL_SET_SOCKET_ERROR(errcode) \
do { errno = (errcode); } while (0)
#endif
/*
* Manipulation functions for struct timeval
*/
#ifdef _EVENT_HAVE_TIMERADD
#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp))
#else
#define evutil_timeradd(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
if ((vvp)->tv_usec >= 1000000) { \
(vvp)->tv_sec++; \
(vvp)->tv_usec -= 1000000; \
} \
} while (0)
#define evutil_timersub(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
if ((vvp)->tv_usec < 0) { \
(vvp)->tv_sec--; \
(vvp)->tv_usec += 1000000; \
} \
} while (0)
#endif /* !_EVENT_HAVE_TIMERADD */
#ifdef _EVENT_HAVE_TIMERCLEAR
#define evutil_timerclear(tvp) timerclear(tvp)
#else
#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
#endif
#ifdef _EVENT_HAVE_TIMERCMP
#define evutil_timercmp(tvp, uvp, cmp) timercmp((tvp), (uvp), cmp)
#else
#define evutil_timercmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? \
((tvp)->tv_usec cmp (uvp)->tv_usec) : \
((tvp)->tv_sec cmp (uvp)->tv_sec))
#endif
#ifdef _EVENT_HAVE_TIMERISSET
#define evutil_timerisset(tvp) timerisset(tvp)
#else
#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#endif
/* big-int related functions */
ev_int64_t evutil_strtoll(const char *s, char **endptr, int base);
#ifdef __cplusplus
}
#endif
#endif /* _EVUTIL_H_ */
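
The evutil_timer* macros above provide timeval arithmetic even on platforms that lack timeradd()/timersub(). For example, computing the time remaining until a deadline might look like this (a sketch only; gettimeofday() is assumed to be available):

#include <sys/time.h>
#include "evutil.h"

/* Time left until *deadline, or zero if it has already passed. */
static struct timeval
time_until(const struct timeval *deadline)
{
	struct timeval now, left;

	gettimeofday(&now, NULL);
	if (evutil_timercmp(deadline, &now, <)) {
		evutil_timerclear(&left);	/* deadline already passed */
		return (left);
	}
	evutil_timersub(deadline, &now, &left);
	return (left);
}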

133
extra/libevent/http-internal.h Normal file

@ -0,0 +1,133 @@
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* This header file contains definitions for dealing with HTTP requests
* that are internal to libevent. As user of the library, you should not
* need to know about these.
*/
#ifndef _HTTP_H_
#define _HTTP_H_
#define HTTP_CONNECT_TIMEOUT 45
#define HTTP_WRITE_TIMEOUT 50
#define HTTP_READ_TIMEOUT 50
#define HTTP_PREFIX "http://"
#define HTTP_DEFAULTPORT 80
enum evhttp_connection_error {
EVCON_HTTP_TIMEOUT,
EVCON_HTTP_EOF,
EVCON_HTTP_INVALID_HEADER
};
struct evbuffer;
struct addrinfo;
struct evhttp_request;
/* A stupid connection object - maybe make this a bufferevent later */
enum evhttp_connection_state {
EVCON_DISCONNECTED, /* not currently connected and not trying to connect */
EVCON_CONNECTING, /* currently trying to connect */
EVCON_CONNECTED /* connection is established */
};
struct event_base;
struct evhttp_connection {
/* we use tailq only if they were created for an http server */
TAILQ_ENTRY(evhttp_connection) (next);
int fd;
struct event ev;
struct event close_ev;
struct evbuffer *input_buffer;
struct evbuffer *output_buffer;
char *bind_address; /* address to use for binding the src */
char *address; /* address to connect to */
u_short port;
int flags;
#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */
#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */
#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */
int timeout; /* timeout in seconds for events */
int retry_cnt; /* retry count */
int retry_max; /* maximum number of retries */
enum evhttp_connection_state state;
/* for server connections, the http server they are connected with */
struct evhttp *http_server;
TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
void (*cb)(struct evhttp_connection *, void *);
void *cb_arg;
void (*closecb)(struct evhttp_connection *, void *);
void *closecb_arg;
struct event_base *base;
};
struct evhttp_cb {
TAILQ_ENTRY(evhttp_cb) next;
char *what;
void (*cb)(struct evhttp_request *req, void *);
void *cbarg;
};
/* both the http server and the rpc system need to queue connections */
TAILQ_HEAD(evconq, evhttp_connection);
struct evhttp {
struct event bind_ev;
TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
struct evconq connections;
int timeout;
void (*gencb)(struct evhttp_request *req, void *);
void *gencbarg;
struct event_base *base;
};
/* resets the connection; can be reused for more requests */
void evhttp_connection_reset(struct evhttp_connection *);
/* connects if necessary */
int evhttp_connection_connect(struct evhttp_connection *);
/* notifies the current request that it failed; resets connection */
void evhttp_connection_fail(struct evhttp_connection *,
enum evhttp_connection_error error);
void evhttp_get_request(struct evhttp *, int, struct sockaddr *, socklen_t);
int evhttp_hostportfile(char *, char **, u_short *, char **);
int evhttp_parse_lines(struct evhttp_request *, struct evbuffer*);
void evhttp_start_read(struct evhttp_connection *);
void evhttp_read_header(int, short, void *);
void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
void evhttp_write_buffer(struct evhttp_connection *,
void (*)(struct evhttp_connection *, void *), void *);
/* sends an HTML response with the data in the buffer */
void evhttp_response_code(struct evhttp_request *, int, const char *);
void evhttp_send_page(struct evhttp_request *, struct evbuffer *);
#endif /* _HTTP_H_ */

2512
extra/libevent/http.c Normal file

File diff suppressed because it is too large

425
extra/libevent/kqueue.c Normal file

@ -0,0 +1,425 @@
/* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_WORKING_KQUEUE
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
/* Some platforms apparently define the udata field of struct kevent as
* intptr_t, whereas others define it as void*. There doesn't seem to be an
* easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
#define PTR_TO_UDATA(x) ((intptr_t)(x))
#else
#define PTR_TO_UDATA(x) (x)
#endif
#include "event.h"
#include "event-internal.h"
#include "log.h"
#include "event-internal.h"
#define EVLIST_X_KQINKERNEL 0x1000
#define NEVENT 64
struct kqop {
struct kevent *changes;
int nchanges;
struct kevent *events;
int nevents;
int kq;
pid_t pid;
};
static void *kq_init (struct event_base *);
static int kq_add (void *, struct event *);
static int kq_del (void *, struct event *);
static int kq_dispatch (struct event_base *, void *, struct timeval *);
static int kq_insert (struct kqop *, struct kevent *);
static void kq_dealloc (struct event_base *, void *);
const struct eventop kqops = {
"kqueue",
kq_init,
kq_add,
kq_del,
kq_dispatch,
kq_dealloc,
1 /* need reinit */
};
static void *
kq_init(struct event_base *base)
{
int kq;
struct kqop *kqueueop;
/* Disable kqueue when this environment variable is set */
if (getenv("EVENT_NOKQUEUE"))
return (NULL);
if (!(kqueueop = calloc(1, sizeof(struct kqop))))
return (NULL);
/* Initialize the kernel queue */
if ((kq = kqueue()) == -1) {
event_warn("kqueue");
free (kqueueop);
return (NULL);
}
kqueueop->kq = kq;
kqueueop->pid = getpid();
/* Initialize fields */
kqueueop->changes = malloc(NEVENT * sizeof(struct kevent));
if (kqueueop->changes == NULL) {
free (kqueueop);
return (NULL);
}
kqueueop->events = malloc(NEVENT * sizeof(struct kevent));
if (kqueueop->events == NULL) {
free (kqueueop->changes);
free (kqueueop);
return (NULL);
}
kqueueop->nevents = NEVENT;
/* Check for Mac OS X kqueue bug. */
kqueueop->changes[0].ident = -1;
kqueueop->changes[0].filter = EVFILT_READ;
kqueueop->changes[0].flags = EV_ADD;
/*
* If kqueue works, then kevent will succeed, and it will
* stick an error in events[0]. If kqueue is broken, then
* kevent will fail.
*/
if (kevent(kq,
kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
kqueueop->events[0].ident != -1 ||
kqueueop->events[0].flags != EV_ERROR) {
event_warn("%s: detected broken kqueue; not using.", __func__);
free(kqueueop->changes);
free(kqueueop->events);
free(kqueueop);
close(kq);
return (NULL);
}
return (kqueueop);
}
static int
kq_insert(struct kqop *kqop, struct kevent *kev)
{
int nevents = kqop->nevents;
if (kqop->nchanges == nevents) {
struct kevent *newchange;
struct kevent *newresult;
nevents *= 2;
newchange = realloc(kqop->changes,
nevents * sizeof(struct kevent));
if (newchange == NULL) {
event_warn("%s: malloc", __func__);
return (-1);
}
kqop->changes = newchange;
newresult = realloc(kqop->events,
nevents * sizeof(struct kevent));
/*
* If we fail, we don't have to worry about freeing,
* the next realloc will pick it up.
*/
if (newresult == NULL) {
event_warn("%s: malloc", __func__);
return (-1);
}
kqop->events = newresult;
kqop->nevents = nevents;
}
memcpy(&kqop->changes[kqop->nchanges++], kev, sizeof(struct kevent));
event_debug(("%s: fd %d %s%s",
__func__, kev->ident,
kev->filter == EVFILT_READ ? "EVFILT_READ" : "EVFILT_WRITE",
kev->flags == EV_DELETE ? " (del)" : ""));
return (0);
}
static void
kq_sighandler(int sig)
{
/* Do nothing here */
}
static int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
struct kqop *kqop = arg;
struct kevent *changes = kqop->changes;
struct kevent *events = kqop->events;
struct event *ev;
struct timespec ts, *ts_p = NULL;
int i, res;
if (tv != NULL) {
TIMEVAL_TO_TIMESPEC(tv, &ts);
ts_p = &ts;
}
res = kevent(kqop->kq, changes, kqop->nchanges,
events, kqop->nevents, ts_p);
kqop->nchanges = 0;
if (res == -1) {
if (errno != EINTR) {
event_warn("kevent");
return (-1);
}
return (0);
}
event_debug(("%s: kevent reports %d", __func__, res));
for (i = 0; i < res; i++) {
int which = 0;
if (events[i].flags & EV_ERROR) {
/*
* Errors that can happen when a delete fails:
* EBADF happens when the file descriptor has been
* closed,
* ENOENT when the file descriptor was closed and
* then reopened.
* EINVAL for some reasons not understood; EINVAL
* should not be returned ever; but FreeBSD does :-\
* An error is also indicated when a callback deletes
* an event we are still processing. In that case
* the data field is set to ENOENT.
*/
if (events[i].data == EBADF ||
events[i].data == EINVAL ||
events[i].data == ENOENT)
continue;
errno = events[i].data;
return (-1);
}
ev = (struct event *)events[i].udata;
if (events[i].filter == EVFILT_READ) {
which |= EV_READ;
} else if (events[i].filter == EVFILT_WRITE) {
which |= EV_WRITE;
} else if (events[i].filter == EVFILT_SIGNAL) {
which |= EV_SIGNAL;
}
if (!which)
continue;
if (!(ev->ev_events & EV_PERSIST))
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
event_active(ev, which,
ev->ev_events & EV_SIGNAL ? events[i].data : 1);
}
return (0);
}
static int
kq_add(void *arg, struct event *ev)
{
struct kqop *kqop = arg;
struct kevent kev;
if (ev->ev_events & EV_SIGNAL) {
int nsignal = EVENT_SIGNAL(ev);
struct timespec timeout = { 0, 0 };
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
kev.udata = PTR_TO_UDATA(ev);
/* Be ready for the signal if it is sent any time between
* now and the next call to kq_dispatch. */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
if (_evsignal_set_handler(ev->ev_base, nsignal,
kq_sighandler) == -1)
return (-1);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
return (0);
}
if (ev->ev_events & EV_READ) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_READ;
#ifdef NOTE_EOF
/* Make it behave like select() and poll() */
kev.fflags = NOTE_EOF;
#endif
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
kev.udata = PTR_TO_UDATA(ev);
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
}
if (ev->ev_events & EV_WRITE) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_WRITE;
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
kev.udata = PTR_TO_UDATA(ev);
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
}
return (0);
}
static int
kq_del(void *arg, struct event *ev)
{
struct kqop *kqop = arg;
struct kevent kev;
if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
return (0);
if (ev->ev_events & EV_SIGNAL) {
int nsignal = EVENT_SIGNAL(ev);
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_DELETE;
if (kq_insert(kqop, &kev) == -1)
return (-1);
if (_evsignal_restore_handler(ev->ev_base, nsignal) == -1)
return (-1);
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
return (0);
}
if (ev->ev_events & EV_READ) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_READ;
kev.flags = EV_DELETE;
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
}
if (ev->ev_events & EV_WRITE) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_WRITE;
kev.flags = EV_DELETE;
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
}
return (0);
}
static void
kq_dealloc(struct event_base *base, void *arg)
{
struct kqop *kqop = arg;
if (kqop->changes)
free(kqop->changes);
if (kqop->events)
free(kqop->events);
if (kqop->kq >= 0 && kqop->pid == getpid())
close(kqop->kq);
memset(kqop, 0, sizeof(struct kqop));
free(kqop);
}
#endif /* HAVE_WORKING_KQUEUE */
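
Like the poll and select backends that follow, kqueue.c only fills in a struct eventop dispatch table; event.c picks the best available backend when the library is initialised, and that choice is invisible to callers. A caller therefore only ever uses the generic API (illustrative sketch, not code from this commit):

#include <event.h>

static void
on_readable(int fd, short what, void *arg)
{
	/* Invoked by whichever backend (kqueue, epoll, poll, select, ...)
	 * libevent selected for this platform. */
	(void)fd; (void)what; (void)arg;
}

static void
watch_fd(int fd)
{
	struct event ev;

	event_init();				/* selects a backend */
	event_set(&ev, fd, EV_READ | EV_PERSIST, on_readable, NULL);
	event_add(&ev, NULL);			/* no timeout */
	event_dispatch();			/* run the event loop */
}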

218
extra/libevent/log.c Normal file

@ -0,0 +1,218 @@
/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* log.c
*
* Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
*
* Copyright (c) 2005 Nick Mathewson <nickm@freehaven.net>
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* Copyright (c) 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include "event.h"
#include "log.h"
static void _warn_helper(int severity, int log_errno, const char *fmt,
va_list ap);
static void event_log(int severity, const char *msg);
static int
event_vsnprintf(char *str, size_t size, const char *format, va_list args)
{
int r;
if (size == 0)
return -1;
#ifdef WIN32
r = _vsnprintf(str, size, format, args);
#else
r = vsnprintf(str, size, format, args);
#endif
str[size-1] = '\0';
if (r < 0 || ((size_t)r) >= size) {
/* different platforms behave differently on overflow;
* handle both kinds. */
return -1;
}
return r;
}
static int
event_snprintf(char *str, size_t size, const char *format, ...)
{
va_list ap;
int r;
va_start(ap, format);
r = event_vsnprintf(str, size, format, ap);
va_end(ap);
return r;
}
void
event_err(int eval, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, errno, fmt, ap);
va_end(ap);
exit(eval);
}
void
event_warn(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, errno, fmt, ap);
va_end(ap);
}
void
event_errx(int eval, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, -1, fmt, ap);
va_end(ap);
exit(eval);
}
void
event_warnx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, -1, fmt, ap);
va_end(ap);
}
void
event_msgx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_MSG, -1, fmt, ap);
va_end(ap);
}
void
_event_debugx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_DEBUG, -1, fmt, ap);
va_end(ap);
}
static void
_warn_helper(int severity, int log_errno, const char *fmt, va_list ap)
{
char buf[1024];
size_t len;
if (fmt != NULL)
event_vsnprintf(buf, sizeof(buf), fmt, ap);
else
buf[0] = '\0';
if (log_errno >= 0) {
len = strlen(buf);
if (len < sizeof(buf) - 3) {
event_snprintf(buf + len, sizeof(buf) - len, ": %s",
strerror(log_errno));
}
}
event_log(severity, buf);
}
static event_log_cb log_fn = NULL;
void
event_set_log_callback(event_log_cb cb)
{
log_fn = cb;
}
static void
event_log(int severity, const char *msg)
{
if (log_fn)
log_fn(severity, msg);
else {
const char *severity_str;
switch (severity) {
case _EVENT_LOG_DEBUG:
severity_str = "debug";
break;
case _EVENT_LOG_MSG:
severity_str = "msg";
break;
case _EVENT_LOG_WARN:
severity_str = "warn";
break;
case _EVENT_LOG_ERR:
severity_str = "err";
break;
default:
severity_str = "???";
break;
}
(void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
}
}

51
extra/libevent/log.h Normal file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LOG_H_
#define _LOG_H_
#ifdef __GNUC__
#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
#else
#define EV_CHECK_FMT(a,b)
#endif
void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
#ifdef USE_DEBUG
#define event_debug(x) _event_debugx x
#else
#define event_debug(x) do {;} while (0)
#endif
#undef EV_CHECK_FMT
#endif
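
All of libevent's diagnostics end up in event_log() above, and an embedding application such as mysqld can redirect them with event_set_log_callback() (declared in event.h). A possible sketch, where app_log_write() stands in for whatever logging function the embedder already has:

#include <stdio.h>
#include <event.h>

extern void app_log_write(const char *line);	/* hypothetical application logger */

static void
libevent_to_app_log(int severity, const char *msg)
{
	char line[1100];

	snprintf(line, sizeof(line), "libevent[%d]: %s", severity, msg);
	app_log_write(line);
}

void
install_libevent_logging(void)
{
	event_set_log_callback(libevent_to_app_log);
}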

138
extra/libevent/min_heap.h Normal file

@ -0,0 +1,138 @@
/*
* Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MIN_HEAP_H_
#define _MIN_HEAP_H_
#include "event.h"
typedef struct min_heap
{
struct event** p;
unsigned n, a;
} min_heap_t;
static inline void min_heap_ctor(min_heap_t* s);
static inline void min_heap_dtor(min_heap_t* s);
static inline void min_heap_elem_init(struct event* e);
static inline int min_heap_elem_greater(struct event *a, struct event *b);
static inline int min_heap_empty(min_heap_t* s);
static inline unsigned min_heap_size(min_heap_t* s);
static inline struct event* min_heap_top(min_heap_t* s);
static inline int min_heap_reserve(min_heap_t* s, unsigned n);
static inline int min_heap_push(min_heap_t* s, struct event* e);
static inline struct event* min_heap_pop(min_heap_t* s);
static inline int min_heap_erase(min_heap_t* s, struct event* e);
static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
int min_heap_elem_greater(struct event *a, struct event *b)
{
return timercmp(&a->ev_timeout, &b->ev_timeout, >);
}
void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
void min_heap_dtor(min_heap_t* s) { free(s->p); }
void min_heap_elem_init(struct event* e) { e->min_heap_idx = -1; }
int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
unsigned min_heap_size(min_heap_t* s) { return s->n; }
struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }
int min_heap_push(min_heap_t* s, struct event* e)
{
if(min_heap_reserve(s, s->n + 1))
return -1;
min_heap_shift_up_(s, s->n++, e);
return 0;
}
struct event* min_heap_pop(min_heap_t* s)
{
if(s->n)
{
struct event* e = *s->p;
e->min_heap_idx = -1;
min_heap_shift_down_(s, 0u, s->p[--s->n]);
return e;
}
return 0;
}
int min_heap_erase(min_heap_t* s, struct event* e)
{
if(((unsigned int)-1) != e->min_heap_idx)
{
min_heap_shift_down_(s, e->min_heap_idx, s->p[--s->n]);
e->min_heap_idx = -1;
return 0;
}
return -1;
}
int min_heap_reserve(min_heap_t* s, unsigned n)
{
if(s->a < n)
{
struct event** p;
unsigned a = s->a ? s->a * 2 : 8;
if(a < n)
a = n;
if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
return -1;
s->p = p;
s->a = a;
}
return 0;
}
void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
{
unsigned parent = (hole_index - 1) / 2;
while(hole_index && min_heap_elem_greater(s->p[parent], e))
{
(s->p[hole_index] = s->p[parent])->min_heap_idx = hole_index;
hole_index = parent;
parent = (hole_index - 1) / 2;
}
(s->p[hole_index] = e)->min_heap_idx = hole_index;
}
void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
{
unsigned min_child = 2 * (hole_index + 1);
while(min_child <= s->n)
{
min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
if(!(min_heap_elem_greater(e, s->p[min_child])))
break;
(s->p[hole_index] = s->p[min_child])->min_heap_idx = hole_index;
hole_index = min_child;
min_child = 2 * (hole_index + 1);
}
min_heap_shift_up_(s, hole_index, e);
}
#endif /* _MIN_HEAP_H_ */
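
min_heap.h is the timeout priority queue used by event.c: events are ordered by their ev_timeout so the one that expires first is always at the top. A compressed sketch of that usage (assumes the heap was set up with min_heap_ctor() and each event's ev_timeout is already filled in; not code from this commit):

#include "min_heap.h"

/* Return the event whose timeout expires first, or NULL on failure/empty. */
static struct event *
next_to_expire(min_heap_t *heap, struct event **evs, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; ++i) {
		min_heap_elem_init(evs[i]);	/* mark as "not on a heap" */
		if (min_heap_push(heap, evs[i]) == -1)
			return (NULL);		/* allocation failed */
	}
	return (min_heap_top(heap));		/* smallest ev_timeout */
}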

378
extra/libevent/poll.c Normal file

@ -0,0 +1,378 @@
/* $OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_POLL
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
struct pollop {
int event_count; /* Highest number alloc */
int nfds; /* Size of event_* */
int fd_count; /* Size of idxplus1_by_fd */
struct pollfd *event_set;
struct event **event_r_back;
struct event **event_w_back;
int *idxplus1_by_fd; /* Index into event_set by fd; we add 1 so
* that 0 (which is easy to memset) can mean
* "no entry." */
};
static void *poll_init (struct event_base *);
static int poll_add (void *, struct event *);
static int poll_del (void *, struct event *);
static int poll_dispatch (struct event_base *, void *, struct timeval *);
static void poll_dealloc (struct event_base *, void *);
const struct eventop pollops = {
"poll",
poll_init,
poll_add,
poll_del,
poll_dispatch,
poll_dealloc,
0
};
static void *
poll_init(struct event_base *base)
{
struct pollop *pollop;
/* Disable poll when this environment variable is set */
if (getenv("EVENT_NOPOLL"))
return (NULL);
if (!(pollop = calloc(1, sizeof(struct pollop))))
return (NULL);
evsignal_init(base);
return (pollop);
}
#ifdef CHECK_INVARIANTS
static void
poll_check_ok(struct pollop *pop)
{
int i, idx;
struct event *ev;
for (i = 0; i < pop->fd_count; ++i) {
idx = pop->idxplus1_by_fd[i]-1;
if (idx < 0)
continue;
assert(pop->event_set[idx].fd == i);
if (pop->event_set[idx].events & POLLIN) {
ev = pop->event_r_back[idx];
assert(ev);
assert(ev->ev_events & EV_READ);
assert(ev->ev_fd == i);
}
if (pop->event_set[idx].events & POLLOUT) {
ev = pop->event_w_back[idx];
assert(ev);
assert(ev->ev_events & EV_WRITE);
assert(ev->ev_fd == i);
}
}
for (i = 0; i < pop->nfds; ++i) {
struct pollfd *pfd = &pop->event_set[i];
assert(pop->idxplus1_by_fd[pfd->fd] == i+1);
}
}
#else
#define poll_check_ok(pop)
#endif
static int
poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
int res, i, msec = -1, nfds;
struct pollop *pop = arg;
poll_check_ok(pop);
if (tv != NULL)
msec = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
nfds = pop->nfds;
res = poll(pop->event_set, nfds, msec);
if (res == -1) {
if (errno != EINTR) {
event_warn("poll");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: poll reports %d", __func__, res));
if (res == 0)
return (0);
for (i = 0; i < nfds; i++) {
int what = pop->event_set[i].revents;
struct event *r_ev = NULL, *w_ev = NULL;
if (!what)
continue;
res = 0;
/* If the file gets closed notify */
if (what & (POLLHUP|POLLERR))
what |= POLLIN|POLLOUT;
if (what & POLLIN) {
res |= EV_READ;
r_ev = pop->event_r_back[i];
}
if (what & POLLOUT) {
res |= EV_WRITE;
w_ev = pop->event_w_back[i];
}
if (res == 0)
continue;
if (r_ev && (res & r_ev->ev_events)) {
event_active(r_ev, res & r_ev->ev_events, 1);
}
if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
event_active(w_ev, res & w_ev->ev_events, 1);
}
}
return (0);
}
static int
poll_add(void *arg, struct event *ev)
{
struct pollop *pop = arg;
struct pollfd *pfd = NULL;
int i;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
return (0);
poll_check_ok(pop);
if (pop->nfds + 1 >= pop->event_count) {
struct pollfd *tmp_event_set;
struct event **tmp_event_r_back;
struct event **tmp_event_w_back;
int tmp_event_count;
if (pop->event_count < 32)
tmp_event_count = 32;
else
tmp_event_count = pop->event_count * 2;
/* We need more file descriptors */
tmp_event_set = realloc(pop->event_set,
tmp_event_count * sizeof(struct pollfd));
if (tmp_event_set == NULL) {
event_warn("realloc");
return (-1);
}
pop->event_set = tmp_event_set;
tmp_event_r_back = realloc(pop->event_r_back,
tmp_event_count * sizeof(struct event *));
if (tmp_event_r_back == NULL) {
/* event_set overallocated; that's okay. */
event_warn("realloc");
return (-1);
}
pop->event_r_back = tmp_event_r_back;
tmp_event_w_back = realloc(pop->event_w_back,
tmp_event_count * sizeof(struct event *));
if (tmp_event_w_back == NULL) {
/* event_set and event_r_back overallocated; that's
* okay. */
event_warn("realloc");
return (-1);
}
pop->event_w_back = tmp_event_w_back;
pop->event_count = tmp_event_count;
}
if (ev->ev_fd >= pop->fd_count) {
int *tmp_idxplus1_by_fd;
int new_count;
if (pop->fd_count < 32)
new_count = 32;
else
new_count = pop->fd_count * 2;
while (new_count <= ev->ev_fd)
new_count *= 2;
tmp_idxplus1_by_fd =
realloc(pop->idxplus1_by_fd, new_count * sizeof(int));
if (tmp_idxplus1_by_fd == NULL) {
event_warn("realloc");
return (-1);
}
pop->idxplus1_by_fd = tmp_idxplus1_by_fd;
memset(pop->idxplus1_by_fd + pop->fd_count,
0, sizeof(int)*(new_count - pop->fd_count));
pop->fd_count = new_count;
}
i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
if (i >= 0) {
pfd = &pop->event_set[i];
} else {
i = pop->nfds++;
pfd = &pop->event_set[i];
pfd->events = 0;
pfd->fd = ev->ev_fd;
pop->event_w_back[i] = pop->event_r_back[i] = NULL;
pop->idxplus1_by_fd[ev->ev_fd] = i + 1;
}
pfd->revents = 0;
if (ev->ev_events & EV_WRITE) {
pfd->events |= POLLOUT;
pop->event_w_back[i] = ev;
}
if (ev->ev_events & EV_READ) {
pfd->events |= POLLIN;
pop->event_r_back[i] = ev;
}
poll_check_ok(pop);
return (0);
}
/*
* Nothing to be done here.
*/
static int
poll_del(void *arg, struct event *ev)
{
struct pollop *pop = arg;
struct pollfd *pfd = NULL;
int i;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
return (0);
poll_check_ok(pop);
i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
if (i < 0)
return (-1);
/* Do we still want to read or write? */
pfd = &pop->event_set[i];
if (ev->ev_events & EV_READ) {
pfd->events &= ~POLLIN;
pop->event_r_back[i] = NULL;
}
if (ev->ev_events & EV_WRITE) {
pfd->events &= ~POLLOUT;
pop->event_w_back[i] = NULL;
}
poll_check_ok(pop);
if (pfd->events)
/* Another event cares about that fd. */
return (0);
/* Okay, so we aren't interested in that fd anymore. */
pop->idxplus1_by_fd[ev->ev_fd] = 0;
--pop->nfds;
if (i != pop->nfds) {
/*
* Shift the last pollfd down into the now-unoccupied
* position.
*/
memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
sizeof(struct pollfd));
pop->event_r_back[i] = pop->event_r_back[pop->nfds];
pop->event_w_back[i] = pop->event_w_back[pop->nfds];
pop->idxplus1_by_fd[pop->event_set[i].fd] = i + 1;
}
poll_check_ok(pop);
return (0);
}
static void
poll_dealloc(struct event_base *base, void *arg)
{
struct pollop *pop = arg;
evsignal_dealloc(base);
if (pop->event_set)
free(pop->event_set);
if (pop->event_r_back)
free(pop->event_r_back);
if (pop->event_w_back)
free(pop->event_w_back);
if (pop->idxplus1_by_fd)
free(pop->idxplus1_by_fd);
memset(pop, 0, sizeof(struct pollop));
free(pop);
}
#endif /* HAVE_POLL */

356
extra/libevent/select.c Normal file

@ -0,0 +1,356 @@
/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_SELECT
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include <sys/queue.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
struct selectop {
int event_fds; /* Highest fd in fd set */
int event_fdsz;
fd_set *event_readset_in;
fd_set *event_writeset_in;
fd_set *event_readset_out;
fd_set *event_writeset_out;
struct event **event_r_by_fd;
struct event **event_w_by_fd;
};
static void *select_init (struct event_base *);
static int select_add (void *, struct event *);
static int select_del (void *, struct event *);
static int select_dispatch (struct event_base *, void *, struct timeval *);
static void select_dealloc (struct event_base *, void *);
const struct eventop selectops = {
"select",
select_init,
select_add,
select_del,
select_dispatch,
select_dealloc,
0
};
static int select_resize(struct selectop *sop, int fdsz);
static void *
select_init(struct event_base *base)
{
struct selectop *sop;
/* Disable select when this environment variable is set */
if (getenv("EVENT_NOSELECT"))
return (NULL);
if (!(sop = calloc(1, sizeof(struct selectop))))
return (NULL);
select_resize(sop, howmany(32 + 1, NFDBITS)*sizeof(fd_mask));
evsignal_init(base);
return (sop);
}
#ifdef CHECK_INVARIANTS
static void
check_selectop(struct selectop *sop)
{
int i;
for (i = 0; i <= sop->event_fds; ++i) {
if (FD_ISSET(i, sop->event_readset_in)) {
assert(sop->event_r_by_fd[i]);
assert(sop->event_r_by_fd[i]->ev_events & EV_READ);
assert(sop->event_r_by_fd[i]->ev_fd == i);
} else {
assert(! sop->event_r_by_fd[i]);
}
if (FD_ISSET(i, sop->event_writeset_in)) {
assert(sop->event_w_by_fd[i]);
assert(sop->event_w_by_fd[i]->ev_events & EV_WRITE);
assert(sop->event_w_by_fd[i]->ev_fd == i);
} else {
assert(! sop->event_w_by_fd[i]);
}
}
}
#else
#define check_selectop(sop) do { (void) sop; } while (0)
#endif
static int
select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
int res, i;
struct selectop *sop = arg;
check_selectop(sop);
memcpy(sop->event_readset_out, sop->event_readset_in,
sop->event_fdsz);
memcpy(sop->event_writeset_out, sop->event_writeset_in,
sop->event_fdsz);
res = select(sop->event_fds + 1, sop->event_readset_out,
sop->event_writeset_out, NULL, tv);
check_selectop(sop);
if (res == -1) {
if (errno != EINTR) {
event_warn("select");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: select reports %d", __func__, res));
check_selectop(sop);
for (i = 0; i <= sop->event_fds; ++i) {
struct event *r_ev = NULL, *w_ev = NULL;
res = 0;
if (FD_ISSET(i, sop->event_readset_out)) {
r_ev = sop->event_r_by_fd[i];
res |= EV_READ;
}
if (FD_ISSET(i, sop->event_writeset_out)) {
w_ev = sop->event_w_by_fd[i];
res |= EV_WRITE;
}
if (r_ev && (res & r_ev->ev_events)) {
event_active(r_ev, res & r_ev->ev_events, 1);
}
if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
event_active(w_ev, res & w_ev->ev_events, 1);
}
}
check_selectop(sop);
return (0);
}
static int
select_resize(struct selectop *sop, int fdsz)
{
int n_events, n_events_old;
fd_set *readset_in = NULL;
fd_set *writeset_in = NULL;
fd_set *readset_out = NULL;
fd_set *writeset_out = NULL;
struct event **r_by_fd = NULL;
struct event **w_by_fd = NULL;
n_events = (fdsz/sizeof(fd_mask)) * NFDBITS;
n_events_old = (sop->event_fdsz/sizeof(fd_mask)) * NFDBITS;
if (sop->event_readset_in)
check_selectop(sop);
if ((readset_in = realloc(sop->event_readset_in, fdsz)) == NULL)
goto error;
sop->event_readset_in = readset_in;
if ((readset_out = realloc(sop->event_readset_out, fdsz)) == NULL)
goto error;
sop->event_readset_out = readset_out;
if ((writeset_in = realloc(sop->event_writeset_in, fdsz)) == NULL)
goto error;
sop->event_writeset_in = writeset_in;
if ((writeset_out = realloc(sop->event_writeset_out, fdsz)) == NULL)
goto error;
sop->event_writeset_out = writeset_out;
if ((r_by_fd = realloc(sop->event_r_by_fd,
n_events*sizeof(struct event*))) == NULL)
goto error;
sop->event_r_by_fd = r_by_fd;
if ((w_by_fd = realloc(sop->event_w_by_fd,
n_events * sizeof(struct event*))) == NULL)
goto error;
sop->event_w_by_fd = w_by_fd;
memset((char *)sop->event_readset_in + sop->event_fdsz, 0,
fdsz - sop->event_fdsz);
memset((char *)sop->event_writeset_in + sop->event_fdsz, 0,
fdsz - sop->event_fdsz);
memset(sop->event_r_by_fd + n_events_old, 0,
(n_events-n_events_old) * sizeof(struct event*));
memset(sop->event_w_by_fd + n_events_old, 0,
(n_events-n_events_old) * sizeof(struct event*));
sop->event_fdsz = fdsz;
check_selectop(sop);
return (0);
error:
event_warn("malloc");
return (-1);
}
static int
select_add(void *arg, struct event *ev)
{
struct selectop *sop = arg;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
check_selectop(sop);
/*
* Keep track of the highest fd, so that we can calculate the size
* of the fd_sets for select(2)
*/
if (sop->event_fds < ev->ev_fd) {
int fdsz = sop->event_fdsz;
if (fdsz < sizeof(fd_mask))
fdsz = sizeof(fd_mask);
while (fdsz <
(howmany(ev->ev_fd + 1, NFDBITS) * sizeof(fd_mask)))
fdsz *= 2;
if (fdsz != sop->event_fdsz) {
if (select_resize(sop, fdsz)) {
check_selectop(sop);
return (-1);
}
}
sop->event_fds = ev->ev_fd;
}
if (ev->ev_events & EV_READ) {
FD_SET(ev->ev_fd, sop->event_readset_in);
sop->event_r_by_fd[ev->ev_fd] = ev;
}
if (ev->ev_events & EV_WRITE) {
FD_SET(ev->ev_fd, sop->event_writeset_in);
sop->event_w_by_fd[ev->ev_fd] = ev;
}
check_selectop(sop);
return (0);
}
/*
* Nothing to be done here.
*/
static int
select_del(void *arg, struct event *ev)
{
struct selectop *sop = arg;
check_selectop(sop);
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
if (sop->event_fds < ev->ev_fd) {
check_selectop(sop);
return (0);
}
if (ev->ev_events & EV_READ) {
FD_CLR(ev->ev_fd, sop->event_readset_in);
sop->event_r_by_fd[ev->ev_fd] = NULL;
}
if (ev->ev_events & EV_WRITE) {
FD_CLR(ev->ev_fd, sop->event_writeset_in);
sop->event_w_by_fd[ev->ev_fd] = NULL;
}
check_selectop(sop);
return (0);
}
static void
select_dealloc(struct event_base *base, void *arg)
{
struct selectop *sop = arg;
evsignal_dealloc(base);
if (sop->event_readset_in)
free(sop->event_readset_in);
if (sop->event_writeset_in)
free(sop->event_writeset_in);
if (sop->event_readset_out)
free(sop->event_readset_out);
if (sop->event_writeset_out)
free(sop->event_writeset_out);
if (sop->event_r_by_fd)
free(sop->event_r_by_fd);
if (sop->event_w_by_fd)
free(sop->event_w_by_fd);
memset(sop, 0, sizeof(struct selectop));
free(sop);
}
#endif /* HAVE_SELECT */
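
Each backend's init function above bails out when a matching EVENT_NO* environment variable is set (EVENT_NOKQUEUE, EVENT_NOPOLL, EVENT_NOSELECT, and similarly for the epoll/devpoll backends), which is handy for forcing a specific backend while debugging. A sketch on a POSIX system (setenv() assumed available):

#include <stdlib.h>
#include <event.h>

/* Disable the other backends so libevent falls back to select().
 * This must run before event_init(). */
struct event_base *
init_with_select_only(void)
{
	setenv("EVENT_NOKQUEUE", "1", 1);
	setenv("EVENT_NOEPOLL", "1", 1);
	setenv("EVENT_NODEVPOLL", "1", 1);
	setenv("EVENT_NOPOLL", "1", 1);
	return (event_init());
}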

304
extra/libevent/signal.c Normal file

@ -0,0 +1,304 @@
/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <winsock2.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <assert.h>
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "evutil.h"
#include "log.h"
struct event_base *evsignal_base = NULL;
static void evsignal_handler(int sig);
/* Callback for when the signal handler writes a byte to our signaling socket */
static void
evsignal_cb(int fd, short what, void *arg)
{
static char signals[100];
#ifdef WIN32
SSIZE_T n;
#else
ssize_t n;
#endif
n = recv(fd, signals, sizeof(signals), 0);
if (n == -1)
event_err(1, "%s: read", __func__);
}
#ifdef HAVE_SETFD
#define FD_CLOSEONEXEC(x) do { \
if (fcntl(x, F_SETFD, 1) == -1) \
event_warn("fcntl(%d, F_SETFD)", x); \
} while (0)
#else
#define FD_CLOSEONEXEC(x)
#endif
void
evsignal_init(struct event_base *base)
{
/*
* Our signal handler is going to write to one end of the socket
* pair to wake up our event loop. The event loop then scans for
* signals that got delivered.
*/
if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1)
event_err(1, "%s: socketpair", __func__);
FD_CLOSEONEXEC(base->sig.ev_signal_pair[0]);
FD_CLOSEONEXEC(base->sig.ev_signal_pair[1]);
base->sig.sh_old = NULL;
base->sig.sh_old_max = 0;
base->sig.evsignal_caught = 0;
memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);
event_set(&base->sig.ev_signal, base->sig.ev_signal_pair[1],
EV_READ | EV_PERSIST, evsignal_cb, &base->sig.ev_signal);
base->sig.ev_signal.ev_base = base;
base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;
}
/* Helper: set the signal handler for evsignal to handler in base, so that
* we can restore the original handler when we clear the current one. */
int
_evsignal_set_handler(struct event_base *base,
int evsignal, void (*handler)(int))
{
#ifdef HAVE_SIGACTION
struct sigaction sa;
#else
ev_sighandler_t sh;
#endif
struct evsignal_info *sig = &base->sig;
void *p;
/*
* resize saved signal handler array up to the highest signal number.
* a dynamic array is used to keep footprint on the low side.
*/
if (evsignal >= sig->sh_old_max) {
event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
__func__, evsignal, sig->sh_old_max));
sig->sh_old_max = evsignal + 1;
p = realloc(sig->sh_old, sig->sh_old_max * sizeof *sig->sh_old);
if (p == NULL) {
event_warn("realloc");
return (-1);
}
sig->sh_old = p;
}
/* allocate space for previous handler out of dynamic array */
sig->sh_old[evsignal] = malloc(sizeof *sig->sh_old[evsignal]);
if (sig->sh_old[evsignal] == NULL) {
event_warn("malloc");
return (-1);
}
/* save previous handler and setup new handler */
#ifdef HAVE_SIGACTION
memset(&sa, 0, sizeof(sa));
sa.sa_handler = handler;
sa.sa_flags |= SA_RESTART;
sigfillset(&sa.sa_mask);
if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
event_warn("sigaction");
free(sig->sh_old[evsignal]);
return (-1);
}
#else
if ((sh = signal(evsignal, handler)) == SIG_ERR) {
event_warn("signal");
free(sig->sh_old[evsignal]);
return (-1);
}
*sig->sh_old[evsignal] = sh;
#endif
return (0);
}
int
evsignal_add(struct event *ev)
{
int evsignal;
struct event_base *base = ev->ev_base;
struct evsignal_info *sig = &ev->ev_base->sig;
if (ev->ev_events & (EV_READ|EV_WRITE))
event_errx(1, "%s: EV_SIGNAL incompatible use", __func__);
evsignal = EVENT_SIGNAL(ev);
event_debug(("%s: %p: changing signal handler", __func__, ev));
if (_evsignal_set_handler(base, evsignal, evsignal_handler) == -1)
return (-1);
/* catch signals if they happen quickly */
evsignal_base = base;
if (!sig->ev_signal_added) {
sig->ev_signal_added = 1;
event_add(&sig->ev_signal, NULL);
}
return (0);
}
int
_evsignal_restore_handler(struct event_base *base, int evsignal)
{
int ret = 0;
struct evsignal_info *sig = &base->sig;
#ifdef HAVE_SIGACTION
struct sigaction *sh;
#else
ev_sighandler_t *sh;
#endif
/* restore previous handler */
sh = sig->sh_old[evsignal];
sig->sh_old[evsignal] = NULL;
#ifdef HAVE_SIGACTION
if (sigaction(evsignal, sh, NULL) == -1) {
event_warn("sigaction");
ret = -1;
}
#else
if (signal(evsignal, *sh) == SIG_ERR) {
event_warn("signal");
ret = -1;
}
#endif
free(sh);
return ret;
}
int
evsignal_del(struct event *ev)
{
event_debug(("%s: %p: restoring signal handler", __func__, ev));
return _evsignal_restore_handler(ev->ev_base, EVENT_SIGNAL(ev));
}
static void
evsignal_handler(int sig)
{
int save_errno = errno;
if(evsignal_base == NULL) {
event_warn(
"%s: received signal %d, but have no base configured",
__func__, sig);
return;
}
evsignal_base->sig.evsigcaught[sig]++;
evsignal_base->sig.evsignal_caught = 1;
#ifndef HAVE_SIGACTION
signal(sig, evsignal_handler);
#endif
/* Wake up our notification mechanism */
send(evsignal_base->sig.ev_signal_pair[0], "a", 1, 0);
errno = save_errno;
}
void
evsignal_process(struct event_base *base)
{
struct event *ev;
sig_atomic_t ncalls;
base->sig.evsignal_caught = 0;
TAILQ_FOREACH(ev, &base->sig.signalqueue, ev_signal_next) {
ncalls = base->sig.evsigcaught[EVENT_SIGNAL(ev)];
if (ncalls) {
if (!(ev->ev_events & EV_PERSIST))
event_del(ev);
event_active(ev, EV_SIGNAL, ncalls);
base->sig.evsigcaught[EVENT_SIGNAL(ev)] = 0;
}
}
}
void
evsignal_dealloc(struct event_base *base)
{
if(base->sig.ev_signal_added) {
event_del(&base->sig.ev_signal);
base->sig.ev_signal_added = 0;
}
assert(TAILQ_EMPTY(&base->sig.signalqueue));
EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
base->sig.ev_signal_pair[0] = -1;
EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
base->sig.ev_signal_pair[1] = -1;
base->sig.sh_old_max = 0;
/* per index frees are handled in evsignal_del() */
free(base->sig.sh_old);
}
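The socketpair wake-up used above is the classic "self-pipe" pattern: the async-signal-safe handler only writes one byte, and the event loop learns about the signal the next time it polls. A minimal stand-alone sketch of the same idea (illustrative only, not part of this patch; the file name, the SIGUSR1 choice and all identifiers are made up):

/* self_pipe_demo.c -- minimal sketch of the wake-up pattern used by evsignal */
#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

static int wake_pair[2];   /* [0] written by the handler, [1] read by the loop */

static void on_signal(int sig)
{
  char c= (char) sig;
  (void) write(wake_pair[0], &c, 1);   /* write() is async-signal-safe */
}

int main(void)
{
  char c;
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, wake_pair) == -1)
    return 1;
  signal(SIGUSR1, on_signal);
  for (;;)                             /* the "event loop" */
  {
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(wake_pair[1], &rfds);
    /* retry if the signal interrupts select() itself (EINTR) */
    if (select(wake_pair[1] + 1, &rfds, NULL, NULL, NULL) > 0 &&
        read(wake_pair[1], &c, 1) == 1)
    {
      printf("woken up by signal %d\n", (int) c);
      break;
    }
  }
  return 0;
}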


@ -0,0 +1,23 @@
#ifndef _STRLCPY_INTERNAL_H_
#define _STRLCPY_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */
#ifndef HAVE_STRLCPY
#include <string.h>
size_t _event_strlcpy(char *dst, const char *src, size_t siz);
#define strlcpy _event_strlcpy
#endif
#ifdef __cplusplus
}
#endif
#endif

extra/libevent/strlcpy.c  (new file, 76 lines)

@ -0,0 +1,76 @@
/* $OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $ */
/*
* Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $";
#endif /* LIBC_SCCS and not lint */
#include <sys/types.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */
#ifndef HAVE_STRLCPY
#include "strlcpy-internal.h"
/*
* Copy src to string dst of size siz. At most siz-1 characters
* will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
*/
size_t
_event_strlcpy(dst, src, siz)
char *dst;
const char *src;
size_t siz;
{
register char *d = dst;
register const char *s = src;
register size_t n = siz;
/* Copy as many bytes as will fit */
if (n != 0 && --n != 0) {
do {
if ((*d++ = *s++) == 0)
break;
} while (--n != 0);
}
/* Not enough room in dst, add NUL and traverse rest of src */
if (n == 0) {
if (siz != 0)
*d = '\0'; /* NUL-terminate dst */
while (*s++)
;
}
return(s - src - 1); /* count does not include NUL */
}
#endif
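A hypothetical caller, only to illustrate the return-value contract described above (a return value >= siz means the copy was truncated); the buffer size and string are made up, and the file would be compiled together with strlcpy.c on a system without a native strlcpy():

/* strlcpy_demo.c -- assumes HAVE_STRLCPY is not defined, so strlcpy maps to _event_strlcpy */
#include <stdio.h>
#include "strlcpy-internal.h"

int main(void)
{
  char buf[8];
  size_t n= strlcpy(buf, "pool-of-threads", sizeof(buf));
  if (n >= sizeof(buf))
    printf("truncated to \"%s\" (source needed %u bytes)\n", buf, (unsigned) n + 1);
  return 0;
}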


@ -436,3 +436,7 @@ inline ulonglong double2ulonglong(double d)
#define HAVE_UCA_COLLATIONS 1
#define HAVE_BOOL 1
#ifndef EMBEDDED_LIBRARY
#define HAVE_LIBEVENT 1
#define HAVE_POOL_OF_THREADS 1
#endif


@ -33,6 +33,7 @@ extern my_bool _dbug_on_;
extern my_bool _db_keyword_(struct _db_code_state_ *, const char *, int);
extern int _db_explain_(struct _db_code_state_ *cs, char *buf, size_t len);
extern int _db_explain_init_(char *buf, size_t len);
extern int _db_is_pushed_(void);
extern void _db_setjmp_(void);
extern void _db_longjmp_(void);
extern void _db_process_(const char *name);


@ -223,7 +223,9 @@ typedef struct st_typelib {
unsigned int *type_lengths;
} TYPELIB;
extern my_ulonglong find_typeset(char *x, TYPELIB *typelib,int *error_position);
extern int find_type_or_exit(const char *x, TYPELIB *typelib,
extern int find_type_with_warning(const char *x, TYPELIB *typelib,
const char *option);
extern uint find_type_or_exit(const char *x, TYPELIB *typelib,
const char *option);
extern int find_type(char *x, const TYPELIB *typelib, unsigned int full_name);
extern void make_type(char *to,unsigned int nr,TYPELIB *typelib);


@ -27,7 +27,9 @@ typedef struct st_typelib { /* Different types saved here */
} TYPELIB;
extern my_ulonglong find_typeset(char *x, TYPELIB *typelib,int *error_position);
extern int find_type_or_exit(const char *x, TYPELIB *typelib,
extern int find_type_with_warning(const char *x, TYPELIB *typelib,
const char *option);
extern uint find_type_or_exit(const char *x, TYPELIB *typelib,
const char *option);
extern int find_type(char *x, const TYPELIB *typelib, unsigned int full_name);
extern void make_type(char *to,unsigned int nr,TYPELIB *typelib);


@ -88,6 +88,7 @@ my_bool vio_peer_addr(Vio* vio, char *buf, uint16 *port);
/* Remotes in_addr */
void vio_in_addr(Vio *vio, struct in_addr *in);
my_bool vio_poll_read(Vio *vio,uint timeout);
ssize_t vio_pending(Vio *vio);
#ifdef HAVE_OPENSSL
#include <openssl/opensslv.h>


@ -98,6 +98,7 @@ INC_LIB= $(top_builddir)/regex/libregex.a \
$(top_builddir)/vio/libvio.a \
@NDB_SCI_LIBS@ \
@mysql_plugin_libs@ \
$(libevent_inc_libs) \
$(yassl_inc_libs)
if HAVE_YASSL


@ -0,0 +1,4 @@
-- require r/have_pool_of_threads.require
disable_query_log;
show variables like 'thread_handling';
enable_query_log;


@ -1200,11 +1200,23 @@ sub command_line_setup {
{
# Indicate that we are using debugger
$glob_debugger= 1;
$opt_testcase_timeout= 60*60*24; # Don't abort debugging with timeout
$opt_suite_timeout= $opt_testcase_timeout;
$opt_retry= 1;
$opt_retry_failure= 1;
if ( using_extern() )
{
mtr_error("Can't use --extern when using debugger");
}
}
if ($opt_debug)
{
$opt_testcase_timeout= 60*60*24; # Don't abort debugging with timeout
$opt_suite_timeout= $opt_testcase_timeout;
$opt_retry= 1;
$opt_retry_failure= 1;
}
# --------------------------------------------------------------------------
# Check timeout arguments


@ -1,7 +1,7 @@
CREATE TABLE t1(a int) engine=innodb;
START TRANSACTION;
insert into t1 values(9);
SET SESSION debug="d,crash_commit_before";
SET GLOBAL debug="d,crash_commit_before";
COMMIT;
ERROR HY000: Lost connection to MySQL server during query
SHOW CREATE TABLE t1;


@ -0,0 +1,2 @@
Variable_name Value
thread_handling pool-of-threads

File diff suppressed because it is too large.


@ -2,12 +2,12 @@ CREATE TABLE t1(id INT);
INSERT INTO t1 VALUES (1),(2),(3),(4);
INSERT INTO t1 SELECT a.id FROM t1 a,t1 b,t1 c,t1 d;
SET @orig_debug=@@debug;
SET SESSION debug="d,subselect_exec_fail";
SET GLOBAL debug="d,subselect_exec_fail";
SELECT SUM(EXISTS(SELECT RAND() FROM t1)) FROM t1;
SUM(EXISTS(SELECT RAND() FROM t1))
0
SELECT REVERSE(EXISTS(SELECT RAND() FROM t1));
REVERSE(EXISTS(SELECT RAND() FROM t1))
0
SET SESSION debug=@orig_debug;
SET GLOBAL debug=@orig_debug;
DROP TABLE t1;


@ -12,7 +12,7 @@ START TRANSACTION;
insert into t1 values(9);
# Setup the mysqld to crash at certain point
SET SESSION debug="d,crash_commit_before";
SET GLOBAL debug="d,crash_commit_before";
# Write file to make mysql-test-run.pl expect crash and restart
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect


@ -43,7 +43,7 @@ LOCK TABLE t1 WRITE;
--echo # connection con1
connect (con1,localhost,root,,);
let $con1_id= `SELECT CONNECTION_ID()`;
SET SESSION debug="+d,sleep_open_and_lock_after_open";
SET GLOBAL debug="+d,sleep_open_and_lock_after_open";
send INSERT INTO t1 VALUES (1);
--echo # connection default
connection default;
@ -74,7 +74,7 @@ UNLOCK TABLES;
--echo # connection con1
connection con1;
reap;
SET SESSION debug="-d,sleep_open_and_lock_after_open";
SET GLOBAL debug="-d,sleep_open_and_lock_after_open";
disconnect con1;
--echo # connection default
connection default;


@ -0,0 +1 @@
--test-ignore-wrong-options --thread-handling=pool-of-threads


@ -0,0 +1,7 @@
# Start with thread_handling=pool-of-threads
# and run a number of tests
-- source include/have_pool_of_threads.inc
-- source include/common-tests.inc


@ -10,8 +10,8 @@ INSERT INTO t1 VALUES (1),(2),(3),(4);
INSERT INTO t1 SELECT a.id FROM t1 a,t1 b,t1 c,t1 d;
# Setup the mysqld to crash at certain point
SET @orig_debug=@@debug;
SET SESSION debug="d,subselect_exec_fail";
SET GLOBAL debug="d,subselect_exec_fail";
SELECT SUM(EXISTS(SELECT RAND() FROM t1)) FROM t1;
SELECT REVERSE(EXISTS(SELECT RAND() FROM t1));
SET SESSION debug=@orig_debug;
SET GLOBAL debug=@orig_debug;
DROP TABLE t1;


@ -22,7 +22,7 @@
static const char field_separator=',';
int find_type_or_exit(const char *x, TYPELIB *typelib, const char *option)
int find_type_with_warning(const char *x, TYPELIB *typelib, const char *option)
{
int res;
const char **ptr;
@ -38,12 +38,20 @@ int find_type_or_exit(const char *x, TYPELIB *typelib, const char *option)
while (*++ptr)
fprintf(stderr, ",'%s'", *ptr);
fprintf(stderr, "\n");
exit(1);
}
return res;
}
uint find_type_or_exit(const char *x, TYPELIB *typelib, const char *option)
{
int res;
if ((res= find_type_with_warning(x, typelib, option)) <= 0)
exit(1);
return (uint) res;
}
/*
Search after a string in a list of strings. Endspace in x is not compared.


@ -22,7 +22,8 @@ MYSQLLIBdir= $(pkglibdir)
pkgplugindir = $(pkglibdir)/plugin
INCLUDES = @ZLIB_INCLUDES@ \
-I$(top_builddir)/include -I$(top_srcdir)/include \
-I$(top_srcdir)/regex -I$(srcdir) $(openssl_includes)
-I$(top_srcdir)/regex -I$(srcdir) $(openssl_includes) \
$(libevent_includes)
WRAPLIBS= @WRAPLIBS@
SUBDIRS = share
libexec_PROGRAMS = mysqld
@ -41,7 +42,7 @@ mysqld_DEPENDENCIES= @mysql_plugin_libs@ $(SUPPORTING_LIBS) libndb.la
LDADD = $(SUPPORTING_LIBS) @ZLIB_LIBS@ @NDB_SCI_LIBS@
mysqld_LDADD = libndb.la \
@MYSQLD_EXTRA_LDFLAGS@ \
@pstack_libs@ \
@pstack_libs@ $(libevent_libs) \
@mysql_plugin_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
$(yassl_libs) $(openssl_libs) @MYSQLD_EXTRA_LIBS@


@ -377,8 +377,7 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
if (!hton)
goto end;
switch (hton->state)
{
switch (hton->state) {
case SHOW_OPTION_NO:
case SHOW_OPTION_DISABLED:
break;
@ -435,6 +434,9 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
{
sql_print_error("Plugin '%s' init function returned error.",
plugin->name.str);
/* Free data, so that we don't refer to it in ha_finalize_handlerton */
my_free(hton, MYF(0));
plugin->data= 0;
goto err;
}
}
@ -463,6 +465,8 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
if (idx == (int) DB_TYPE_DEFAULT)
{
sql_print_warning("Too many storage engines!");
my_free(hton, MYF(0));
plugin->data= 0;
DBUG_RETURN(1);
}
if (hton->db_type != DB_TYPE_UNKNOWN)


@ -1023,6 +1023,9 @@ void time_out_user_resource_limits(THD *thd, USER_CONN *uc);
void decrease_user_connections(USER_CONN *uc);
void thd_init_client_charset(THD *thd, uint cs_number);
bool setup_connection_thread_globals(THD *thd);
bool login_connection(THD *thd);
void end_connection(THD *thd);
void prepare_new_connection_state(THD* thd);
int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent);
bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create);
@ -1912,6 +1915,7 @@ extern ulong query_cache_size, query_cache_min_res_unit;
extern ulong slow_launch_threads, slow_launch_time;
extern ulong table_cache_size, table_def_size;
extern ulong max_connections,max_connect_errors, connect_timeout;
extern ulong extra_max_connections;
extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
extern ulong what_to_log,flush_time;
@ -1933,7 +1937,7 @@ extern ulong opt_tc_log_size, tc_log_max_pages_used, tc_log_page_size;
extern ulong tc_log_page_waits;
extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb;
extern uint test_flags,select_errors,ha_open_options;
extern uint protocol_version, mysqld_port, dropping_tables;
extern uint protocol_version, mysqld_port, mysqld_extra_port, dropping_tables;
extern uint delay_key_write_options;
#endif /* MYSQL_SERVER */
#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
@ -1957,7 +1961,8 @@ extern bool opt_disable_networking, opt_skip_show_db;
extern my_bool opt_character_set_client_handshake;
extern bool volatile abort_loop, shutdown_in_progress;
extern uint volatile thread_count, thread_running, global_read_lock;
extern uint connection_count;
extern ulong thread_created;
extern uint connection_count, extra_connection_count;
extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types;
extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
extern my_bool opt_slave_compressed_protocol, use_temp_pool;


@ -357,8 +357,9 @@ static bool volatile select_thread_in_use, signal_thread_in_use;
static bool volatile ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_short_log_format= 0;
static my_bool opt_ignore_wrong_options= 0;
static uint kill_cached_threads, wake_thread;
static ulong thread_created;
ulong thread_created;
static ulong max_used_connections;
static ulong my_bind_addr; /**< the address we bind to */
static volatile ulong cached_thread_count= 0;
@ -469,6 +470,7 @@ const char *opt_binlog_format= binlog_format_names[opt_binlog_format_id];
static bool calling_initgroups= FALSE; /**< Used in SIGSEGV handler. */
#endif
uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options;
uint mysqld_extra_port;
uint mysqld_port_timeout;
uint delay_key_write_options, protocol_version;
uint lower_case_table_names;
@ -495,6 +497,7 @@ ulong delayed_insert_errors,flush_time;
ulong specialflag=0;
ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
ulong extra_max_connections;
uint max_user_connections= 0;
/**
Limit of the total number of prepared statements in the server.
@ -648,7 +651,7 @@ static int defaults_argc;
static char **defaults_argv;
static char *opt_bin_logname;
static my_socket unix_sock,ip_sock;
static my_socket unix_sock, base_ip_sock, extra_ip_sock;
struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD()
#ifndef EMBEDDED_LIBRARY
@ -708,7 +711,7 @@ my_bool opt_enable_shared_memory;
HANDLE smem_event_connect_request= 0;
#endif
scheduler_functions thread_scheduler;
scheduler_functions thread_scheduler, extra_thread_scheduler;
#define SSL_VARS_NOT_STATIC
#include "sslopt-vars.h"
@ -735,7 +738,7 @@ struct st_VioSSLFd *ssl_acceptor_fd;
Number of currently active user connections. The variable is protected by
LOCK_connection_count.
*/
uint connection_count= 0;
uint connection_count= 0, extra_connection_count= 0;
/* Function declarations */
@ -831,11 +834,17 @@ static void close_connections(void)
DBUG_PRINT("quit",("Closing sockets"));
if (!opt_disable_networking )
{
if (ip_sock != INVALID_SOCKET)
if (base_ip_sock != INVALID_SOCKET)
{
(void) shutdown(ip_sock, SHUT_RDWR);
(void) closesocket(ip_sock);
ip_sock= INVALID_SOCKET;
(void) shutdown(base_ip_sock, SHUT_RDWR);
(void) closesocket(base_ip_sock);
base_ip_sock= INVALID_SOCKET;
}
if (extra_ip_sock != INVALID_SOCKET)
{
(void) shutdown(extra_ip_sock, SHUT_RDWR);
(void) closesocket(extra_ip_sock);
extra_ip_sock= INVALID_SOCKET;
}
}
#ifdef __NT__
@ -969,42 +978,39 @@ static void close_connections(void)
}
static void close_server_sock()
static void close_socket(my_socket sock, const char *info)
{
#ifdef HAVE_CLOSE_SERVER_SOCK
DBUG_ENTER("close_server_sock");
my_socket tmp_sock;
tmp_sock=ip_sock;
if (tmp_sock != INVALID_SOCKET)
DBUG_ENTER("close_socket");
if (sock != INVALID_SOCKET)
{
ip_sock=INVALID_SOCKET;
DBUG_PRINT("info",("calling shutdown on TCP/IP socket"));
VOID(shutdown(tmp_sock, SHUT_RDWR));
DBUG_PRINT("info", ("calling shutdown on %s socket", info));
(void) shutdown(sock, SHUT_RDWR);
#if defined(__NETWARE__)
/*
The following code is disabled for normal systems as it causes MySQL
to hang on AIX 4.3 during shutdown
*/
DBUG_PRINT("info",("calling closesocket on TCP/IP socket"));
VOID(closesocket(tmp_sock));
DBUG_PRINT("info", ("calling closesocket on %s socket", info));
(void) closesocket(sock);
#endif
}
tmp_sock=unix_sock;
if (tmp_sock != INVALID_SOCKET)
{
unix_sock=INVALID_SOCKET;
DBUG_PRINT("info",("calling shutdown on unix socket"));
VOID(shutdown(tmp_sock, SHUT_RDWR));
#if defined(__NETWARE__)
/*
The following code is disabled for normal systems as it may cause MySQL
to hang on AIX 4.3 during shutdown
*/
DBUG_PRINT("info",("calling closesocket on unix/IP socket"));
VOID(closesocket(tmp_sock));
#endif
DBUG_VOID_RETURN;
}
static void close_server_sock()
{
#ifdef HAVE_CLOSE_SERVER_SOCK
DBUG_ENTER("close_server_sock");
close_socket(base_ip_sock, "TCP/IP");
close_socket(extra_ip_sock, "TCP/IP");
close_socket(unix_sock, "unix/IP");
if (unix_sock != INVALID_SOCKET)
VOID(unlink(mysqld_unix_port));
}
base_ip_sock= extra_ip_sock= unix_sock= INVALID_SOCKET;
DBUG_VOID_RETURN;
#endif
}
@ -1592,28 +1598,22 @@ static void set_root(const char *path)
#endif
}
static void network_init(void)
/**
Activate usage of a tcp port
*/
static my_socket activate_tcp_port(uint port)
{
struct sockaddr_in IPaddr;
#ifdef HAVE_SYS_UN_H
struct sockaddr_un UNIXaddr;
#endif
my_socket ip_sock;
int arg=1;
int ret;
uint waited;
uint this_wait;
uint retry;
DBUG_ENTER("network_init");
LINT_INIT(ret);
DBUG_ENTER("activate_tcp_port");
DBUG_PRINT("enter",("port: %u", port));
if (thread_scheduler.init())
unireg_abort(1); /* purecov: inspected */
set_ports();
if (mysqld_port != 0 && !opt_disable_networking && !opt_bootstrap)
{
DBUG_PRINT("general",("IP Socket is %d",mysqld_port));
ip_sock = socket(AF_INET, SOCK_STREAM, 0);
if (ip_sock == INVALID_SOCKET)
{
@ -1624,7 +1624,7 @@ static void network_init(void)
bzero((char*) &IPaddr, sizeof(IPaddr));
IPaddr.sin_family = AF_INET;
IPaddr.sin_addr.s_addr = my_bind_addr;
IPaddr.sin_port = (unsigned short) htons((unsigned short) mysqld_port);
IPaddr.sin_port = (unsigned short) htons((unsigned short) port);
#ifndef __WIN__
/*
@ -1641,6 +1641,7 @@ static void network_init(void)
Retry at second: 1, 3, 7, 13, 22, 35, 52, 74, ...
Limit the sequence by mysqld_port_timeout (set --port-open-timeout=#).
*/
for (waited= 0, retry= 1; ; retry++, waited+= this_wait)
{
if (((ret= bind(ip_sock, my_reinterpret_cast(struct sockaddr *) (&IPaddr),
@ -1648,7 +1649,7 @@ static void network_init(void)
(socket_errno != SOCKET_EADDRINUSE) ||
(waited >= mysqld_port_timeout))
break;
sql_print_information("Retrying bind on TCP/IP port %u", mysqld_port);
sql_print_information("Retrying bind on TCP/IP port %u", port);
this_wait= retry * retry / 3 + 1;
sleep(this_wait);
}
@ -1656,7 +1657,8 @@ static void network_init(void)
{
DBUG_PRINT("error",("Got error: %d from bind",socket_errno));
sql_perror("Can't start server: Bind on TCP/IP port");
sql_print_error("Do you already have another mysqld server running on port: %d ?",mysqld_port);
sql_print_error("Do you already have another mysqld server running on "
"port: %u ?", port);
unireg_abort(1);
}
if (listen(ip_sock,(int) back_log) < 0)
@ -1666,6 +1668,30 @@ static void network_init(void)
socket_errno);
unireg_abort(1);
}
DBUG_RETURN(ip_sock);
}
static void network_init(void)
{
#ifdef HAVE_SYS_UN_H
struct sockaddr_un UNIXaddr;
#endif
int arg=1;
DBUG_ENTER("network_init");
LINT_INIT(ret);
if (thread_scheduler.init())
unireg_abort(1); /* purecov: inspected */
set_ports();
if (!opt_disable_networking && !opt_bootstrap)
{
if (mysqld_port)
base_ip_sock= activate_tcp_port(mysqld_port);
if (mysqld_extra_port)
extra_ip_sock= activate_tcp_port(mysqld_extra_port);
}
#ifdef __NT__
@ -1829,7 +1855,7 @@ void unlink_thd(THD *thd)
thd->cleanup();
pthread_mutex_lock(&LOCK_connection_count);
--connection_count;
(*thd->scheduler->connection_count)--;
pthread_mutex_unlock(&LOCK_connection_count);
(void) pthread_mutex_lock(&LOCK_thread_count);
@ -2438,15 +2464,17 @@ and this may fail.\n\n");
(ulong) dflt_key_cache->key_cache_mem_size);
fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size);
fprintf(stderr, "max_used_connections=%lu\n", max_used_connections);
fprintf(stderr, "max_threads=%u\n", thread_scheduler.max_threads);
fprintf(stderr, "max_threads=%u\n", thread_scheduler.max_threads +
(uint) extra_max_connections);
fprintf(stderr, "threads_connected=%u\n", thread_count);
fprintf(stderr, "It is possible that mysqld could use up to \n\
key_buffer_size + (read_buffer_size + sort_buffer_size)*max_threads = %lu K\n\
bytes of memory\n", ((ulong) dflt_key_cache->key_cache_mem_size +
(global_system_variables.read_buff_size +
global_system_variables.sortbuff_size) *
thread_scheduler.max_threads +
max_connections * sizeof(THD)) / 1024);
(thread_scheduler.max_threads + extra_max_connections) +
(max_connections + extra_max_connections)* sizeof(THD))
/ 1024);
fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n");
#if defined(HAVE_LINUXTHREADS)
@ -2692,7 +2720,7 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
This should actually be '+ max_number_of_slaves' instead of +10,
but the +10 should be quite safe.
*/
init_thr_alarm(thread_scheduler.max_threads +
init_thr_alarm(thread_scheduler.max_threads + extra_max_connections +
global_system_variables.max_insert_delayed_threads + 10);
if (test_flags & TEST_SIGINT)
{
@ -3005,10 +3033,8 @@ sizeof(load_default_groups)/sizeof(load_default_groups[0]);
The default value is taken from either opt_date_time_formats[] or
the ISO format (ANSI SQL)
@retval
0 ok
@retval
1 error
@retval 0 ok
@retval 1 error
*/
static bool init_global_datetime_format(timestamp_type format_type,
@ -3303,7 +3329,8 @@ static int init_common_variables(const char *conf_file_name, int argc,
uint files, wanted_files, max_open_files;
/* MyISAM requires two file handles per table. */
wanted_files= 10+max_connections+table_cache_size*2;
wanted_files= (10 + max_connections + extra_max_connections +
table_cache_size*2);
/*
We are trying to allocate no less than max_connections*5 file
handles (i.e. we are trying to set the limit so that they will
@ -3314,7 +3341,8 @@ static int init_common_variables(const char *conf_file_name, int argc,
can't get max_connections*5 but still got no less than was
requested (value of wanted_files).
*/
max_open_files= max(max(wanted_files, max_connections*5),
max_open_files= max(max(wanted_files,
(max_connections + extra_max_connections)*5),
open_files_limit);
files= my_set_max_open_files(max_open_files);
@ -4585,10 +4613,8 @@ static char *add_quoted_string(char *to, const char *from, char *to_end)
@param file_path Path to this program
@param startup_option Startup option to mysqld
@retval
0 option handled
@retval
1 Could not handle option
@retval 0 option handled
@retval 1 Could not handle option
*/
static bool
@ -4851,7 +4877,7 @@ void create_thread_to_handle_connection(THD *thd)
(void) pthread_mutex_unlock(&LOCK_thread_count);
pthread_mutex_lock(&LOCK_connection_count);
--connection_count;
(*thd->scheduler->connection_count)--;
pthread_mutex_unlock(&LOCK_connection_count);
statistic_increment(aborted_connects,&LOCK_status);
@ -4900,7 +4926,8 @@ static void create_new_thread(THD *thd)
pthread_mutex_lock(&LOCK_connection_count);
if (connection_count >= max_connections + 1 || abort_loop)
if (*thd->scheduler->connection_count >=
*thd->scheduler->max_connections + 1 || abort_loop)
{
pthread_mutex_unlock(&LOCK_connection_count);
@ -4910,10 +4937,10 @@ static void create_new_thread(THD *thd)
DBUG_VOID_RETURN;
}
++connection_count;
++*thd->scheduler->connection_count;
if (connection_count > max_used_connections)
max_used_connections= connection_count;
if (connection_count + extra_connection_count > max_used_connections)
max_used_connections= connection_count + extra_connection_count;
pthread_mutex_unlock(&LOCK_connection_count);
@ -4930,7 +4957,7 @@ static void create_new_thread(THD *thd)
thread_count++;
thread_scheduler.add_connection(thd);
thd->scheduler->add_connection(thd);
DBUG_VOID_RETURN;
}
@ -4945,7 +4972,8 @@ inline void kill_broken_server()
#if !defined(__NETWARE__)
unix_sock == INVALID_SOCKET ||
#endif
(!opt_disable_networking && ip_sock == INVALID_SOCKET))
(!opt_disable_networking &&
(base_ip_sock == INVALID_SOCKET || extra_ip_sock != INVALID_SOCKET)))
{
select_thread_in_use = 0;
/* The following call will never return */
@ -4964,26 +4992,38 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
{
my_socket sock,new_sock;
uint error_count=0;
uint max_used_connection= (uint) (max(ip_sock,unix_sock)+1);
uint max_used_connection;
fd_set readFDs,clientFDs;
THD *thd;
struct sockaddr_in cAddr;
int ip_flags=0,socket_flags=0,flags;
int base_ip_flags=0, extra_ip_flags= 0, socket_flags=0, flags;
st_vio *vio_tmp;
DBUG_ENTER("handle_connections_sockets");
max_used_connection= (uint) (max(base_ip_sock, unix_sock));
max_used_connection= (uint) (max(extra_ip_sock, (int) max_used_connection));
max_used_connection++;
LINT_INIT(new_sock);
(void) my_pthread_getprio(pthread_self()); // For debugging
FD_ZERO(&clientFDs);
if (ip_sock != INVALID_SOCKET)
if (base_ip_sock != INVALID_SOCKET)
{
FD_SET(ip_sock,&clientFDs);
FD_SET(base_ip_sock, &clientFDs);
#ifdef HAVE_FCNTL
ip_flags = fcntl(ip_sock, F_GETFL, 0);
base_ip_flags = fcntl(base_ip_sock, F_GETFL, 0);
#endif
}
if (extra_ip_sock != INVALID_SOCKET)
{
FD_SET(extra_ip_sock, &clientFDs);
#ifdef HAVE_FCNTL
extra_ip_flags = fcntl(extra_ip_sock, F_GETFL, 0);
#endif
}
#ifdef HAVE_SYS_UN_H
FD_SET(unix_sock,&clientFDs);
#ifdef HAVE_FCNTL
@ -5021,14 +5061,22 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
#ifdef HAVE_SYS_UN_H
if (FD_ISSET(unix_sock,&readFDs))
{
sock = unix_sock;
sock= unix_sock;
flags= socket_flags;
}
else
#endif
{
sock = ip_sock;
flags= ip_flags;
if (FD_ISSET(base_ip_sock,&readFDs))
{
sock= base_ip_sock;
flags= base_ip_flags;
}
else
{
sock= extra_ip_sock;
flags= extra_ip_flags;
}
}
#if !defined(NO_FCNTL_NONBLOCK)
@ -5081,7 +5129,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
#ifdef HAVE_LIBWRAP
{
if (sock == ip_sock)
if (sock == base_ip_sock || sock == extra_ip_sock)
{
struct request_info req;
signal(SIGCHLD, SIG_DFL);
@ -5161,6 +5209,11 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
if (sock == unix_sock)
thd->security_ctx->host=(char*) my_localhost;
if (sock == extra_ip_sock)
{
thd->extra_port= 1;
thd->scheduler= &extra_thread_scheduler;
}
create_new_thread(thd);
}
@ -5502,6 +5555,7 @@ enum options_mysqld
OPT_SKIP_GRANT, OPT_SKIP_LOCK,
OPT_ENABLE_LOCK, OPT_USE_LOCKING,
OPT_SOCKET, OPT_UPDATE_LOG,
OPT_EXTRA_PORT,
OPT_BIN_LOG, OPT_SKIP_RESOLVE,
OPT_SKIP_NETWORKING, OPT_BIN_LOG_INDEX,
OPT_BIND_ADDRESS, OPT_PID_FILE,
@ -5660,6 +5714,7 @@ enum options_mysqld
OPT_MIN_EXAMINED_ROW_LIMIT,
OPT_LOG_SLOW_SLAVE_STATEMENTS,
OPT_DEBUG_CRC, OPT_DEBUG_ON, OPT_OLD_MODE,
OPT_TEST_IGNORE_WRONG_OPTIONS,
OPT_SLAVE_EXEC_MODE,
OPT_DEADLOCK_SEARCH_DEPTH_SHORT,
OPT_DEADLOCK_SEARCH_DEPTH_LONG,
@ -5884,6 +5939,15 @@ struct my_option my_long_options[] =
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
/* We must always support the next option to make scripts like mysqltest
easier to do */
{"extra-port", OPT_EXTRA_PORT,
"Extra port number to use for tcp-connections in a one-thread-per-connection manner. 0 means don't use another port",
(uchar**) &mysqld_extra_port,
(uchar**) &mysqld_extra_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"extra-max-connections", OPT_MAX_CONNECTIONS,
"The number of connections on 'extra-port.",
(uchar**) &extra_max_connections,
(uchar**) &extra_max_connections, 0, GET_ULONG, REQUIRED_ARG, 1, 1, 100000,
0, 1, 0},
{"gdb", OPT_DEBUGGING,
"Set up signals usable for debugging",
(uchar**) &opt_debugging, (uchar**) &opt_debugging,
@ -6471,6 +6535,10 @@ log and this option does nothing anymore.",
(uchar**) &use_temp_pool, (uchar**) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1,
0, 0, 0, 0, 0},
{"test-ignore-wrong-options", OPT_TEST_IGNORE_WRONG_OPTIONS,
"Ignore wrong enums values in command line arguments. Usefull only for test scripts",
(uchar**) &opt_ignore_wrong_options, (uchar**) &opt_ignore_wrong_options,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"timed_mutexes", OPT_TIMED_MUTEXES,
"Specify whether to time mutexes (only InnoDB mutexes are currently supported)",
(uchar**) &timed_mutexes, (uchar**) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0,
@ -7648,7 +7716,7 @@ static int mysql_init_variables(void)
slave_exec_mode_options= (uint)
find_bit_type_or_exit(slave_exec_mode_str, &slave_exec_mode_typelib, NULL);
opt_specialflag= SPECIAL_ENGLISH;
unix_sock= ip_sock= INVALID_SOCKET;
unix_sock= base_ip_sock= extra_ip_sock= INVALID_SOCKET;
mysql_home_ptr= mysql_home;
pidfile_name_ptr= pidfile_name;
log_error_file_ptr= log_error_file;
@ -7820,6 +7888,38 @@ static int mysql_init_variables(void)
}
/**
Find type for option
If opt_ignore_wrong_options is set, ignore wrong values; otherwise exit.
@return
@retval 0 ok ; *result is updated
@retval 1 error ; *result is not touched
*/
static my_bool find_opt_type(const char *x, TYPELIB *typelib,
const char *option, int *result)
{
int res;
if (opt_ignore_wrong_options)
{
if ((res= find_type_with_warning(x, typelib, option)) <= 0)
return 1;
}
else
res= find_type_or_exit(x, typelib, option);
*result= res;
return 0;
}
/**
Get next option from the command line
*/
my_bool
mysqld_get_one_option(int optid,
const struct my_option *opt __attribute__((unused)),
@ -7923,10 +8023,12 @@ mysqld_get_one_option(int optid,
case (int) OPT_INIT_RPL_ROLE:
{
int role;
role= find_type_or_exit(argument, &rpl_role_typelib, opt->name);
if (!find_opt_type(argument, &rpl_role_typelib, opt->name, &role))
{
rpl_status = (role == 1) ? RPL_AUTH_MASTER : RPL_IDLE_SLAVE;
break;
}
}
case (int)OPT_REPLICATE_IGNORE_DB:
{
rpl_filter->add_ignore_db(argument);
@ -7979,8 +8081,10 @@ mysqld_get_one_option(int optid,
case OPT_BINLOG_FORMAT:
{
int id;
id= find_type_or_exit(argument, &binlog_format_typelib, opt->name);
if (!find_opt_type(argument, &binlog_format_typelib, opt->name, &id))
{
global_system_variables.binlog_format= opt_binlog_format_id= id - 1;
}
break;
}
case (int)OPT_BINLOG_DO_DB:
@ -8093,7 +8197,7 @@ mysqld_get_one_option(int optid,
exit(1);
#endif
opt_disable_networking=1;
mysqld_port=0;
mysqld_port= mysqld_extra_port= 0;
break;
case (int) OPT_SKIP_SHOW_DB:
opt_skip_show_db=1;
@ -8192,7 +8296,7 @@ mysqld_get_one_option(int optid,
else
{
int type;
type= find_type_or_exit(argument, &delay_key_write_typelib, opt->name);
if (!find_opt_type(argument, &delay_key_write_typelib, opt->name, &type))
delay_key_write_options= (uint) type-1;
}
break;
@ -8203,7 +8307,7 @@ mysqld_get_one_option(int optid,
case OPT_TX_ISOLATION:
{
int type;
type= find_type_or_exit(argument, &tx_isolation_typelib, opt->name);
if (!find_opt_type(argument, &tx_isolation_typelib, opt->name, &type))
global_system_variables.tx_isolation= (type-1);
break;
}
@ -8233,7 +8337,7 @@ mysqld_get_one_option(int optid,
break;
case OPT_NDB_DISTRIBUTION:
int id;
id= find_type_or_exit(argument, &ndb_distribution_typelib, opt->name);
if (!find_opt_type(argument, &ndb_distribution_typelib, opt->name, &id))
opt_ndb_distribution_id= (enum ndb_distribution)(id-1);
break;
case OPT_NDB_EXTRA_LOGGING:
@ -8274,9 +8378,8 @@ mysqld_get_one_option(int optid,
myisam_concurrent_insert= 0; /* --skip-concurrent-insert */
break;
case OPT_TC_HEURISTIC_RECOVER:
tc_heuristic_recover= find_type_or_exit(argument,
&tc_heuristic_recover_typelib,
opt->name);
find_opt_type(argument, &tc_heuristic_recover_typelib,
opt->name, (int*) &tc_heuristic_recover);
break;
case OPT_MYISAM_STATS_METHOD:
{
@ -8285,8 +8388,9 @@ mysqld_get_one_option(int optid,
LINT_INIT(method_conv);
myisam_stats_method_str= argument;
method= find_type_or_exit(argument, &myisam_stats_method_typelib,
opt->name);
if (!find_opt_type(argument, &myisam_stats_method_typelib,
opt->name, &method))
{
switch (method-1) {
case 2:
method_conv= MI_STATS_METHOD_IGNORE_NULLS;
@ -8300,6 +8404,7 @@ mysqld_get_one_option(int optid,
break;
}
global_system_variables.myisam_stats_method= method_conv;
}
break;
}
case OPT_SQL_MODE:
@ -8317,8 +8422,9 @@ mysqld_get_one_option(int optid,
break;
case OPT_THREAD_HANDLING:
{
global_system_variables.thread_handling=
find_type_or_exit(argument, &thread_handling_typelib, opt->name)-1;
int id;
if (!find_opt_type(argument, &thread_handling_typelib, opt->name, &id))
global_system_variables.thread_handling= id - 1;
break;
}
case OPT_FT_BOOLEAN_SYNTAX:
@ -8338,6 +8444,10 @@ mysqld_get_one_option(int optid,
lower_case_table_names= argument ? atoi(argument) : 1;
lower_case_table_names_used= 1;
break;
case OPT_TEST_IGNORE_WRONG_OPTIONS:
/* Used for testing options */
opt_ignore_wrong_options= 1;
break;
}
return 0;
}
@ -8483,14 +8593,19 @@ static void get_options(int *argc,char **argv)
#ifdef EMBEDDED_LIBRARY
one_thread_scheduler(&thread_scheduler);
one_thread_scheduler(&extra_thread_scheduler);
#else
if (global_system_variables.thread_handling <=
SCHEDULER_ONE_THREAD_PER_CONNECTION)
one_thread_per_connection_scheduler(&thread_scheduler);
one_thread_per_connection_scheduler(&thread_scheduler, &max_connections,
&connection_count);
else if (global_system_variables.thread_handling == SCHEDULER_NO_THREADS)
one_thread_scheduler(&thread_scheduler);
else
pool_of_threads_scheduler(&thread_scheduler); /* purecov: tested */
one_thread_per_connection_scheduler(&extra_thread_scheduler,
&extra_max_connections,
&extra_connection_count);
#endif
}
@ -8715,12 +8830,9 @@ skip: ;
@param dir_name Directory to test
@retval
-1 Don't know (Test failed)
@retval
0 File system is case sensitive
@retval
1 File system is case insensitive
@retval -1 Don't know (Test failed)
@retval 0 File system is case sensitive
@retval 1 File system is case insensitive
*/
static int test_if_case_insensitive(const char *dir_name)
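Putting the pieces above together, a hedged example of how the new options would be combined when starting the server (the port number and connection limit are placeholders, not recommendations). Ordinary clients on the main port are handled by the pool, while connections to the extra port get thd->extra_port set and are dispatched through extra_thread_scheduler, i.e. the old one-thread-per-connection behaviour:

  mysqld --thread-handling=pool-of-threads \
         --extra-port=3307 --extra-max-connections=10

  # e.g. an administrative client that must always get a dedicated thread:
  mysql --host=127.0.0.1 --port=3307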


@ -22,6 +22,9 @@
#endif
#include <mysql_priv.h>
#if MYSQL_VERSION_ID >= 60000
#include "sql_audit.h"
#endif
/*
'Dummy' functions to be used when we don't need any handling for a scheduler
@ -29,7 +32,7 @@
*/
static bool init_dummy(void) {return 0;}
static void post_kill_dummy(THD* thd) {}
static void post_kill_dummy(THD *thd) {}
static void end_dummy(void) {}
static bool end_thread_dummy(THD *thd, bool cache_thread) { return 0; }
@ -63,9 +66,12 @@ static bool no_threads_end(THD *thd, bool put_in_cache)
Initialize scheduler for --thread-handling=no-threads
*/
void one_thread_scheduler(scheduler_functions* func)
void one_thread_scheduler(scheduler_functions *func)
{
func->max_threads= 1;
max_connections= 1;
func->max_connections= &max_connections;
func->connection_count= &connection_count;
#ifndef EMBEDDED_LIBRARY
func->add_connection= handle_connection_in_main_thread;
#endif
@ -79,10 +85,674 @@ void one_thread_scheduler(scheduler_functions* func)
*/
#ifndef EMBEDDED_LIBRARY
void one_thread_per_connection_scheduler(scheduler_functions* func)
void one_thread_per_connection_scheduler(scheduler_functions *func,
ulong *arg_max_connections,
uint *arg_connection_count)
{
func->max_threads= max_connections;
func->max_threads= *arg_max_connections + 1;
func->max_connections= arg_max_connections;
func->connection_count= arg_connection_count;
func->add_connection= create_thread_to_handle_connection;
func->end_thread= one_thread_per_connection_end;
}
#endif /* EMBEDDED_LIBRARY */
#if defined(HAVE_LIBEVENT) && HAVE_POOL_OF_THREADS == 1
#include "event.h"
static struct event_base *base;
static uint created_threads, killed_threads;
static bool kill_pool_threads;
static struct event thd_add_event;
static struct event thd_kill_event;
static pthread_mutex_t LOCK_thd_add; /* protects thds_need_adding */
static LIST *thds_need_adding; /* list of thds to add to libevent queue */
static int thd_add_pair[2]; /* pipe to signal add a connection to libevent*/
static int thd_kill_pair[2]; /* pipe to signal kill a connection in libevent */
/*
LOCK_event_loop protects the non-thread safe libevent calls (event_add and
event_del) and thds_need_processing and thds_waiting_for_io.
*/
static pthread_mutex_t LOCK_event_loop;
static LIST *thds_need_processing; /* list of thds that needs some processing */
static LIST *thds_waiting_for_io; /* list of thds with added events */
pthread_handler_t libevent_thread_proc(void *arg);
static void libevent_end();
static bool libevent_needs_immediate_processing(THD *thd);
static void libevent_connection_close(THD *thd);
static bool libevent_should_close_connection(THD* thd);
static void libevent_thd_add(THD* thd);
void libevent_io_callback(int Fd, short Operation, void *ctx);
void libevent_add_thd_callback(int Fd, short Operation, void *ctx);
void libevent_kill_thd_callback(int Fd, short Operation, void *ctx);
/*
Create a pipe and set to non-blocking.
Returns TRUE if there is an error.
*/
static bool init_socketpair(int sock_pair[])
{
sock_pair[0]= sock_pair[1]= -1;
return (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, sock_pair) < 0 ||
evutil_make_socket_nonblocking(sock_pair[0]) == -1 ||
evutil_make_socket_nonblocking(sock_pair[1]) == -1);
}
static void close_socketpair(int sock_pair[])
{
if (sock_pair[0] != -1)
EVUTIL_CLOSESOCKET(sock_pair[0]);
if (sock_pair[1] != -1)
EVUTIL_CLOSESOCKET(sock_pair[1]);
}
/*
thd_scheduler keeps the link between THD and events.
It's embedded in the THD class.
*/
thd_scheduler::thd_scheduler()
: logged_in(FALSE), io_event(NULL), thread_attached(FALSE)
{
#ifndef DBUG_OFF
dbug_explain[0]= '\0';
set_explain= FALSE;
#endif
}
thd_scheduler::~thd_scheduler()
{
my_free(io_event, MYF(MY_ALLOW_ZERO_PTR));
}
bool thd_scheduler::init(THD *parent_thd)
{
io_event=
(struct event*)my_malloc(sizeof(*io_event),MYF(MY_ZEROFILL|MY_WME));
if (!io_event)
{
sql_print_error("Memory allocation error in thd_scheduler::init\n");
return TRUE;
}
event_set(io_event, (int)parent_thd->net.vio->sd, EV_READ,
libevent_io_callback, (void*)parent_thd);
list.data= parent_thd;
return FALSE;
}
/*
Attach/associate the connection with the OS thread, for command processing.
*/
bool thd_scheduler::thread_attach()
{
DBUG_ASSERT(!thread_attached);
THD* thd = (THD*)list.data;
if (libevent_should_close_connection(thd) ||
setup_connection_thread_globals(thd))
{
return TRUE;
}
my_errno= 0;
thd->mysys_var->abort= 0;
thread_attached= TRUE;
#ifndef DBUG_OFF
/*
When we attach the thread for a connection for the first time,
we know that there is no session value set yet. Thus
the initial setting of set_explain to FALSE is OK.
*/
if (set_explain)
DBUG_SET(dbug_explain);
#endif
return FALSE;
}
/*
Detach/disassociate the connection with the OS thread.
*/
void thd_scheduler::thread_detach()
{
if (thread_attached)
{
THD* thd = (THD*)list.data;
pthread_mutex_lock(&thd->LOCK_delete);
thd->mysys_var= NULL;
pthread_mutex_unlock(&thd->LOCK_delete);
thread_attached= FALSE;
#ifndef DBUG_OFF
/*
If during the session @@session.dbug was assigned, the
dbug options/state has been pushed. Check if this is the
case, to be able to restore the state when we attach this
logical connection to a physical thread.
*/
if (_db_is_pushed_())
{
set_explain= TRUE;
if (DBUG_EXPLAIN(dbug_explain, sizeof(dbug_explain)))
sql_print_error("thd_scheduler: DBUG_EXPLAIN buffer is too small");
}
/* DBUG_POP() is a no-op in case there is no session state */
DBUG_POP();
#endif
}
}
/**
Create all threads for the thread pool
NOTES
After the threads are created, we wait until all of them have signaled that
they have started before we return
RETURN
0 ok
1 We got an error creating the thread pool
In this case we will abort all created threads
*/
static bool libevent_init(void)
{
uint i;
DBUG_ENTER("libevent_init");
base= (struct event_base *) event_init();
created_threads= 0;
killed_threads= 0;
kill_pool_threads= FALSE;
pthread_mutex_init(&LOCK_event_loop, NULL);
pthread_mutex_init(&LOCK_thd_add, NULL);
/* set up sockets used to add new thds to the event pool */
if (init_socketpair(thd_add_pair))
{
sql_print_error("init_socketpair(thd_add_spair) error in libevent_init");
DBUG_RETURN(1);
}
/* set up sockets used to kill thds in the event queue */
if (init_socketpair(thd_kill_pair))
{
sql_print_error("init_socketpair(thd_kill_pair) error in libevent_init");
close_socketpair(thd_add_pair);
DBUG_RETURN(1);
}
event_set(&thd_add_event, thd_add_pair[0], EV_READ|EV_PERSIST,
libevent_add_thd_callback, NULL);
event_set(&thd_kill_event, thd_kill_pair[0], EV_READ|EV_PERSIST,
libevent_kill_thd_callback, NULL);
if (event_add(&thd_add_event, NULL) || event_add(&thd_kill_event, NULL))
{
sql_print_error("thd_add_event event_add error in libevent_init");
libevent_end();
DBUG_RETURN(1);
}
/* Set up the thread pool */
created_threads= killed_threads= 0;
pthread_mutex_lock(&LOCK_thread_count);
for (i= 0; i < thread_pool_size; i++)
{
pthread_t thread;
int error;
if ((error= pthread_create(&thread, &connection_attrib,
libevent_thread_proc, 0)))
{
sql_print_error("Can't create completion port thread (error %d)",
error);
pthread_mutex_unlock(&LOCK_thread_count);
libevent_end(); // Cleanup
DBUG_RETURN(TRUE);
}
}
/* Wait until all threads are created */
while (created_threads != thread_pool_size)
pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
pthread_mutex_unlock(&LOCK_thread_count);
DBUG_PRINT("info", ("%u threads created", (uint) thread_pool_size));
DBUG_RETURN(FALSE);
}
/*
This is called when data is ready on the socket.
NOTES
This is only called by the thread that owns LOCK_event_loop.
We add the thd that got the data to thds_need_processing, and
cause the libevent event_loop() to terminate. Then this same thread will
return from event_loop and pick the thd value back up for processing.
*/
void libevent_io_callback(int, short, void *ctx)
{
safe_mutex_assert_owner(&LOCK_event_loop);
THD *thd= (THD*)ctx;
thds_waiting_for_io= list_delete(thds_waiting_for_io, &thd->event_scheduler.list);
thds_need_processing= list_add(thds_need_processing, &thd->event_scheduler.list);
}
/*
This is called when we have a thread we want to be killed.
NOTES
This is only called by the thread that owns LOCK_event_loop.
*/
void libevent_kill_thd_callback(int Fd, short, void*)
{
safe_mutex_assert_owner(&LOCK_event_loop);
/* clear the pending events */
char c;
while (recv(Fd, &c, sizeof(c), 0) == sizeof(c))
{}
LIST* list= thds_waiting_for_io;
while (list)
{
THD *thd= (THD*)list->data;
list= list_rest(list);
if (thd->killed == THD::KILL_CONNECTION)
{
/*
Delete from libevent and add to the processing queue.
*/
event_del(thd->event_scheduler.io_event);
thds_waiting_for_io= list_delete(thds_waiting_for_io,
&thd->event_scheduler.list);
thds_need_processing= list_add(thds_need_processing,
&thd->event_scheduler.list);
}
}
}
/*
This is used to add connections to the pool. This callback is invoked from
the libevent event_loop() call whenever the thd_add_pair[1] has a byte
written to it.
NOTES
This is only called by the thread that owns LOCK_event_loop.
*/
void libevent_add_thd_callback(int Fd, short, void *)
{
safe_mutex_assert_owner(&LOCK_event_loop);
/* clear the pending events */
char c;
while (recv(Fd, &c, sizeof(c), 0) == sizeof(c))
{}
pthread_mutex_lock(&LOCK_thd_add);
while (thds_need_adding)
{
/* pop the first thd off the list */
THD* thd= (THD*)thds_need_adding->data;
thds_need_adding= list_delete(thds_need_adding, thds_need_adding);
pthread_mutex_unlock(&LOCK_thd_add);
if (!thd->event_scheduler.logged_in || libevent_should_close_connection(thd))
{
/*
Add thd to thds_need_processing list. If it needs closing we'll close
it outside of event_loop().
*/
thds_need_processing= list_add(thds_need_processing,
&thd->event_scheduler.list);
}
else
{
/* Add to libevent */
if (event_add(thd->event_scheduler.io_event, NULL))
{
sql_print_error("event_add error in libevent_add_thd_callback");
libevent_connection_close(thd);
}
else
{
thds_waiting_for_io= list_add(thds_waiting_for_io,
&thd->event_scheduler.list);
}
}
pthread_mutex_lock(&LOCK_thd_add);
}
pthread_mutex_unlock(&LOCK_thd_add);
}
/**
Notify the thread pool about a new connection
NOTES
LOCK_thread_count is locked on entry. This function MUST unlock it!
*/
static void libevent_add_connection(THD *thd)
{
DBUG_ENTER("libevent_add_connection");
DBUG_PRINT("enter", ("thd: %p thread_id: %lu",
thd, thd->thread_id));
if (thd->event_scheduler.init(thd))
{
sql_print_error("Scheduler init error in libevent_add_new_connection");
pthread_mutex_unlock(&LOCK_thread_count);
libevent_connection_close(thd);
DBUG_VOID_RETURN;
}
threads.append(thd);
libevent_thd_add(thd);
pthread_mutex_unlock(&LOCK_thread_count);
DBUG_VOID_RETURN;
}
/**
@brief Signal a waiting connection it's time to die.
@details This function will signal libevent the THD should be killed.
Either the global LOCK_thread_count or the THD's LOCK_delete must be locked
upon entry.
@param[in] thd The connection to kill
*/
static void libevent_post_kill_notification(THD *)
{
/*
Note, we just wake up libevent with an event that a THD should be killed,
It will search its list of thds for thd->killed == KILL_CONNECTION to
find the THDs it should kill.
So we don't actually tell it which one and we don't actually use the
THD being passed to us, but that's just a design detail that could change
later.
*/
char c= 0;
send(thd_kill_pair[1], &c, sizeof(c), 0);
}
/*
Close and delete a connection.
*/
static void libevent_connection_close(THD *thd)
{
DBUG_ENTER("libevent_connection_close");
DBUG_PRINT("enter", ("thd: %p", thd));
thd->killed= THD::KILL_CONNECTION; // Avoid error messages
if (thd->net.vio->sd >= 0) // not already closed
{
end_connection(thd);
close_connection(thd, 0, 1);
}
thd->event_scheduler.thread_detach();
unlink_thd(thd); /* locks LOCK_thread_count and deletes thd */
pthread_mutex_unlock(&LOCK_thread_count);
DBUG_VOID_RETURN;
}
/*
Returns true if we should close and delete a THD connection.
*/
static bool libevent_should_close_connection(THD* thd)
{
return thd->net.error ||
thd->net.vio == 0 ||
thd->killed == THD::KILL_CONNECTION;
}
/*
libevent_thread_proc is the outer loop of each thread in the thread pool.
These procs only return/terminate on shutdown (kill_pool_threads == true).
*/
pthread_handler_t libevent_thread_proc(void *arg)
{
if (init_new_connection_handler_thread())
{
my_thread_global_end();
sql_print_error("libevent_thread_proc: my_thread_init() failed");
exit(1);
}
DBUG_ENTER("libevent_thread_proc");
/*
Signal libevent_init() when all threads have been created and are ready to
receive events.
*/
(void) pthread_mutex_lock(&LOCK_thread_count);
created_threads++;
thread_created++;
if (created_threads == thread_pool_size)
(void) pthread_cond_signal(&COND_thread_count);
(void) pthread_mutex_unlock(&LOCK_thread_count);
for (;;)
{
THD *thd= NULL;
(void) pthread_mutex_lock(&LOCK_event_loop);
/* get thd(s) to process */
while (!thds_need_processing)
{
if (kill_pool_threads)
{
/* the flag that we should die has been set */
(void) pthread_mutex_unlock(&LOCK_event_loop);
goto thread_exit;
}
event_loop(EVLOOP_ONCE);
}
/* pop the first thd off the list */
thd= (THD*)thds_need_processing->data;
thds_need_processing= list_delete(thds_need_processing,
thds_need_processing);
(void) pthread_mutex_unlock(&LOCK_event_loop);
/* now we process the connection (thd) */
/* set up the thd<->thread links. */
thd->thread_stack= (char*) &thd;
if (thd->event_scheduler.thread_attach())
{
libevent_connection_close(thd);
continue;
}
/* is the connection logged in yet? */
if (!thd->event_scheduler.logged_in)
{
DBUG_PRINT("info", ("init new connection. sd: %d",
thd->net.vio->sd));
lex_start(thd);
if (login_connection(thd))
{
/* Failed to log in */
libevent_connection_close(thd);
continue;
}
else
{
/* login successful */
#if MYSQL_VERSION_ID >= 60000
MYSQL_CONNECTION_START(thd->thread_id, thd->security_ctx->priv_user,
(char *) thd->security_ctx->host_or_ip);
#endif
thd->event_scheduler.logged_in= TRUE;
prepare_new_connection_state(thd);
if (!libevent_needs_immediate_processing(thd))
continue; /* New connection is now waiting for data in libevent*/
}
}
do
{
/* Process a query */
if (do_command(thd))
{
libevent_connection_close(thd);
break;
}
} while (libevent_needs_immediate_processing(thd));
}
thread_exit:
DBUG_PRINT("exit", ("ending thread"));
(void) pthread_mutex_lock(&LOCK_thread_count);
killed_threads++;
pthread_cond_broadcast(&COND_thread_count);
(void) pthread_mutex_unlock(&LOCK_thread_count);
my_thread_end();
pthread_exit(0);
DBUG_RETURN(0); /* purify: deadcode */
}
/*
Returns TRUE if the connection needs immediate processing and FALSE if
it has instead been queued for libevent processing or closed.
*/
static bool libevent_needs_immediate_processing(THD *thd)
{
if (libevent_should_close_connection(thd))
{
libevent_connection_close(thd);
return FALSE;
}
/*
If there is more data in the socket buffer, return TRUE to process another command.
Note: we cannot hand the socket back to libevent here, because the whole request
might already be buffered and we would then never receive an event.
*/
if (vio_pending(thd->net.vio) > 0)
return TRUE;
thd->event_scheduler.thread_detach();
libevent_thd_add(thd);
return FALSE;
}
/*
Queues a THD for libevent processing.
This call does not actually register the event with libevent.
Instead, it places the THD onto a queue and signals libevent by writing
a byte into thd_add_pair, which will cause our libevent_add_thd_callback to
be invoked which will find the THD on the queue and add it to libevent.
*/
static void libevent_thd_add(THD* thd)
{
char c= 0;
/* release any audit resources, this thd is going to sleep */
#if MYSQL_VERSION_ID >= 60000
mysql_audit_release(thd);
#endif
pthread_mutex_lock(&LOCK_thd_add);
/* queue for libevent */
thds_need_adding= list_add(thds_need_adding, &thd->event_scheduler.list);
/* notify libevent */
send(thd_add_pair[1], &c, sizeof(c), 0);
pthread_mutex_unlock(&LOCK_thd_add);
}
/**
Wait until all pool threads have been deleted for clean shutdown
*/
static void libevent_end()
{
DBUG_ENTER("libevent_end");
DBUG_PRINT("enter", ("created_threads: %d killed_threads: %u",
created_threads, killed_threads));
/*
check if initialized. This may not be the case if get an error at
startup
*/
if (!base)
DBUG_VOID_RETURN;
(void) pthread_mutex_lock(&LOCK_thread_count);
kill_pool_threads= TRUE;
while (killed_threads != created_threads)
{
/* wake up the event loop */
char c= 0;
send(thd_add_pair[1], &c, sizeof(c), 0);
pthread_cond_wait(&COND_thread_count, &LOCK_thread_count);
}
(void) pthread_mutex_unlock(&LOCK_thread_count);
event_del(&thd_add_event);
close_socketpair(thd_add_pair);
event_del(&thd_kill_event);
close_socketpair(thd_kill_pair);
event_base_free(base);
base= 0;
(void) pthread_mutex_destroy(&LOCK_event_loop);
(void) pthread_mutex_destroy(&LOCK_thd_add);
DBUG_VOID_RETURN;
}
void pool_of_threads_scheduler(scheduler_functions* func)
{
func->max_threads= thread_pool_size;
func->max_connections= &max_connections;
func->connection_count= &connection_count;
func->init= libevent_init;
func->end= libevent_end;
func->post_kill_notification= libevent_post_kill_notification;
func->add_connection= libevent_add_connection;
}
#endif


@ -28,7 +28,8 @@ class THD;
class scheduler_functions
{
public:
uint max_threads;
uint max_threads, *connection_count;
ulong *max_connections;
bool (*init)(void);
bool (*init_new_connection_thread)(void);
void (*add_connection)(THD *thd);
@ -45,16 +46,45 @@ enum scheduler_types
SCHEDULER_POOL_OF_THREADS
};
void one_thread_per_connection_scheduler(scheduler_functions* func);
void one_thread_per_connection_scheduler(scheduler_functions *func,
ulong *arg_max_connections,
uint *arg_connection_count);
void one_thread_scheduler(scheduler_functions* func);
enum pool_command_op
#if defined(HAVE_LIBEVENT) && !defined(EMBEDDED_LIBRARY)
#define HAVE_POOL_OF_THREADS 1
struct event;
class thd_scheduler
{
NOT_IN_USE_OP= 0, NORMAL_OP= 1, CONNECT_OP, KILL_OP, DIE_OP
public:
bool logged_in;
struct event* io_event;
LIST list;
bool thread_attached; /* Indicates if THD is attached to the OS thread */
#ifndef DBUG_OFF
char dbug_explain[256];
bool set_explain;
#endif
thd_scheduler();
~thd_scheduler();
bool init(THD* parent_thd);
bool thread_attach();
void thread_detach();
};
void pool_of_threads_scheduler(scheduler_functions* func);
#else
#define HAVE_POOL_OF_THREADS 0 /* For easier tests */
#define pool_of_threads_scheduler(A) one_thread_per_connection_scheduler((A), &max_connections, &connection_count)
class thd_scheduler
{};
#endif


@@ -265,6 +265,13 @@ static sys_var_long_ptr sys_delayed_queue_size(&vars, "delayed_queue_size",
 static sys_var_event_scheduler sys_event_scheduler(&vars, "event_scheduler");
 #endif
+static sys_var_const sys_extra_port(&vars, "extra_port",
+                                    OPT_GLOBAL, SHOW_INT,
+                                    (uchar*) &mysqld_extra_port);
+static sys_var_long_ptr sys_extra_max_connections(&vars,
+                                                  "extra_max_connections",
+                                                  &extra_max_connections,
+                                                  fix_max_connections);
 static sys_var_long_ptr sys_expire_logs_days(&vars, "expire_logs_days",
                                              &expire_logs_days);
 static sys_var_bool_ptr sys_flush(&vars, "flush", &myisam_flush);
@@ -1357,7 +1364,7 @@ static int check_max_delayed_threads(THD *thd, set_var *var)
 static void fix_max_connections(THD *thd, enum_var_type type)
 {
 #ifndef EMBEDDED_LIBRARY
-  resize_thr_alarm(max_connections +
+  resize_thr_alarm(max_connections + extra_max_connections +
                    global_system_variables.max_insert_delayed_threads + 10);
 #endif
 }

View File

@@ -572,6 +572,8 @@ THD::THD()
   init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
   stmt_arena= this;
   thread_stack= 0;
+  scheduler= &thread_scheduler;                 // Will be fixed later
+  extra_port= 0;
   catalog= (char*)"std";                        // the only catalog we have for now
   main_security_ctx.init();
   security_ctx= &main_security_ctx;

View File

@@ -1260,6 +1260,7 @@ public:
   struct st_mysql_stmt *current_stmt;
 #endif
   NET net;                              // client connection descriptor
+  scheduler_functions *scheduler;       // Scheduler for this connection
   MEM_ROOT warn_root;                   // For warnings and errors
   Protocol *protocol;                   // Current protocol
   Protocol_text protocol_text;          // Normal protocol
@@ -1722,6 +1723,8 @@ public:
   bool locked, some_tables_deleted;
   bool last_cuted_field;
   bool no_errors, password;
+  bool extra_port;                      /* If extra connection */
+
   /**
     Set to TRUE if execution of the current compound statement
     can not continue. In particular, disables activation of
@@ -2206,7 +2209,7 @@ public:
     *p_db_length= db_length;
     return FALSE;
   }
-  thd_scheduler scheduler;
+  thd_scheduler event_scheduler;
 public:
   /**

View File

@@ -402,11 +402,15 @@ check_user(THD *thd, enum enum_server_command command,
   if (check_count)
   {
-    pthread_mutex_lock(&LOCK_connection_count);
-    bool count_ok= connection_count <= max_connections ||
-                   (thd->main_security_ctx.master_access & SUPER_ACL);
-    VOID(pthread_mutex_unlock(&LOCK_connection_count));
+    bool count_ok= 1;
+    if (!(thd->main_security_ctx.master_access & SUPER_ACL))
+    {
+      pthread_mutex_lock(&LOCK_connection_count);
+      count_ok= (*thd->scheduler->connection_count <=
+                 *thd->scheduler->max_connections);
+      VOID(pthread_mutex_unlock(&LOCK_connection_count));
+    }
     if (!count_ok)
     {                                         // too many connections
       my_error(ER_CON_COUNT_ERROR, MYF(0));
@@ -917,7 +921,7 @@ bool setup_connection_thread_globals(THD *thd)
   {
     close_connection(thd, ER_OUT_OF_RESOURCES, 1);
     statistic_increment(aborted_connects,&LOCK_status);
-    thread_scheduler.end_thread(thd, 0);
+    thd->scheduler->end_thread(thd, 0);
     return 1;                                 // Error
   }
   return 0;
@@ -939,8 +943,7 @@ bool setup_connection_thread_globals(THD *thd)
     1  error
 */
 
-static bool login_connection(THD *thd)
+bool login_connection(THD *thd)
 {
   NET *net= &thd->net;
   int error;
@@ -978,7 +981,7 @@ static bool login_connection(THD *thd)
   This mainly updates status variables
 */
 
-static void end_connection(THD *thd)
+void end_connection(THD *thd)
 {
   NET *net= &thd->net;
   plugin_thdvar_cleanup(thd);
@@ -1011,7 +1014,7 @@ static void end_connection(THD *thd)
   Initialize THD to handle queries
 */
 
-static void prepare_new_connection_state(THD* thd)
+void prepare_new_connection_state(THD* thd)
 {
   Security_context *sctx= thd->security_ctx;
@@ -1081,7 +1084,7 @@ pthread_handler_t handle_one_connection(void *arg)
     {
       close_connection(thd, ER_OUT_OF_RESOURCES, 1);
       statistic_increment(aborted_connects,&LOCK_status);
-      thread_scheduler.end_thread(thd,0);
+      thd->scheduler->end_thread(thd,0);
       return 0;
     }
     if (launch_time >= slow_launch_time*1000000L)
@@ -1119,7 +1122,7 @@ pthread_handler_t handle_one_connection(void *arg)
 end_thread:
     close_connection(thd, 0, 1);
-    if (thread_scheduler.end_thread(thd,1))
+    if (thd->scheduler->end_thread(thd,1))
       return 0;                               // Probably no-threads
 
     /*

View File

@@ -637,3 +637,21 @@ int vio_close_shared_memory(Vio * vio)
 }
 #endif /* HAVE_SMEM */
 #endif /* __WIN__ */
+
+/**
+  Number of bytes in the read buffer.
+
+  @return number of bytes in the read buffer or < 0 if error.
+*/
+ssize_t vio_pending(Vio *vio)
+{
+  if (vio->read_pos < vio->read_end)
+    return vio->read_end - vio->read_pos;
+
+#ifdef HAVE_OPENSSL
+  if (vio->ssl_arg)
+    return SSL_pending((SSL*) vio->ssl_arg);
+#endif
+
+  return 0;
+}
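
vio_pending() matters to the event-driven scheduler because bytes that are
already sitting in the VIO read buffer, or inside OpenSSL's own buffer, will
never make the socket poll as readable again.  The sketch below shows the
kind of check a caller would make before handing a connection back to the
poll loop; it is illustrative only, and process_connection_now() is an
invented placeholder rather than a function from the patch.

/* Illustrative only: process now if data is already buffered, else wait on the fd */
static void requeue_or_process(THD *thd)
{
  Vio *vio= thd->net.vio;

  if (vio_pending(vio) > 0)
  {
    /* Waiting on the fd could stall forever; handle the THD immediately. */
    process_connection_now(thd);              /* hypothetical fast path */
  }
  else
  {
    /* Nothing buffered: let libevent signal when the socket is readable. */
    libevent_thd_add(thd);
  }
}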