2000-01-04  Ulrich Drepper  <drepper@cygnus.com>

	* rt/aio.h (struct aioinit): Replace one aio_reserved value with
	aio_idle_time.
	* rt/aio_misc.c: Rewrite to not automatically terminate worker threads
	after the operation is done.  Let them linger for a user-definable
	time.
	* rt/aio_cancel.c: Likewise.
	Patch by William M Shubert <william.m.shubert@intel.com>.

	* sysdeps/i386/i686/add_n.S: New file.

commit a3bfd99994
parent 1d2fc9b3c5
Author: Ulrich Drepper
Date:   2000-01-05 02:12:13 +00:00

5 changed files with 263 additions and 158 deletions
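For context: applications opt into the new behavior through aio_init. A minimal sketch of such a call, assuming _GNU_SOURCE (aio_init and struct aioinit are GNU extensions); the thread and request counts are illustrative, and per this commit the built-in default for aio_idle_time is one second:

    #define _GNU_SOURCE
    #include <aio.h>
    #include <string.h>

    static void
    setup_aio (void)
    {
      struct aioinit init;

      /* Zero everything so the remaining aio_reserved field stays clear.  */
      memset (&init, 0, sizeof (init));
      init.aio_threads = 20;   /* Maximum number of worker threads.  */
      init.aio_num = 64;       /* Expected number of simultaneous requests.  */
      init.aio_idle_time = 5;  /* Idle workers linger 5 seconds before exiting.  */
      aio_init (&init);
    }

After this change, a worker that finds the run list empty blocks on the new condition variable for aio_idle_time seconds before terminating, so a burst of requests reuses warm threads instead of paying thread-creation cost per operation.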

diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2000-01-04  Ulrich Drepper  <drepper@cygnus.com>
+
+	* rt/aio.h (struct aioinit): Replace one aio_reserved value with
+	aio_idle_time.
+	* rt/aio_misc.c: Rewrite to not automatically terminate worker threads
+	after the operation is done.  Let them linger for a user-definable
+	time.
+	* rt/aio_cancel.c: Likewise.
+	Patch by William M Shubert <william.m.shubert@intel.com>.
+
+	* sysdeps/i386/i686/add_n.S: New file.
+
 2000-01-02  Philip Blundell  <philb@gnu.org>
 
 	* sysdeps/unix/sysv/linux/arm/ioperm.c: Use sysctl by preference

diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -5,14 +5,14 @@
 	pthread_cond_{wait,timedwait}).
 	Cancellation won't eat a signal in any of these functions
 	(*required* by POSIX and Single Unix Spec!).
-	* condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
+	* condvar.c: Spontaneous wakeup on pthread_cond_timedwait won't eat a
 	simultaneous condition variable signal (not required by POSIX
 	or Single Unix Spec, but nice).
 	* spinlock.c: __pthread_lock queues back any received restarts
 	that don't belong to it instead of assuming ownership of lock
 	upon any restart; fastlock can no longer be acquired by two threads
 	simultaneously.
-	* restart.h: restarts queue even on kernels that don't have
+	* restart.h: Restarts queue even on kernels that don't have
 	queued real time signals (2.0, early 2.1), thanks to atomic counter,
 	avoiding a rare race condition in pthread_cond_timedwait.

diff --git a/rt/aio.h b/rt/aio.h
--- a/rt/aio.h
+++ b/rt/aio.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+/* Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -93,7 +93,9 @@ struct aioinit
   int aio_usedba;               /* Not used.  */
   int aio_debug;                /* Not used.  */
   int aio_numusers;             /* Not used.  */
-  int aio_reserved[2];
+  int aio_idle_time;            /* Number of seconds before idle thread
+                                   terminates.  */
+  int aio_reserved;
 };
 #endif

diff --git a/rt/aio_cancel.c b/rt/aio_cancel.c
--- a/rt/aio_cancel.c
+++ b/rt/aio_cancel.c
@@ -1,5 +1,5 @@
 /* Cancel requests associated with given file descriptor.
-   Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1998, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
@@ -60,12 +60,18 @@ aio_cancel (fildes, aiocbp)
 	{
 	  last = req;
 	  req = req->next_prio;
+	  if (req == NULL)
+	    {
+	      pthread_mutex_unlock (&__aio_requests_mutex);
+	      __set_errno (EINVAL);
+	      return -1;
+	    }
 	}
 
       /* Don't remove the entry if a thread is already working on it.  */
       if (req->running == allocated)
 	result = AIO_NOTCANCELED;
-      else
+      else if (req->running == yes)
 	{
 	  /* We can remove the entry.  */
 	  if (last != NULL)
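Two behavioral changes are visible in this hunk: a control block that is not found on the descriptor's queue now makes aio_cancel return -1 with errno set to EINVAL rather than running off the end of the list, and only entries still marked `yes` (queued, not yet claimed by a worker) are dequeued. A hedged caller-side sketch; cancel_request is a hypothetical helper and the request setup is elided:

    #include <aio.h>
    #include <errno.h>
    #include <stdio.h>

    static void
    cancel_request (int fd, struct aiocb *cb)
    {
      switch (aio_cancel (fd, cb))
        {
        case AIO_CANCELED:
          puts ("request removed before a worker picked it up");
          break;
        case AIO_NOTCANCELED:
          puts ("a worker is already servicing the request");
          break;
        case AIO_ALLDONE:
          puts ("the request had already completed");
          break;
        case -1:
          if (errno == EINVAL)
            puts ("no such request is queued for this descriptor");
          break;
        }
    }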

diff --git a/rt/aio_misc.c b/rt/aio_misc.c
--- a/rt/aio_misc.c
+++ b/rt/aio_misc.c
@@ -1,5 +1,5 @@
 /* Handle general operations.
-   Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
@@ -19,15 +19,19 @@
    Boston, MA 02111-1307, USA.  */
 
 #include <aio.h>
+#include <assert.h>
 #include <errno.h>
 #include <limits.h>
 #include <pthread.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <sys/stat.h>
+#include <sys/time.h>
 
 #include "aio_misc.h"
 
+static void add_request_to_runlist (struct requestlist *newrequest);
+
 /* Pool of request list entries.  */
 static struct requestlist **pool;
@@ -55,6 +59,9 @@ static struct requestlist *requests;
 /* Number of threads currently running.  */
 static int nthreads;
 
+/* Number of threads waiting for work to arrive.  */
+static int idle_thread_count;
+
 /* These are the values used to optimize the use of AIO.  The user can
    overwrite them by using the `aio_init' function.  */
@@ -66,13 +73,19 @@ static struct aioinit optim =
   0,
   0,
   0,
-  { 0, }
+  1,
+  0
 };
 
 /* Since the list is global we need a mutex protecting it.  */
 pthread_mutex_t __aio_requests_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 
+/* When you add a request to the list and there are idle threads present,
+   you signal this condition variable.  When a thread finishes work, it waits
+   on this condition variable for a time before it actually exits.  */
+pthread_cond_t __aio_new_request_notification = PTHREAD_COND_INITIALIZER;
+
 /* Functions to handle request list pool.  */
 static struct requestlist *
@@ -85,6 +98,8 @@ get_elem (void)
   struct requestlist *new_row;
   size_t new_size;
 
+  assert (sizeof (struct aiocb) == sizeof (struct aiocb64));
+
   /* Compute new size.  */
   new_size = pool_size ? pool_size + ENTRIES_PER_ROW : optim.aio_num;
@@ -210,6 +225,9 @@ __aio_init (const struct aioinit *init)
 		      : init->aio_num & ~ENTRIES_PER_ROW);
     }
 
+  if (init->aio_idle_time != 0)
+    optim.aio_idle_time = init->aio_idle_time;
+
   /* Release the mutex.  */
   pthread_mutex_unlock (&__aio_requests_mutex);
 }
@@ -299,6 +317,7 @@ __aio_enqueue_request (aiocb_union *aiocbp, int operation)
     }
   else
     {
+      running = yes;
       /* Enqueue this request for a new descriptor.  */
       if (last == NULL)
 	{
@@ -320,7 +339,7 @@ __aio_enqueue_request (aiocb_union *aiocbp, int operation)
       newp->next_prio = NULL;
     }
 
-  if (running == no)
+  if (running == yes)
     {
       /* We try to create a new thread for this file descriptor.  The
 	 function which gets called will handle all available requests
@@ -330,8 +349,8 @@ __aio_enqueue_request (aiocb_union *aiocbp, int operation)
 	 If no new thread can be created or if the specified limit of
 	 threads for AIO is reached we queue the request.  */
 
-      /* See if we can create a thread.  */
-      if (nthreads < optim.aio_threads)
+      /* See if we need to and are able to create a thread.  */
+      if (nthreads < optim.aio_threads && idle_thread_count == 0)
 	{
 	  pthread_t thid;
 	  pthread_attr_t attr;
@@ -358,24 +377,14 @@ __aio_enqueue_request (aiocb_union *aiocbp, int operation)
     }
 
   /* Enqueue the request in the run queue if it is not yet running.  */
-  if (running < yes && result == 0)
+  if (running == yes && result == 0)
     {
-      if (runlist == NULL || runlist->aiocbp->aiocb.__abs_prio < prio)
-	{
-	  newp->next_run = runlist;
-	  runlist = newp;
-	}
-      else
-	{
-	  runp = runlist;
-	  while (runp->next_run != NULL
-		 && runp->next_run->aiocbp->aiocb.__abs_prio >= prio)
-	    runp = runp->next_run;
-
-	  newp->next_run = runp->next_run;
-	  runp->next_run = newp;
-	}
+      add_request_to_runlist (newp);
+
+      /* If there is a thread waiting for work, then let it know that we
+	 have just given it something to do.  */
+      if (idle_thread_count > 0)
+	pthread_cond_signal (&__aio_new_request_notification);
     }
 
   if (result == 0)
@@ -407,6 +416,15 @@ handle_fildes_io (void *arg)
   pthread_getschedparam (self, &policy, &param);
 
   do
+    {
+      /* If runp is NULL, then we were created to service the work queue
+	 in general, not to handle any particular request.  In that case we
+	 skip the "do work" stuff on the first pass, and go directly to the
+	 "get work off the work queue" part of this loop, which is near the
+	 end.  */
+      if (runp == NULL)
+	pthread_mutex_lock (&__aio_requests_mutex);
+      else
     {
       /* Update our variables.  */
       aiocbp = runp->aiocbp;
@@ -427,8 +445,8 @@ handle_fildes_io (void *arg)
 	    {
 	      if (aiocbp->aiocb.aio_lio_opcode & 128)
 		aiocbp->aiocb.__return_value =
-		  TEMP_FAILURE_RETRY (__pread64 (fildes,
-						 (void *) aiocbp->aiocb64.aio_buf,
+		  TEMP_FAILURE_RETRY (__pread64 (fildes, (void *)
+						 aiocbp->aiocb64.aio_buf,
 						 aiocbp->aiocb64.aio_nbytes,
 						 aiocbp->aiocb64.aio_offset));
 	      else
@@ -452,14 +470,14 @@ handle_fildes_io (void *arg)
 	    {
 	      if (aiocbp->aiocb.aio_lio_opcode & 128)
 		aiocbp->aiocb.__return_value =
-		  TEMP_FAILURE_RETRY (__pwrite64 (fildes,
-						  (const void *) aiocbp->aiocb64.aio_buf,
+		  TEMP_FAILURE_RETRY (__pwrite64 (fildes, (const void *)
+						  aiocbp->aiocb64.aio_buf,
 						  aiocbp->aiocb64.aio_nbytes,
 						  aiocbp->aiocb64.aio_offset));
 	      else
 		aiocbp->aiocb.__return_value =
-		  TEMP_FAILURE_RETRY (pwrite (fildes,
-					      (const void *) aiocbp->aiocb.aio_buf,
+		  TEMP_FAILURE_RETRY (pwrite (fildes, (const void *)
+					      aiocbp->aiocb.aio_buf,
 					      aiocbp->aiocb.aio_nbytes,
 					      aiocbp->aiocb.aio_offset));
@@ -474,9 +492,11 @@ handle_fildes_io (void *arg)
 						aiocbp->aiocb64.aio_nbytes));
 	    }
 	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_DSYNC)
-	    aiocbp->aiocb.__return_value = TEMP_FAILURE_RETRY (fdatasync (fildes));
+	    aiocbp->aiocb.__return_value =
+	      TEMP_FAILURE_RETRY (fdatasync (fildes));
 	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_SYNC)
-	    aiocbp->aiocb.__return_value = TEMP_FAILURE_RETRY (fsync (fildes));
+	    aiocbp->aiocb.__return_value =
+	      TEMP_FAILURE_RETRY (fsync (fildes));
 	  else
 	    {
 	      /* This is an invalid opcode.  */
@@ -519,38 +539,75 @@
 	    runp->last_fd->next_fd = runp->next_prio;
 	  else
 	    requests = runp->next_prio;
+	  add_request_to_runlist (runp->next_prio);
 	}
 
       /* Free the old element.  */
       __aio_free_request (runp);
+	}
 
       runp = runlist;
-      if (runp != NULL)
-	{
-	  /* We must not run requests which are not marked `running'.  */
-	  if (runp->running == yes)
-	    runlist = runp->next_run;
-	  else
-	    {
-	      struct requestlist *old;
-
-	      do
-		{
-		  old = runp;
-		  runp = runp->next_run;
-		}
-	      while (runp != NULL && runp->running != yes);
-
-	      if (runp != NULL)
-		old->next_run = runp->next_run;
-	    }
-	}
 
-      /* If no request to work on we will stop the thread.  */
+      /* If the runlist is empty, then we sleep for a while, waiting for
+	 something to arrive in it.  */
+      if (runp == NULL && optim.aio_idle_time >= 0)
+	{
+	  struct timeval now;
+	  struct timespec wakeup_time;
+
+	  ++idle_thread_count;
+	  gettimeofday (&now, NULL);
+	  wakeup_time.tv_sec = now.tv_sec + optim.aio_idle_time;
+	  wakeup_time.tv_nsec = now.tv_usec * 1000;
+	  if (wakeup_time.tv_nsec > 1000000000)
+	    {
+	      wakeup_time.tv_nsec -= 1000000000;
+	      ++wakeup_time.tv_sec;
+	    }
+	  pthread_cond_timedwait (&__aio_new_request_notification,
+				  &__aio_requests_mutex,
+				  &wakeup_time);
+	  --idle_thread_count;
+	  runp = runlist;
+	}
+
       if (runp == NULL)
 	--nthreads;
       else
+	{
+	  assert (runp->running == yes);
 	  runp->running = allocated;
+	  runlist = runp->next_run;
+
+	  /* If we have a request to process, and there's still another in
+	     the run list, then we need to either wake up or create a new
+	     thread to service the request that is still in the run list.  */
+	  if (runlist != NULL)
+	    {
+	      /* There are at least two items in the work queue to work on.
+		 If there are other idle threads, then we should wake them
+		 up for these other work elements; otherwise, we should try
+		 to create a new thread.  */
+	      if (idle_thread_count > 0)
+		pthread_cond_signal (&__aio_new_request_notification);
+	      else if (nthreads < optim.aio_threads)
+		{
+		  pthread_t thid;
+		  pthread_attr_t attr;
+
+		  /* Make sure the thread is created detached.  */
+		  pthread_attr_init (&attr);
+		  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+
+		  /* Now try to start a thread.  If we fail, no big deal,
+		     because we know that there is at least one thread (us)
+		     that is working on AIO operations.  */
+		  if (pthread_create (&thid, &attr, handle_fildes_io, NULL)
+		      == 0)
+		    ++nthreads;
+		}
+	    }
+	}
 
       /* Release the mutex.  */
       pthread_mutex_unlock (&__aio_requests_mutex);
@@ -577,5 +634,33 @@ free_res (void)
   free (pool);
 }
 
 text_set_element (__libc_subfreeres, free_res);
+
+/* Add newrequest to the runlist.  The __abs_prio flag of newrequest must
+   be correctly set to do this.  Also, you had better set newrequest's
+   "running" flag to "yes" before you release your lock or you'll throw an
+   assertion.  */
+static void
+add_request_to_runlist (struct requestlist *newrequest)
+{
+  int prio = newrequest->aiocbp->aiocb.__abs_prio;
+  struct requestlist *runp;
+
+  if (runlist == NULL || runlist->aiocbp->aiocb.__abs_prio < prio)
+    {
+      newrequest->next_run = runlist;
+      runlist = newrequest;
+    }
+  else
+    {
+      runp = runlist;
+
+      while (runp->next_run != NULL
+	     && runp->next_run->aiocbp->aiocb.__abs_prio >= prio)
+	runp = runp->next_run;
+
+      newrequest->next_run = runp->next_run;
+      runp->next_run = newrequest;
+    }
+}
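One note on the idle-wait hunk above: pthread_cond_timedwait takes an absolute deadline rather than a relative timeout, which is why the worker adds aio_idle_time to the current gettimeofday reading before sleeping. The same conversion in isolation, as a sketch (timed_idle_wait is a hypothetical name, not part of the patch):

    #include <pthread.h>
    #include <sys/time.h>
    #include <time.h>

    /* Wait on COND (MUTEX must be held by the caller) for at most
       IDLE_SECONDS.  Returns 0 when signaled, ETIMEDOUT when the
       deadline passes.  */
    static int
    timed_idle_wait (pthread_cond_t *cond, pthread_mutex_t *mutex,
                     int idle_seconds)
    {
      struct timeval now;
      struct timespec deadline;

      gettimeofday (&now, NULL);
      deadline.tv_sec = now.tv_sec + idle_seconds;
      /* tv_usec is below 1000000, so tv_usec * 1000 is already a valid
         tv_nsec; no carry into tv_sec is needed.  */
      deadline.tv_nsec = now.tv_usec * 1000;

      return pthread_cond_timedwait (cond, mutex, &deadline);
    }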