Diffstat (limited to 'source4/lib')
-rw-r--r--   source4/lib/events/config.m4            19
-rw-r--r--   source4/lib/events/config.mk             26
-rw-r--r--   source4/lib/events/events.c              98
-rw-r--r--   source4/lib/events/events.h              13
-rw-r--r--   source4/lib/events/events_aio.c         259
-rw-r--r--   source4/lib/events/events_epoll.c       407
-rw-r--r--   source4/lib/events/events_internal.h     23
-rw-r--r--   source4/lib/events/events_select.c      289
-rw-r--r--   source4/lib/events/events_standard.c    132
-rw-r--r--   source4/lib/events/events_timed.c       136
10 files changed, 1137 insertions, 265 deletions
diff --git a/source4/lib/events/config.m4 b/source4/lib/events/config.m4
index 6e4095d5b8d..8c61f7ed428 100644
--- a/source4/lib/events/config.m4
+++ b/source4/lib/events/config.m4
@@ -1,2 +1,19 @@
AC_CHECK_HEADERS(sys/epoll.h)
-AC_CHECK_FUNCS(epoll_create)
+
+# check for native Linux AIO interface
+SMB_ENABLE(EVENTS_AIO, NO)
+AC_CHECK_HEADERS(libaio.h)
+AC_CHECK_LIB_EXT(aio, AIO_LIBS, io_getevents)
+if test x"$ac_cv_header_libaio_h" = x"yes" -a x"$ac_cv_lib_ext_aio_io_getevents" = x"yes";then
+ SMB_ENABLE(EVENTS_AIO,YES)
+ AC_DEFINE(HAVE_LINUX_AIO, 1, [Whether Linux AIO is available])
+fi
+SMB_EXT_LIB(LIBAIO_LINUX, $AIO_LIBS)
+
+# check for the Linux epoll interface
+SMB_ENABLE(EVENTS_EPOLL, NO)
+AC_CHECK_HEADERS(sys/epoll.h)
+if test x"$ac_cv_header_sys_epoll_h" = x"yes";then
+ SMB_ENABLE(EVENTS_EPOLL,YES)
+fi
+
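
The HAVE_LINUX_AIO define added above is what gates the AIO backend at compile time. A minimal sketch of how C code can honour that configure result; the header path and the helper name below are illustrative, not part of this commit:

#include "config.h"	/* assumed to carry the AC_DEFINE results from configure */

#ifdef HAVE_LINUX_AIO
#include <libaio.h>
/* native Linux AIO was found: the aio events backend can be built */
static int have_native_aio(void) { return 1; }
#else
/* no libaio.h / io_getevents(): only the standard/select/epoll backends */
static int have_native_aio(void) { return 0; }
#endif
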
diff --git a/source4/lib/events/config.mk b/source4/lib/events/config.mk
index 5f770b971b5..5e871cb0ac7 100644
--- a/source4/lib/events/config.mk
+++ b/source4/lib/events/config.mk
@@ -1,7 +1,29 @@
##############################
+[MODULE::EVENTS_EPOLL]
+OBJ_FILES = events_epoll.o
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = events_epoll_init
+##############################
+
+##############################
+[MODULE::EVENTS_SELECT]
+OBJ_FILES = events_select.o
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = events_select_init
+##############################
+
+##############################
+[MODULE::EVENTS_STANDARD]
+OBJ_FILES = events_standard.o
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = events_standard_init
+##############################
+
+
+##############################
# Start SUBSYSTEM LIBEVENTS
[SUBSYSTEM::LIBEVENTS]
-OBJ_FILES = events.o events_standard.o
-PUBLIC_DEPENDENCIES = LIBTALLOC
+OBJ_FILES = events.o events_timed.o
+PUBLIC_DEPENDENCIES = LIBTALLOC EVENTS_STANDARD EVENTS_EPOLL
# End SUBSYSTEM LIBEVENTS
##############################
diff --git a/source4/lib/events/events.c b/source4/lib/events/events.c
index 585fcb3112a..52b431befae 100644
--- a/source4/lib/events/events.c
+++ b/source4/lib/events/events.c
@@ -57,6 +57,63 @@
#include "includes.h"
#include "lib/events/events.h"
#include "lib/events/events_internal.h"
+#include "lib/util/dlinklist.h"
+#include "build.h"
+
+struct event_ops_list {
+ struct event_ops_list *next, *prev;
+ const char *name;
+ const struct event_ops *ops;
+};
+
+/* list of registered event backends */
+static struct event_ops_list *event_backends;
+
+/*
+ register an events backend
+*/
+NTSTATUS event_register_backend(const char *name, const struct event_ops *ops)
+{
+ struct event_ops_list *e;
+ e = talloc(talloc_autofree_context(), struct event_ops_list);
+ NT_STATUS_HAVE_NO_MEMORY(e);
+ e->name = name;
+ e->ops = ops;
+ DLIST_ADD(event_backends, e);
+ return NT_STATUS_OK;
+}
+
+/*
+ initialise backends if not already done
+*/
+static void event_backend_init(void)
+{
+ init_module_fn static_init[] = STATIC_LIBEVENTS_MODULES;
+ init_module_fn *shared_init;
+ if (event_backends) return;
+ shared_init = load_samba_modules(NULL, "LIBEVENTS");
+ run_init_functions(static_init);
+ run_init_functions(shared_init);
+}
+
+/*
+ list available backends
+*/
+const char **event_backend_list(TALLOC_CTX *mem_ctx)
+{
+ const char **list = NULL;
+ struct event_ops_list *e;
+
+ event_backend_init();
+
+ for (e=event_backends;e;e=e->next) {
+ list = str_list_add(list, e->name);
+ }
+
+ talloc_steal(mem_ctx, list);
+
+ return list;
+}
/*
 create an event_context structure for a specific implementation.
@@ -69,7 +126,8 @@
NOTE: use event_context_init() inside of samba!
*/
-struct event_context *event_context_init_ops(TALLOC_CTX *mem_ctx, const struct event_ops *ops, void *private_data)
+static struct event_context *event_context_init_ops(TALLOC_CTX *mem_ctx,
+ const struct event_ops *ops)
{
struct event_context *ev;
int ret;
@@ -79,7 +137,7 @@ struct event_context *event_context_init_ops(TALLOC_CTX *mem_ctx, const struct e
ev->ops = ops;
- ret = ev->ops->context_init(ev, private_data);
+ ret = ev->ops->context_init(ev);
if (ret != 0) {
talloc_free(ev);
return NULL;
@@ -93,10 +151,29 @@ struct event_context *event_context_init_ops(TALLOC_CTX *mem_ctx, const struct e
call, and all subsequent calls pass this event_context as the first
element. Event handlers also receive this as their first argument.
*/
+struct event_context *event_context_init_byname(TALLOC_CTX *mem_ctx, const char *name)
+{
+ struct event_ops_list *e;
+
+ event_backend_init();
+
+ for (e=event_backends;e;e=e->next) {
+ if (strcmp(name, e->name) == 0) {
+ return event_context_init_ops(mem_ctx, e->ops);
+ }
+ }
+ return NULL;
+}
+
+
+/*
+ create an event_context structure. This must be the first events
+ call, and all subsequent calls pass this event_context as the first
+ element. Event handlers also receive this as their first argument.
+*/
struct event_context *event_context_init(TALLOC_CTX *mem_ctx)
{
- const struct event_ops *ops = event_standard_get_ops();
- return event_context_init_ops(mem_ctx, ops, NULL);
+ return event_context_init_byname(mem_ctx, "standard");
}
/*
@@ -111,6 +188,19 @@ struct fd_event *event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
}
/*
+ add a disk aio event
+*/
+struct aio_event *event_add_aio(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct iocb *iocb,
+ event_aio_handler_t handler,
+ void *private_data)
+{
+ if (ev->ops->add_aio == NULL) return NULL;
+ return ev->ops->add_aio(ev, mem_ctx, iocb, handler, private_data);
+}
+
+/*
return the fd event flags
*/
uint16_t event_get_fd_flags(struct fd_event *fde)
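
With backends now registered by name, callers select an implementation through event_context_init_byname() instead of passing an ops pointer. A minimal usage sketch, assuming the Samba source4 environment (includes.h, talloc); the function name pick_event_context is illustrative:

#include "includes.h"
#include "lib/events/events.h"

static struct event_context *pick_event_context(TALLOC_CTX *mem_ctx)
{
	/* try the native Linux AIO backend first; it is only registered
	   when configure found libaio (see config.m4 above) */
	struct event_context *ev = event_context_init_byname(mem_ctx, "aio");
	if (ev == NULL) {
		/* fall back to the default, equivalent to event_context_init() */
		ev = event_context_init_byname(mem_ctx, "standard");
	}
	return ev;
}
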
diff --git a/source4/lib/events/events.h b/source4/lib/events/events.h
index 8c1b7cda214..62f1d8e9df5 100644
--- a/source4/lib/events/events.h
+++ b/source4/lib/events/events.h
@@ -27,15 +27,19 @@ struct event_context;
struct event_ops;
struct fd_event;
struct timed_event;
+struct aio_event;
/* event handler types */
typedef void (*event_fd_handler_t)(struct event_context *, struct fd_event *,
uint16_t , void *);
typedef void (*event_timed_handler_t)(struct event_context *, struct timed_event *,
struct timeval , void *);
+typedef void (*event_aio_handler_t)(struct event_context *, struct aio_event *,
+ int , void *);
struct event_context *event_context_init(TALLOC_CTX *mem_ctx);
-struct event_context *event_context_init_ops(TALLOC_CTX *mem_ctx, const struct event_ops *ops, void *private_data);
+struct event_context *event_context_init_byname(TALLOC_CTX *mem_ctx, const char *name);
+const char **event_backend_list(TALLOC_CTX *mem_ctx);
struct fd_event *event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
int fd, uint16_t flags, event_fd_handler_t handler,
@@ -46,6 +50,13 @@ struct timed_event *event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ct
event_timed_handler_t handler,
void *private);
+struct iocb;
+struct aio_event *event_add_aio(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct iocb *iocb,
+ event_aio_handler_t handler,
+ void *private);
+
int event_loop_once(struct event_context *ev);
int event_loop_wait(struct event_context *ev);
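
For completeness, a sketch of how the public API in this header is consumed: a read handler matching event_fd_handler_t is attached to a file descriptor and the loop is run. The names on_readable, watch_socket and sock_fd are illustrative, and socket setup is omitted:

#include "includes.h"
#include "lib/events/events.h"

static void on_readable(struct event_context *ev, struct fd_event *fde,
			uint16_t flags, void *private_data)
{
	if (flags & EVENT_FD_READ) {
		/* data is ready on the fd this fde watches */
	}
}

static int watch_socket(struct event_context *ev, int sock_fd, void *state)
{
	struct fd_event *fde = event_add_fd(ev, ev, sock_fd, EVENT_FD_READ,
					    on_readable, state);
	if (fde == NULL) return -1;
	return event_loop_wait(ev);	/* returns once all fd events are gone */
}
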
diff --git a/source4/lib/events/events_aio.c b/source4/lib/events/events_aio.c
index 97305fdeb3f..1c2735c2002 100644
--- a/source4/lib/events/events_aio.c
+++ b/source4/lib/events/events_aio.c
@@ -39,8 +39,10 @@
#include "lib/events/events_internal.h"
#include <libaio.h>
-#define MAX_AIO_QUEUE_DEPTH 10
+#define MAX_AIO_QUEUE_DEPTH 100
+#ifndef IOCB_CMD_EPOLL_WAIT
#define IOCB_CMD_EPOLL_WAIT 9
+#endif
struct aio_event_context {
/* a pointer back to the generic event_context */
@@ -49,22 +51,24 @@ struct aio_event_context {
/* number of registered fd event handlers */
int num_fd_events;
- /* list of timed events */
- struct timed_event *timed_events;
-
uint32_t destruction_count;
io_context_t ioctx;
- struct io_event events[MAX_AIO_QUEUE_DEPTH];
- struct epoll_event epevent;
+ struct epoll_event epevent[MAX_AIO_QUEUE_DEPTH];
struct iocb *epoll_iocb;
int epoll_fd;
+ int is_epoll_set;
};
-static void aio_event_loop_timer(struct aio_event_context *aio_ev);
+struct aio_event {
+ struct event_context *event_ctx;
+ struct iocb iocb;
+ void *private_data;
+ event_aio_handler_t handler;
+};
/*
map from EVENT_FD_* to EPOLLIN/EPOLLOUT
@@ -82,6 +86,7 @@ static uint32_t epoll_map_flags(uint16_t flags)
*/
static int aio_ctx_destructor(struct aio_event_context *aio_ev)
{
+ io_queue_release(aio_ev->ioctx);
close(aio_ev->epoll_fd);
aio_ev->epoll_fd = -1;
return 0;
@@ -187,20 +192,24 @@ static void epoll_change_event(struct aio_event_context *aio_ev, struct fd_event
static int setup_epoll_wait(struct aio_event_context *aio_ev)
{
- struct io_event r;
-
+ if (aio_ev->is_epoll_set) {
+ return 0;
+ }
memset(aio_ev->epoll_iocb, 0, sizeof(*aio_ev->epoll_iocb));
aio_ev->epoll_iocb->aio_fildes = aio_ev->epoll_fd;
aio_ev->epoll_iocb->aio_lio_opcode = IOCB_CMD_EPOLL_WAIT;
aio_ev->epoll_iocb->aio_reqprio = 0;
- aio_ev->epoll_iocb->u.c.nbytes = 1;
+ aio_ev->epoll_iocb->u.c.nbytes = MAX_AIO_QUEUE_DEPTH;
aio_ev->epoll_iocb->u.c.offset = -1;
- aio_ev->epoll_iocb->u.c.buf = &aio_ev->epevent;
+ aio_ev->epoll_iocb->u.c.buf = aio_ev->epevent;
+ aio_ev->is_epoll_set = 1;
if (io_submit(aio_ev->ioctx, 1, &aio_ev->epoll_iocb) != 1) {
return -1;
}
+
+ return 0;
}
@@ -212,6 +221,7 @@ static int aio_event_loop(struct aio_event_context *aio_ev, struct timeval *tval
int ret, i;
uint32_t destruction_count = aio_ev->destruction_count;
struct timespec timeout;
+ struct io_event events[8];
if (aio_ev->epoll_fd == -1) return -1;
@@ -221,50 +231,74 @@ static int aio_event_loop(struct aio_event_context *aio_ev, struct timeval *tval
timeout.tv_nsec *= 1000;
}
- setup_epoll_wait(aio_ev);
+ if (setup_epoll_wait(aio_ev) < 0)
+ return -1;
- ret = io_getevents(aio_ev->ioctx, 1, MAX_AIO_QUEUE_DEPTH,
- aio_ev->events, tvalp?&timeout:NULL);
+ ret = io_getevents(aio_ev->ioctx, 1, 8,
+ events, tvalp?&timeout:NULL);
if (ret == -EINTR) {
return 0;
}
if (ret == 0 && tvalp) {
- aio_event_loop_timer(aio_ev);
+ common_event_loop_timer(aio_ev->ev);
return 0;
}
for (i=0;i<ret;i++) {
- struct iocb *finished = aio_ev->events[i].obj;
+ struct io_event *event = &events[i];
+ struct iocb *finished = event->obj;
+
switch (finished->aio_lio_opcode) {
+ case IO_CMD_PWRITE:
+ case IO_CMD_PREAD: {
+ struct aio_event *ae = talloc_get_type(finished->data,
+ struct aio_event);
+ if (ae) {
+ talloc_set_destructor(ae, NULL);
+ ae->handler(ae->event_ctx, ae,
+ event->res, ae->private_data);
+ talloc_free(ae);
+ }
+ break;
+ }
case IOCB_CMD_EPOLL_WAIT: {
struct epoll_event *ep = (struct epoll_event *)finished->u.c.buf;
- struct fd_event *fde = talloc_get_type(ep->data.ptr,
- struct fd_event);
+ struct fd_event *fde;
uint16_t flags = 0;
+ int j;
- if (fde == NULL) {
- return -1;
- }
- if (ep->events & (EPOLLHUP|EPOLLERR)) {
- fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
- if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
- epoll_del_event(aio_ev, fde);
- continue;
+// DEBUG(0,("EVENT finished=%p fde=%p ep=%p\n", finished, fde, ep));
+ //printf("GOT %d aio epoll events\n", event->res);
+
+ aio_ev->is_epoll_set = 0;
+
+ for (j=0; j<event->res; j++, ep++) {
+ fde = talloc_get_type(ep->data.ptr,
+ struct fd_event);
+ if (fde == NULL) {
+ return -1;
}
- flags |= EVENT_FD_READ;
- }
- if (ep->events & EPOLLIN) flags |= EVENT_FD_READ;
- if (ep->events & EPOLLOUT) flags |= EVENT_FD_WRITE;
- if (flags) {
- fde->handler(aio_ev->ev, fde, flags, fde->private_data);
- if (destruction_count != aio_ev->destruction_count) {
- return 0;
+ if (ep->events & (EPOLLHUP|EPOLLERR)) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
+ epoll_del_event(aio_ev, fde);
+ continue;
+ }
+ flags |= EVENT_FD_READ;
+ }
+ if (ep->events & EPOLLIN) flags |= EVENT_FD_READ;
+ if (ep->events & EPOLLOUT) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(aio_ev->ev, fde, flags, fde->private_data);
}
}
break;
}
}
+ if (destruction_count != aio_ev->destruction_count) {
+ return 0;
+ }
}
return 0;
@@ -273,7 +307,7 @@ static int aio_event_loop(struct aio_event_context *aio_ev, struct timeval *tval
/*
create a aio_event_context structure.
*/
-static int aio_event_context_init(struct event_context *ev, void *private_data)
+static int aio_event_context_init(struct event_context *ev)
{
struct aio_event_context *aio_ev;
@@ -373,93 +407,6 @@ static void aio_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
}
/*
- destroy a timed event
-*/
-static int aio_event_timed_destructor(struct timed_event *te)
-{
- struct aio_event_context *aio_ev = talloc_get_type(te->event_ctx->additional_data,
- struct aio_event_context);
- DLIST_REMOVE(aio_ev->timed_events, te);
- return 0;
-}
-
-static int aio_event_timed_deny_destructor(struct timed_event *te)
-{
- return -1;
-}
-
-/*
- add a timed event
- return NULL on failure (memory allocation error)
-*/
-static struct timed_event *aio_event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
- struct timeval next_event,
- event_timed_handler_t handler,
- void *private_data)
-{
- struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
- struct aio_event_context);
- struct timed_event *te, *last_te, *cur_te;
-
- te = talloc(mem_ctx?mem_ctx:ev, struct timed_event);
- if (te == NULL) return NULL;
-
- te->event_ctx = ev;
- te->next_event = next_event;
- te->handler = handler;
- te->private_data = private_data;
- te->additional_data = NULL;
-
- /* keep the list ordered */
- last_te = NULL;
- for (cur_te = aio_ev->timed_events; cur_te; cur_te = cur_te->next) {
- /* if the new event comes before the current one break */
- if (!timeval_is_zero(&cur_te->next_event) &&
- timeval_compare(&te->next_event,
- &cur_te->next_event) < 0) {
- break;
- }
-
- last_te = cur_te;
- }
-
- DLIST_ADD_AFTER(aio_ev->timed_events, te, last_te);
-
- talloc_set_destructor(te, aio_event_timed_destructor);
-
- return te;
-}
-
-/*
- a timer has gone off - call it
-*/
-static void aio_event_loop_timer(struct aio_event_context *aio_ev)
-{
- struct timeval t = timeval_current();
- struct timed_event *te = aio_ev->timed_events;
-
- if (te == NULL) {
- return;
- }
-
- /* deny the handler to free the event */
- talloc_set_destructor(te, aio_event_timed_deny_destructor);
-
- /* We need to remove the timer from the list before calling the
- * handler because in a semi-async inner event loop called from the
- * handler we don't want to come across this event again -- vl */
- DLIST_REMOVE(aio_ev->timed_events, te);
-
- te->handler(aio_ev->ev, te, t, te->private_data);
-
- /* The destructor isn't necessary anymore, we've already removed the
- * event from the list. */
- talloc_set_destructor(te, NULL);
-
- talloc_free(te);
-}
-
-/*
do a single event loop using the events defined in ev
*/
static int aio_event_loop_once(struct event_context *ev)
@@ -468,19 +415,11 @@ static int aio_event_loop_once(struct event_context *ev)
struct aio_event_context);
struct timeval tval;
- /* work out the right timeout for all timed events */
- if (aio_ev->timed_events) {
- struct timeval t = timeval_current();
- tval = timeval_until(&t, &aio_ev->timed_events->next_event);
- if (timeval_is_zero(&tval)) {
- aio_event_loop_timer(aio_ev);
- return 0;
- }
- } else {
- /* have a default tick time of 30 seconds. This guarantees
- that code that uses its own timeout checking will be
- able to proceeed eventually */
- tval = timeval_set(30, 0);
+ tval = common_event_loop_delay(ev);
+
+ if (timeval_is_zero(&tval)) {
+ common_event_loop_timer(ev);
+ return 0;
}
return aio_event_loop(aio_ev, &tval);
@@ -502,17 +441,61 @@ static int aio_event_loop_wait(struct event_context *ev)
return 0;
}
+/*
+ called when a disk IO event needs to be cancelled
+*/
+static int aio_destructor(struct aio_event *ae)
+{
+ struct event_context *ev = ae->event_ctx;
+ struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+ struct aio_event_context);
+ struct io_event result;
+ io_cancel(aio_ev->ioctx, &ae->iocb, &result);
+ /* TODO: handle errors from io_cancel()! */
+ return 0;
+}
+
+/* submit an aio disk IO event */
+static struct aio_event *aio_event_add_aio(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct iocb *iocb,
+ event_aio_handler_t handler,
+ void *private_data)
+{
+ struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+ struct aio_event_context);
+ struct iocb *iocbp;
+ struct aio_event *ae = talloc(mem_ctx?mem_ctx:ev, struct aio_event);
+ if (ae == NULL) return NULL;
+
+ ae->event_ctx = ev;
+ ae->iocb = *iocb;
+ ae->handler = handler;
+ ae->private_data = private_data;
+ iocbp = &ae->iocb;
+
+ if (io_submit(aio_ev->ioctx, 1, &iocbp) != 1) {
+ talloc_free(ae);
+ return NULL;
+ }
+ ae->iocb.data = ae;
+ talloc_set_destructor(ae, aio_destructor);
+
+ return ae;
+}
+
static const struct event_ops aio_event_ops = {
.context_init = aio_event_context_init,
.add_fd = aio_event_add_fd,
+ .add_aio = aio_event_add_aio,
.get_fd_flags = aio_event_get_fd_flags,
.set_fd_flags = aio_event_set_fd_flags,
- .add_timed = aio_event_add_timed,
+ .add_timed = common_event_add_timed,
.loop_once = aio_event_loop_once,
.loop_wait = aio_event_loop_wait,
};
-const struct event_ops *event_aio_get_ops(void)
+NTSTATUS events_aio_init(void)
{
- return &aio_event_ops;
+ return event_register_backend("aio", &aio_event_ops);
}
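
The new add_aio hook expects a fully prepared struct iocb; the backend copies it, submits it with io_submit() and later calls the handler with the io_event result. A hedged usage sketch that only does anything useful on the "aio" backend; start_async_read, file_fd and buf are illustrative:

#include "includes.h"
#include "lib/events/events.h"
#include <libaio.h>

static void read_done(struct event_context *ev, struct aio_event *ae,
		      int res, void *private_data)
{
	/* res carries io_event.res: bytes transferred, or a negative errno */
}

static int start_async_read(struct event_context *ev, int file_fd,
			    void *buf, size_t len)
{
	struct iocb iocb;
	io_prep_pread(&iocb, file_fd, buf, len, 0 /* offset */);
	/* the backend copies the iocb, so a stack copy is fine here */
	if (event_add_aio(ev, ev, &iocb, read_done, NULL) == NULL) {
		/* no add_aio hook on this backend, or io_submit() failed */
		return -1;
	}
	return 0;
}
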
diff --git a/source4/lib/events/events_epoll.c b/source4/lib/events/events_epoll.c
new file mode 100644
index 00000000000..cd894d8dff1
--- /dev/null
+++ b/source4/lib/events/events_epoll.c
@@ -0,0 +1,407 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ main select loop and event handling - epoll implementation
+
+ Copyright (C) Andrew Tridgell 2003-2005
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h" /* needed for WITH_EPOLL */
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+struct epoll_event_context {
+ /* a pointer back to the generic event_context */
+ struct event_context *ev;
+
+ /* number of registered fd event handlers */
+ int num_fd_events;
+
+ /* this is changed by the destructors for the fd event
+ type. It is used to detect event destruction by event
+ handlers, which means the code that is calling the event
+ handler needs to assume that the linked list is no longer
+ valid
+ */
+ uint32_t destruction_count;
+
+ /* when using epoll this is the handle from epoll_create */
+ int epoll_fd;
+};
+
+/*
+ called when a epoll call fails, and we should fallback
+ to using select
+*/
+static void epoll_fallback_to_select(struct epoll_event_context *epoll_ev, const char *reason)
+{
+ DEBUG(0,("%s (%s) - falling back to select()\n", reason, strerror(errno)));
+ close(epoll_ev->epoll_fd);
+ epoll_ev->epoll_fd = -1;
+ talloc_set_destructor(epoll_ev, NULL);
+}
+
+/*
+ map from EVENT_FD_* to EPOLLIN/EPOLLOUT
+*/
+static uint32_t epoll_map_flags(uint16_t flags)
+{
+ uint32_t ret = 0;
+ if (flags & EVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
+ if (flags & EVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
+ return ret;
+}
+
+/*
+ free the epoll fd
+*/
+static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
+{
+ close(epoll_ev->epoll_fd);
+ epoll_ev->epoll_fd = -1;
+ return 0;
+}
+
+/*
+ init the epoll fd
+*/
+static void epoll_init_ctx(struct epoll_event_context *epoll_ev)
+{
+ epoll_ev->epoll_fd = epoll_create(64);
+ talloc_set_destructor(epoll_ev, epoll_ctx_destructor);
+}
+
+#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT (1<<0)
+#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1)
+#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2)
+
+/*
+ add the epoll event to the given fd_event
+*/
+static void epoll_add_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+ struct epoll_event event;
+ if (epoll_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ /* if we don't want events yet, don't add an epoll_event */
+ if (fde->flags == 0) return;
+
+ ZERO_STRUCT(event);
+ event.events = epoll_map_flags(fde->flags);
+ event.data.ptr = fde;
+ if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
+ epoll_fallback_to_select(epoll_ev, "EPOLL_CTL_ADD failed");
+ }
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+
+ /* only if we want to read we want to tell the event handler about errors */
+ if (fde->flags & EVENT_FD_READ) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+ }
+}
+
+/*
+ delete the epoll event for given fd_event
+*/
+static void epoll_del_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+ struct epoll_event event;
+ if (epoll_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ /* if there's no epoll_event, we don't need to delete it */
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;
+
+ ZERO_STRUCT(event);
+ event.events = epoll_map_flags(fde->flags);
+ event.data.ptr = fde;
+ epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event);
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+}
+
+/*
+ change the epoll event to the given fd_event
+*/
+static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+ struct epoll_event event;
+ if (epoll_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ ZERO_STRUCT(event);
+ event.events = epoll_map_flags(fde->flags);
+ event.data.ptr = fde;
+ if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
+ epoll_fallback_to_select(epoll_ev, "EPOLL_CTL_MOD failed");
+ }
+
+ /* only if we want to read we want to tell the event handler about errors */
+ if (fde->flags & EVENT_FD_READ) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+ }
+}
+
+static void epoll_change_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+ BOOL got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
+ BOOL want_read = (fde->flags & EVENT_FD_READ);
+ BOOL want_write= (fde->flags & EVENT_FD_WRITE);
+
+ if (epoll_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ /* there's already an event */
+ if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
+ if (want_read || (want_write && !got_error)) {
+ epoll_mod_event(epoll_ev, fde);
+ return;
+ }
+ /*
+ * if we want to match the select behavior, we need to remove the epoll_event
+ * when the caller isn't interested in events.
+ *
+ * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
+ */
+ epoll_del_event(epoll_ev, fde);
+ return;
+ }
+
+ /* there's no epoll_event attached to the fde */
+ if (want_read || (want_write && !got_error)) {
+ epoll_add_event(epoll_ev, fde);
+ return;
+ }
+}
+
+/*
+ event loop handling using epoll
+*/
+static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
+{
+ int ret, i;
+#define MAXEVENTS 8
+ struct epoll_event events[MAXEVENTS];
+ uint32_t destruction_count = epoll_ev->destruction_count;
+ int timeout = -1;
+
+ if (epoll_ev->epoll_fd == -1) return -1;
+
+ if (tvalp) {
+ /* it's better to trigger timed events a bit later than too early */
+ timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
+ }
+
+ ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
+
+ if (ret == -1 && errno != EINTR) {
+ epoll_fallback_to_select(epoll_ev, "epoll_wait() failed");
+ return -1;
+ }
+
+ if (ret == 0 && tvalp) {
+ common_event_loop_timer(epoll_ev->ev);
+ return 0;
+ }
+
+ for (i=0;i<ret;i++) {
+ struct fd_event *fde = talloc_get_type(events[i].data.ptr,
+ struct fd_event);
+ uint16_t flags = 0;
+
+ if (fde == NULL) {
+ epoll_fallback_to_select(epoll_ev, "epoll_wait() gave bad data");
+ return -1;
+ }
+ if (events[i].events & (EPOLLHUP|EPOLLERR)) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
+ /*
+ * if we only wait for EVENT_FD_WRITE, we should not tell the
+ * event handler about it, and remove the epoll_event,
+ * as we only report errors when waiting for read events,
+ * to match the select() behavior
+ */
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
+ epoll_del_event(epoll_ev, fde);
+ continue;
+ }
+ flags |= EVENT_FD_READ;
+ }
+ if (events[i].events & EPOLLIN) flags |= EVENT_FD_READ;
+ if (events[i].events & EPOLLOUT) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
+ if (destruction_count != epoll_ev->destruction_count) {
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ create a epoll_event_context structure.
+*/
+static int epoll_event_context_init(struct event_context *ev)
+{
+ struct epoll_event_context *epoll_ev;
+
+ epoll_ev = talloc_zero(ev, struct epoll_event_context);
+ if (!epoll_ev) return -1;
+ epoll_ev->ev = ev;
+ epoll_ev->epoll_fd = -1;
+
+ epoll_init_ctx(epoll_ev);
+
+ ev->additional_data = epoll_ev;
+ return 0;
+}
+
+/*
+ destroy an fd_event
+*/
+static int epoll_event_fd_destructor(struct fd_event *fde)
+{
+ struct event_context *ev = fde->event_ctx;
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+
+ epoll_ev->num_fd_events--;
+ epoll_ev->destruction_count++;
+
+ epoll_del_event(epoll_ev, fde);
+
+ return 0;
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+*/
+static struct fd_event *epoll_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data)
+{
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+ struct fd_event *fde;
+
+ fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+ if (!fde) return NULL;
+
+ fde->event_ctx = ev;
+ fde->fd = fd;
+ fde->flags = flags;
+ fde->handler = handler;
+ fde->private_data = private_data;
+ fde->additional_flags = 0;
+ fde->additional_data = NULL;
+
+ epoll_ev->num_fd_events++;
+ talloc_set_destructor(fde, epoll_event_fd_destructor);
+
+ epoll_add_event(epoll_ev, fde);
+
+ return fde;
+}
+
+
+/*
+ return the fd event flags
+*/
+static uint16_t epoll_event_get_fd_flags(struct fd_event *fde)
+{
+ return fde->flags;
+}
+
+/*
+ set the fd event flags
+*/
+static void epoll_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+ struct event_context *ev;
+ struct epoll_event_context *epoll_ev;
+
+ if (fde->flags == flags) return;
+
+ ev = fde->event_ctx;
+ epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);
+
+ fde->flags = flags;
+
+ epoll_change_event(epoll_ev, fde);
+}
+
+/*
+ do a single event loop using the events defined in ev
+*/
+static int epoll_event_loop_once(struct event_context *ev)
+{
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+ struct timeval tval;
+
+ tval = common_event_loop_delay(ev);
+
+ if (timeval_is_zero(&tval)) {
+ common_event_loop_timer(ev);
+ return 0;
+ }
+
+ return epoll_event_loop(epoll_ev, &tval);
+}
+
+/*
+ return on failure or (with 0) if all fd events are removed
+*/
+static int epoll_event_loop_wait(struct event_context *ev)
+{
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+ while (epoll_ev->num_fd_events) {
+ if (epoll_event_loop_once(ev) != 0) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct event_ops epoll_event_ops = {
+ .context_init = epoll_event_context_init,
+ .add_fd = epoll_event_add_fd,
+ .get_fd_flags = epoll_event_get_fd_flags,
+ .set_fd_flags = epoll_event_set_fd_flags,
+ .add_timed = common_event_add_timed,
+ .loop_once = epoll_event_loop_once,
+ .loop_wait = epoll_event_loop_wait,
+};
+
+NTSTATUS events_epoll_init(void)
+{
+ return event_register_backend("epoll", &epoll_event_ops);
+}
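
One detail worth noting in epoll_event_loop(): the delay from common_event_loop_delay() is converted to whole milliseconds rounded up, so timed events fire slightly late rather than early. A standalone illustration of that arithmetic, with no Samba dependencies:

#include <stdio.h>
#include <sys/time.h>

/* same rounding as epoll_event_loop(): microseconds rounded up to the next ms */
static int delay_to_ms(const struct timeval *tv)
{
	return ((tv->tv_usec + 999) / 1000) + (tv->tv_sec * 1000);
}

int main(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 1500 };
	printf("%d ms\n", delay_to_ms(&tv));	/* prints 1002, never 1001 */
	return 0;
}
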
diff --git a/source4/lib/events/events_internal.h b/source4/lib/events/events_internal.h
index de4e5f6a5af..dc321769e0a 100644
--- a/source4/lib/events/events_internal.h
+++ b/source4/lib/events/events_internal.h
@@ -24,7 +24,7 @@
struct event_ops {
 /* context init */
- int (*context_init)(struct event_context *ev, void *private_data);
+ int (*context_init)(struct event_context *ev);
/* fd_event functions */
struct fd_event *(*add_fd)(struct event_context *ev,
@@ -41,6 +41,12 @@ struct event_ops {
struct timeval next_event,
event_timed_handler_t handler,
void *private_data);
+ /* disk aio event functions */
+ struct aio_event *(*add_aio)(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct iocb *iocb,
+ event_aio_handler_t handler,
+ void *private_data);
/* loop functions */
int (*loop_once)(struct event_context *ev);
@@ -71,11 +77,24 @@ struct timed_event {
void *additional_data;
};
+/* aio event is private to the aio backend */
+struct aio_event;
+
struct event_context {
/* the specific events implementation */
const struct event_ops *ops;
+
+ /* list of timed events - used by common code */
+ struct timed_event *timed_events;
+
/* this is private for the events_ops implementation */
void *additional_data;
};
-const struct event_ops *event_standard_get_ops(void);
+
+NTSTATUS event_register_backend(const char *name, const struct event_ops *ops);
+
+struct timed_event *common_event_add_timed(struct event_context *, TALLOC_CTX *,
+ struct timeval, event_timed_handler_t, void *);
+void common_event_loop_timer(struct event_context *);
+struct timeval common_event_loop_delay(struct event_context *);
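
events_internal.h now carries everything a backend needs: implement struct event_ops, reuse the common_* timed-event helpers, and register from an init function wired up in config.mk. A skeletal, hypothetical backend to show the shape; the name "null" and every function below are illustrative, not part of this commit, and the fd-related hooks a real backend must provide are omitted:

#include "includes.h"
#include "lib/events/events.h"
#include "lib/events/events_internal.h"

static int null_event_context_init(struct event_context *ev)
{
	ev->additional_data = NULL;	/* backend-private state would live here */
	return 0;
}

static int null_event_loop_once(struct event_context *ev)
{
	struct timeval tval = common_event_loop_delay(ev);
	if (timeval_is_zero(&tval)) {
		common_event_loop_timer(ev);	/* run the earliest timed event */
	}
	/* a real backend would block here until tval expires or an fd fires */
	return 0;
}

static int null_event_loop_wait(struct event_context *ev)
{
	while (ev->timed_events) {
		if (null_event_loop_once(ev) != 0) return -1;
	}
	return 0;
}

static const struct event_ops null_event_ops = {
	.context_init	= null_event_context_init,
	.add_timed	= common_event_add_timed,	/* shared timed-event code */
	.loop_once	= null_event_loop_once,
	.loop_wait	= null_event_loop_wait,
	/* .add_fd, .get_fd_flags, .set_fd_flags omitted in this sketch */
};

NTSTATUS events_null_init(void)
{
	return event_register_backend("null", &null_event_ops);
}
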
diff --git a/source4/lib/events/events_select.c b/source4/lib/events/events_select.c
new file mode 100644
index 00000000000..5f4ecba5ca1
--- /dev/null
+++ b/source4/lib/events/events_select.c
@@ -0,0 +1,289 @@
+/*
+ Unix SMB/CIFS implementation.
+ main select loop and event handling
+ Copyright (C) Andrew Tridgell 2003-2005
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/*
+ This is SAMBA's default event loop code
+
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+struct select_event_context {
+ /* a pointer back to the generic event_context */
+ struct event_context *ev;
+
+ /* list of filedescriptor events */
+ struct fd_event *fd_events;
+
+ /* list of timed events */
+ struct timed_event *timed_events;
+
+ /* the maximum file descriptor number in fd_events */
+ int maxfd;
+
+ /* information for exiting from the event loop */
+ int exit_code;
+
+ /* this is changed by the destructors for the fd event
+ type. It is used to detect event destruction by event
+ handlers, which means the code that is calling the event
+ handler needs to assume that the linked list is no longer
+ valid
+ */
+ uint32_t destruction_count;
+};
+
+/*
+ create a select_event_context structure.
+*/
+static int select_event_context_init(struct event_context *ev)
+{
+ struct select_event_context *select_ev;
+
+ select_ev = talloc_zero(ev, struct select_event_context);
+ if (!select_ev) return -1;
+ select_ev->ev = ev;
+
+ ev->additional_data = select_ev;
+ return 0;
+}
+
+/*
+ recalculate the maxfd
+*/
+static void calc_maxfd(struct select_event_context *select_ev)
+{
+ struct fd_event *fde;
+
+ select_ev->maxfd = 0;
+ for (fde = select_ev->fd_events; fde; fde = fde->next) {
+ if (fde->fd > select_ev->maxfd) {
+ select_ev->maxfd = fde->fd;
+ }
+ }
+}
+
+
+/* to mark the ev->maxfd invalid
+ * this means we need to recalculate it
+ */
+#define EVENT_INVALID_MAXFD (-1)
+
+/*
+ destroy an fd_event
+*/
+static int select_event_fd_destructor(struct fd_event *fde)
+{
+ struct event_context *ev = fde->event_ctx;
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+
+ if (select_ev->maxfd == fde->fd) {
+ select_ev->maxfd = EVENT_INVALID_MAXFD;
+ }
+
+ DLIST_REMOVE(select_ev->fd_events, fde);
+ select_ev->destruction_count++;
+
+ return 0;
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+*/
+static struct fd_event *select_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data)
+{
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+ struct fd_event *fde;
+
+ fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+ if (!fde) return NULL;
+
+ fde->event_ctx = ev;
+ fde->fd = fd;
+ fde->flags = flags;
+ fde->handler = handler;
+ fde->private_data = private_data;
+ fde->additional_flags = 0;
+ fde->additional_data = NULL;
+
+ DLIST_ADD(select_ev->fd_events, fde);
+ if (fde->fd > select_ev->maxfd) {
+ select_ev->maxfd = fde->fd;
+ }
+ talloc_set_destructor(fde, select_event_fd_destructor);
+
+ return fde;
+}
+
+
+/*
+ return the fd event flags
+*/
+static uint16_t select_event_get_fd_flags(struct fd_event *fde)
+{
+ return fde->flags;
+}
+
+/*
+ set the fd event flags
+*/
+static void select_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+ struct event_context *ev;
+ struct select_event_context *select_ev;
+
+ if (fde->flags == flags) return;
+
+ ev = fde->event_ctx;
+ select_ev = talloc_get_type(ev->additional_data, struct select_event_context);
+
+ fde->flags = flags;
+}
+
+/*
+ event loop handling using select()
+*/
+static int select_event_loop_select(struct select_event_context *select_ev, struct timeval *tvalp)
+{
+ fd_set r_fds, w_fds;
+ struct fd_event *fde;
+ int selrtn;
+ uint32_t destruction_count = select_ev->destruction_count;
+
+ /* we may need to recalculate the maxfd */
+ if (select_ev->maxfd == EVENT_INVALID_MAXFD) {
+ calc_maxfd(select_ev);
+ }
+
+ FD_ZERO(&r_fds);
+ FD_ZERO(&w_fds);
+
+ /* setup any fd events */
+ for (fde = select_ev->fd_events; fde; fde = fde->next) {
+ if (fde->flags & EVENT_FD_READ) {
+ FD_SET(fde->fd, &r_fds);
+ }
+ if (fde->flags & EVENT_FD_WRITE) {
+ FD_SET(fde->fd, &w_fds);
+ }
+ }
+
+ selrtn = select(select_ev->maxfd+1, &r_fds, &w_fds, NULL, tvalp);
+
+ if (selrtn == -1 && errno == EBADF) {
+ /* the socket is dead! this should never
+ happen as the socket should have first been
+ made readable and that should have removed
+ the event, so this must be a bug. This is a
+ fatal error. */
+ DEBUG(0,("ERROR: EBADF on select_event_loop_once\n"));
+ select_ev->exit_code = EBADF;
+ return -1;
+ }
+
+ if (selrtn == 0 && tvalp) {
+ common_event_loop_timer(select_ev->ev);
+ return 0;
+ }
+
+ if (selrtn > 0) {
+ /* at least one file descriptor is ready - check
+ which ones and call the handler, being careful to allow
+ the handler to remove itself when called */
+ for (fde = select_ev->fd_events; fde; fde = fde->next) {
+ uint16_t flags = 0;
+
+ if (FD_ISSET(fde->fd, &r_fds)) flags |= EVENT_FD_READ;
+ if (FD_ISSET(fde->fd, &w_fds)) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(select_ev->ev, fde, flags, fde->private_data);
+ if (destruction_count != select_ev->destruction_count) {
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ do a single event loop using the events defined in ev
+*/
+static int select_event_loop_once(struct event_context *ev)
+{
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+ struct timeval tval;
+
+ tval = common_event_loop_delay(ev);
+
+ if (timeval_is_zero(&tval)) {
+ common_event_loop_timer(ev);
+ return 0;
+ }
+
+ return select_event_loop_select(select_ev, &tval);
+}
+
+/*
+ return on failure or (with 0) if all fd events are removed
+*/
+static int select_event_loop_wait(struct event_context *ev)
+{
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+ select_ev->exit_code = 0;
+
+ while (select_ev->fd_events && select_ev->exit_code == 0) {
+ if (select_event_loop_once(ev) != 0) {
+ break;
+ }
+ }
+
+ return select_ev->exit_code;
+}
+
+static const struct event_ops select_event_ops = {
+ .context_init = select_event_context_init,
+ .add_fd = select_event_add_fd,
+ .get_fd_flags = select_event_get_fd_flags,
+ .set_fd_flags = select_event_set_fd_flags,
+ .add_timed = common_event_add_timed,
+ .loop_once = select_event_loop_once,
+ .loop_wait = select_event_loop_wait,
+};
+
+NTSTATUS events_select_init(void)
+{
+ return event_register_backend("select", &select_event_ops);
+}
diff --git a/source4/lib/events/events_standard.c b/source4/lib/events/events_standard.c
index 12595d8518c..b0f5259fa7e 100644
--- a/source4/lib/events/events_standard.c
+++ b/source4/lib/events/events_standard.c
@@ -42,9 +42,6 @@ struct std_event_context {
/* list of filedescriptor events */
struct fd_event *fd_events;
- /* list of timed events */
- struct timed_event *timed_events;
-
/* the maximum file descriptor number in fd_events */
int maxfd;
@@ -63,8 +60,6 @@ struct std_event_context {
int epoll_fd;
};
-static void std_event_loop_timer(struct std_event_context *std_ev);
-
/* use epoll if it is available */
#if WITH_EPOLL
/*
@@ -103,9 +98,8 @@ static int epoll_ctx_destructor(struct std_event_context *std_ev)
/*
init the epoll fd
*/
-static void epoll_init_ctx(struct std_event_context *std_ev, BOOL try_epoll)
+static void epoll_init_ctx(struct std_event_context *std_ev)
{
- if (!try_epoll) return;
std_ev->epoll_fd = epoll_create(64);
talloc_set_destructor(std_ev, epoll_ctx_destructor);
}
@@ -243,7 +237,7 @@ static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tv
}
if (ret == 0 && tvalp) {
- std_event_loop_timer(std_ev);
+ common_event_loop_timer(std_ev->ev);
return 0;
}
@@ -283,7 +277,7 @@ static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tv
return 0;
}
#else
-#define epoll_init_ctx(std_ev,try_epoll) if (try_epoll) {/* fix unused variable warning*/}
+#define epoll_init_ctx(std_ev)
#define epoll_add_event(std_ev,fde)
#define epoll_del_event(std_ev,fde)
#define epoll_change_event(std_ev,fde)
@@ -293,18 +287,16 @@ static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tv
/*
create a std_event_context structure.
*/
-static int std_event_context_init(struct event_context *ev, void *private_data)
+static int std_event_context_init(struct event_context *ev)
{
struct std_event_context *std_ev;
- BOOL *_try_epoll = private_data;
- BOOL try_epoll = (_try_epoll == NULL ? True : *_try_epoll);
std_ev = talloc_zero(ev, struct std_event_context);
if (!std_ev) return -1;
std_ev->ev = ev;
std_ev->epoll_fd = -1;
- epoll_init_ctx(std_ev, try_epoll);
+ epoll_init_ctx(std_ev);
ev->additional_data = std_ev;
return 0;
@@ -415,93 +407,6 @@ static void std_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
}
/*
- destroy a timed event
-*/
-static int std_event_timed_destructor(struct timed_event *te)
-{
- struct std_event_context *std_ev = talloc_get_type(te->event_ctx->additional_data,
- struct std_event_context);
- DLIST_REMOVE(std_ev->timed_events, te);
- return 0;
-}
-
-static int std_event_timed_deny_destructor(struct timed_event *te)
-{
- return -1;
-}
-
-/*
- add a timed event
- return NULL on failure (memory allocation error)
-*/
-static struct timed_event *std_event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
- struct timeval next_event,
- event_timed_handler_t handler,
- void *private_data)
-{
- struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
- struct std_event_context);
- struct timed_event *te, *last_te, *cur_te;
-
- te = talloc(mem_ctx?mem_ctx:ev, struct timed_event);
- if (te == NULL) return NULL;
-
- te->event_ctx = ev;
- te->next_event = next_event;
- te->handler = handler;
- te->private_data = private_data;
- te->additional_data = NULL;
-
- /* keep the list ordered */
- last_te = NULL;
- for (cur_te = std_ev->timed_events; cur_te; cur_te = cur_te->next) {
- /* if the new event comes before the current one break */
- if (!timeval_is_zero(&cur_te->next_event) &&
- timeval_compare(&te->next_event,
- &cur_te->next_event) < 0) {
- break;
- }
-
- last_te = cur_te;
- }
-
- DLIST_ADD_AFTER(std_ev->timed_events, te, last_te);
-
- talloc_set_destructor(te, std_event_timed_destructor);
-
- return te;
-}
-
-/*
- a timer has gone off - call it
-*/
-static void std_event_loop_timer(struct std_event_context *std_ev)
-{
- struct timeval t = timeval_current();
- struct timed_event *te = std_ev->timed_events;
-
- if (te == NULL) {
- return;
- }
-
- /* deny the handler to free the event */
- talloc_set_destructor(te, std_event_timed_deny_destructor);
-
- /* We need to remove the timer from the list before calling the
- * handler because in a semi-async inner event loop called from the
- * handler we don't want to come across this event again -- vl */
- DLIST_REMOVE(std_ev->timed_events, te);
-
- te->handler(std_ev->ev, te, t, te->private_data);
-
- /* The destructor isn't necessary anymore, we've already removed the
- * event from the list. */
- talloc_set_destructor(te, NULL);
-
- talloc_free(te);
-}
-
-/*
event loop handling using select()
*/
static int std_event_loop_select(struct std_event_context *std_ev, struct timeval *tvalp)
@@ -543,7 +448,7 @@ static int std_event_loop_select(struct std_event_context *std_ev, struct timeva
}
if (selrtn == 0 && tvalp) {
- std_event_loop_timer(std_ev);
+ common_event_loop_timer(std_ev->ev);
return 0;
}
@@ -577,19 +482,11 @@ static int std_event_loop_once(struct event_context *ev)
struct std_event_context);
struct timeval tval;
- /* work out the right timeout for all timed events */
- if (std_ev->timed_events) {
- struct timeval t = timeval_current();
- tval = timeval_until(&t, &std_ev->timed_events->next_event);
- if (timeval_is_zero(&tval)) {
- std_event_loop_timer(std_ev);
- return 0;
- }
- } else {
- /* have a default tick time of 30 seconds. This guarantees
- that code that uses its own timeout checking will be
- able to proceeed eventually */
- tval = timeval_set(30, 0);
+ tval = common_event_loop_delay(ev);
+
+ if (timeval_is_zero(&tval)) {
+ common_event_loop_timer(ev);
+ return 0;
}
if (epoll_event_loop(std_ev, &tval) == 0) {
@@ -622,12 +519,13 @@ static const struct event_ops std_event_ops = {
.add_fd = std_event_add_fd,
.get_fd_flags = std_event_get_fd_flags,
.set_fd_flags = std_event_set_fd_flags,
- .add_timed = std_event_add_timed,
+ .add_timed = common_event_add_timed,
.loop_once = std_event_loop_once,
.loop_wait = std_event_loop_wait,
};
-const struct event_ops *event_standard_get_ops(void)
+
+NTSTATUS events_standard_init(void)
{
- return &std_event_ops;
+ return event_register_backend("standard", &std_event_ops);
}
diff --git a/source4/lib/events/events_timed.c b/source4/lib/events/events_timed.c
new file mode 100644
index 00000000000..6b0919d0f17
--- /dev/null
+++ b/source4/lib/events/events_timed.c
@@ -0,0 +1,136 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ common events code for timed events
+
+ Copyright (C) Andrew Tridgell 2003-2006
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+/*
+ destroy a timed event
+*/
+static int common_event_timed_destructor(struct timed_event *te)
+{
+ struct event_context *ev = talloc_get_type(te->event_ctx->additional_data,
+ struct event_context);
+ DLIST_REMOVE(ev->timed_events, te);
+ return 0;
+}
+
+static int common_event_timed_deny_destructor(struct timed_event *te)
+{
+ return -1;
+}
+
+/*
+ add a timed event
+ return NULL on failure (memory allocation error)
+*/
+struct timed_event *common_event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ struct timeval next_event,
+ event_timed_handler_t handler,
+ void *private_data)
+{
+ struct timed_event *te, *last_te, *cur_te;
+
+ te = talloc(mem_ctx?mem_ctx:ev, struct timed_event);
+ if (te == NULL) return NULL;
+
+ te->event_ctx = ev;
+ te->next_event = next_event;
+ te->handler = handler;
+ te->private_data = private_data;
+ te->additional_data = NULL;
+
+ /* keep the list ordered */
+ last_te = NULL;
+ for (cur_te = ev->timed_events; cur_te; cur_te = cur_te->next) {
+ /* if the new event comes before the current one break */
+ if (!timeval_is_zero(&cur_te->next_event) &&
+ timeval_compare(&te->next_event,
+ &cur_te->next_event) < 0) {
+ break;
+ }
+
+ last_te = cur_te;
+ }
+
+ DLIST_ADD_AFTER(ev->timed_events, te, last_te);
+
+ talloc_set_destructor(te, common_event_timed_destructor);
+
+ return te;
+}
+
+/*
+ a timer has gone off - call it
+*/
+void common_event_loop_timer(struct event_context *ev)
+{
+ struct timeval t = timeval_current();
+ struct timed_event *te = ev->timed_events;
+
+ if (te == NULL) {
+ return;
+ }
+
+ /* deny the handler to free the event */
+ talloc_set_destructor(te, common_event_timed_deny_destructor);
+
+ /* We need to remove the timer from the list before calling the
+ * handler because in a semi-async inner event loop called from the
+ * handler we don't want to come across this event again -- vl */
+ DLIST_REMOVE(ev->timed_events, te);
+
+ te->handler(ev, te, t, te->private_data);
+
+ /* The destructor isn't necessary anymore, we've already removed the
+ * event from the list. */
+ talloc_set_destructor(te, NULL);
+
+ talloc_free(te);
+}
+
+/*
+ work out the delay until the next timed event
+*/
+struct timeval common_event_loop_delay(struct event_context *ev)
+{
+ struct timeval tval;
+
+ /* work out the right timeout for all timed events */
+ if (ev->timed_events) {
+ struct timeval t = timeval_current();
+ tval = timeval_until(&t, &ev->timed_events->next_event);
+ } else {
+ /* have a default tick time of 30 seconds. This guarantees
+ that code that uses its own timeout checking will be
+ able to proceed eventually */
+ tval = timeval_set(30, 0);
+ }
+
+ return tval;
+}
+
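
The shared timed-event code above is what every backend now reaches through common_event_add_timed(), exposed to callers as event_add_timed(). A closing usage sketch, again assuming the source4 environment; the names on_timer and run_one_timer and the five-second offset are illustrative:

#include "includes.h"
#include "lib/events/events.h"

static void on_timer(struct event_context *ev, struct timed_event *te,
		     struct timeval now, void *private_data)
{
	/* common_event_loop_timer() has already unlinked te from the list;
	   it is freed once this handler returns */
}

static int run_one_timer(struct event_context *ev)
{
	struct timeval next = timeval_current();
	next.tv_sec += 5;		/* fire in roughly five seconds */
	if (event_add_timed(ev, ev, next, on_timer, NULL) == NULL) return -1;
	return event_loop_once(ev);	/* returns after the timer (or an fd event) */
}
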