/* $NetBSD: event-internal.h,v 1.5 2021/04/10 19:18:45 rillig Exp $ */
/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVENT_INTERNAL_H_INCLUDED_
#define EVENT_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next ev_.ev_signal.ev_signal_next
#define ev_io_next ev_.ev_io.ev_io_next
#define ev_io_timeout ev_.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls ev_.ev_signal.ev_ncalls
#define ev_pncalls ev_.ev_signal.ev_pncalls

#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg

/** @name Event closure codes

    Possible values for evcb_closure in struct event_callback

    @{
 */
/** A regular event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT 0
/** A signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_SIGNAL 1
/** A persistent non-signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_PERSIST 2
/** A simple callback. Uses the evcb_selfcb callback. */
#define EV_CLOSURE_CB_SELF 3
/** A finalizing callback. Uses the evcb_cbfinalize callback. */
#define EV_CLOSURE_CB_FINALIZE 4
/** A finalizing event. Uses the evcb_evfinalize callback. */
#define EV_CLOSURE_EVENT_FINALIZE 5
/** A finalizing event that should get freed after. Uses the evcb_evfinalize
 * callback. */
#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
/** @} */
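
/* Illustrative sketch (not part of this header): the event loop selects the
 * evcb_cb_union member to invoke based on evcb_closure, roughly as in
 * event_process_active_single_queue() in event.c:
 *
 *	switch (evcb->evcb_closure) {
 *	case EV_CLOSURE_CB_SELF:
 *		evcb->evcb_cb_union.evcb_selfcb(evcb, evcb->evcb_arg);
 *		break;
 *	case EV_CLOSURE_EVENT:
 *		// 'ev' is the struct event containing 'evcb'
 *		evcb->evcb_cb_union.evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
 *		break;
 *	// the signal/persist/finalize closures are dispatched similarly
 *	}
 */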

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the
	 * fd is added.  The function should return 0 on success and -1 on
	 * error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	 * added events are ready, and cause event_active to be called for each
	 * active event (usually via evmap_io_active_() or similar).  It should
	 * return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	 * has one or more active events.  This information is recorded
	 * as part of the evmap entry for each fd, and passed as an argument
	 * to the add and del functions above.
	 */
	size_t fdinfo_len;
};
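
/* Illustrative sketch (hypothetical names, not part of libevent): a backend
 * supplies one static eventop describing its entry points; see select.c,
 * poll.c, epoll.c, or kqueue.c for real instances.
 *
 *	static void *example_init(struct event_base *);
 *	static int example_add(struct event_base *, evutil_socket_t,
 *	    short, short, void *);
 *	static int example_del(struct event_base *, evutil_socket_t,
 *	    short, short, void *);
 *	static int example_dispatch(struct event_base *, struct timeval *);
 *	static void example_dealloc(struct event_base *);
 *
 *	const struct eventop exampleops = {
 *		"example",
 *		example_init,
 *		example_add,
 *		example_del,
 *		example_dispatch,
 *		example_dealloc,
 *		1,			// need_reinit
 *		EV_FEATURE_FDS,		// features
 *		0			// fdinfo_len
 *	};
 */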

#ifdef _WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
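
/* Sketch of the lookup pattern evmap.c uses with this structure (the evmap_io
 * and evmap_signal struct layouts are private to evmap.c; this is
 * illustrative only):
 *
 *	if (signo >= map->nentries) {
 *		// grow map->entries and NULL-fill the new slots
 *	}
 *	void *ctx = map->entries[signo];
 *	if (ctx == NULL) {
 *		// first event for this signal: allocate the per-signal
 *		// record and store it back into map->entries[signo]
 *	}
 */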

/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff
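
/* The public entry point for this mechanism is
 * event_base_init_common_timeout() (event2/event.h).  A sketch of typical
 * use:
 *
 *	struct timeval tv = { 10, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &tv);
 *	evtimer_add(ev, common);	// queued, not placed on the minheap
 *
 * The returned timeval encodes a queue index and a magic tag in the tv_usec
 * bits above COMMON_TIMEOUT_MICROSECONDS_MASK; only the masked bits are real
 * microseconds.
 */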

struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

TAILQ_HEAD(evcallback_list, event_callback);

/* Sets up an event for processing once */
struct event_once {
	LIST_ENTRY(event_once) next_once;
	struct event ev;

	void (*cb)(evutil_socket_t, short, void *);
	void *arg;
};

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Maximum number of virtual events active */
	int virtual_event_count_max;
	/** Number of total events added to this event_base */
	int event_count;
	/** Maximum number of total events added to this event_base */
	int event_count_max;
	/** Number of total events active in this event_base */
	int event_count_active;
	/** Maximum number of total events active in this event_base */
	int event_count_active_max;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/** Set to the number of deferred_cbs we've made 'active' in the
	 * loop.  This is a hack to prevent starvation; it would be smarter
	 * to just use event_config_set_max_dispatch_interval's max_callbacks
	 * feature */
	int n_deferreds_queued;

	/* Active event management. */
	/** An array of nactivequeues queues for active event_callbacks (ones
	 * that have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct evcallback_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;
	/** A list of event_callbacks that should become active the next time
	 * we process events, but not this time. */
	struct evcallback_list active_later_queue;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

	struct evutil_monotonic_timer monotonic_timer;

	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif
	/** The event whose callback is executing right now */
	struct event_callback *current_event;

#ifdef _WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	struct timeval max_dispatch_time;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;

	/* Notify main thread to wake up, break, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);

	/** Saved seed for weak random number generator. Some backends use
	 * this to produce fairness among sockets. Protected by th_base_lock. */
	struct evutil_weakrand_state weakrand_seed;

	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;
};

struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	struct timeval max_dispatch_interval;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};
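
/* Sketch of how these fields are normally populated through the public
 * configuration API (event2/event.h) before event_base_new_with_config()
 * copies them into the new base:
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_avoid_method(cfg, "select");    // adds an entry above
 *	event_config_require_features(cfg, EV_FEATURE_O1);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */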

/* Internal use only: Macros that might be missing from <sys/queue.h> */
#ifndef LIST_END
#define LIST_END(head) NULL
#endif

#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head) ((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head) NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field) \
	for ((var) = TAILQ_FIRST(head); \
	     (var) != TAILQ_END(head); \
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	(elm)->field.tqe_next = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif
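
/* Example of the iteration pattern the fallback macros above support
 * (illustrative; note that TAILQ_FOREACH is unsafe if the current element
 * is removed from the queue inside the loop body):
 *
 *	struct event_callback *evcb;
 *	TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
 *		// inspect evcb here
 *	}
 */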

#define N_ACTIVE_CALLBACKS(base) \
	((base)->event_count_active)

int evsig_set_handler_(struct event_base *base, int evsignal,
    void (*fn)(int));
int evsig_restore_handler_(struct event_base *base, int evsignal);

int event_add_nolock_(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
/** Argument for event_del_nolock_. Tells event_del not to block on the event
 * if it's running in another thread. */
#define EVENT_DEL_NOBLOCK 0
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it's running in another thread, regardless of its value for EV_FINALIZE.
 */
#define EVENT_DEL_BLOCK 1
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it is running in another thread and it doesn't have EV_FINALIZE set.
 */
#define EVENT_DEL_AUTOBLOCK 2
/** Argument for event_del_nolock_. Tells event_del to proceed even if the
 * event is set up for finalization rather than for regular use. */
#define EVENT_DEL_EVEN_IF_FINALIZING 3
int event_del_nolock_(struct event *ev, int blocking);
int event_remove_timer_nolock_(struct event *ev);

void event_active_nolock_(struct event *ev, int res, short count);
EVENT2_EXPORT_SYMBOL
int event_callback_activate_(struct event_base *, struct event_callback *);
int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
int event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb);

void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
EVENT2_EXPORT_SYMBOL
void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));

EVENT2_EXPORT_SYMBOL
void event_active_later_(struct event *ev, int res);
void event_active_later_nolock_(struct event *ev, int res);
int event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb);
int event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing);
void event_callback_init_(struct event_base *base,
    struct event_callback *cb);

/* FIXME document. */
EVENT2_EXPORT_SYMBOL
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
EVENT2_EXPORT_SYMBOL
void event_base_assert_ok_(struct event_base *base);
void event_base_assert_ok_nolock_(struct event_base *base);

/* Helper function: Call 'cb' exactly once for every inserted or active event
 * in the event_base 'base'.
 *
 * If cb returns 0, continue on to the next event. Otherwise, return the same
 * value that cb returned.
 *
 * Requires that 'base' be locked.
 */
int event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb cb, void *arg);
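
/* Illustrative callback for the helper above, using the public
 * event_base_foreach_event_cb signature from event2/event.h:
 *
 *	static int
 *	count_one(const struct event_base *base, const struct event *ev,
 *	    void *arg)
 *	{
 *		int *count = arg;
 *		(void)base; (void)ev;
 *		++*count;
 *		return 0;	// 0 means: keep iterating
 *	}
 */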

/* Cleanup function to reset debug mode during shutdown.
 *
 * Note that calling this function does not make it possible to re-enable
 * debug mode once any events have been added.
 */
void event_disable_debug_mode(void);

#ifdef __cplusplus
}
#endif

#endif /* EVENT_INTERNAL_H_INCLUDED_ */