File: src/lib/libevent/event.c
Warning: line 137, column 36: Access to field 'name' results in a dereference of a null pointer (loaded from field 'evsel')
/*	$OpenBSD: event.c,v 1.42 2022/12/27 23:05:55 jmc Exp $	*/

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <netdb.h>
#include <asr.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

extern const struct eventop selectops;
extern const struct eventop pollops;
extern const struct eventop kqops;

/* In order of preference */
static const struct eventop *eventops[] = {
	&kqops,
	&pollops,
	&selectops,
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);

static void
gettime(struct event_base *base, struct timeval *tp)
{
	struct timespec ts;

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return;
	}

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
		event_err(1, "%s: clock_gettime", __func__);

	TIMESPEC_TO_TIMEVAL(tp, &ts);
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);
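
	/*
	 * Note on the analyzer warning at the top of this report: the
	 * flagged dereference of base->evsel->name below assumes a path
	 * where the eventops[] loop never ran (leaving evsel NULL from
	 * calloc) and where event_errx() above returned.  event_errx(1, ...)
	 * terminates the process, so the path should not be reachable in
	 * practice.
	 */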
	if (!issetugid() && getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
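
/*
 * Usage sketch (editorial addition, not in the upstream file): the typical
 * lifecycle of a base.  The backend that won (kqueue, poll, or select) can
 * be queried with event_base_get_method(), or printed at startup by
 * setting EVENT_SHOW_METHOD in the environment.
 */
#if 0
static void
example_base_lifecycle(void)
{
	struct event_base *base;

	base = event_init();		/* also sets current_base */
	event_msgx("using backend: %s", event_base_get_method(base));
	event_base_dispatch(base);	/* runs until no events remain */
	event_base_free(base);
}
#endif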

void
event_base_free(struct event_base *base)
{
	int i;
	size_t n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %zu events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	*/
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
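
/*
 * Usage sketch (editorial addition): a forked child must not keep using
 * kernel state such as a kqueue descriptor shared with its parent, so it
 * reinitializes the base before touching it again.
 */
#if 0
static void
example_fork_child(struct event_base *base)
{
	switch (fork()) {
	case -1:
		event_err(1, "fork");
	case 0:
		/* child: rebuild backend state and re-add registered events */
		if (event_reinit(base) == -1)
			event_errx(1, "event_reinit");
		break;
	default:
		break;	/* parent keeps its existing state */
	}
}
#endif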

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}
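
/*
 * Usage sketch (editorial addition): three priority queues, with one event
 * promoted to queue 0 so it is serviced before everything else.  Remember
 * that a busy priority 0 can starve the higher-numbered queues.
 */
#if 0
static void
example_priorities(struct event_base *base, struct event *urgent_ev)
{
	/* only legal while no events are active */
	if (event_base_priority_init(base, 3) == -1)
		event_errx(1, "event_base_priority_init");

	/* new events default to priority 3/2 == 1; promote this one */
	if (event_priority_set(urgent_ev, 0) == -1)
		event_errx(1, "event_priority_set");
}
#endif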

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
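
/*
 * Usage sketch (editorial addition): two ways to stop a running loop.
 * event_base_loopexit() schedules a one-shot timeout whose callback sets
 * event_gotterm, so already-queued callbacks still run.
 * event_base_loopbreak() stops the loop as soon as the current callback
 * returns.
 */
#if 0
static void
example_stop(struct event_base *base)
{
	struct timeval tv = { 2, 0 };

	/* ask the loop to terminate two seconds from now */
	if (event_base_loopexit(base, &tv) == -1)
		event_errx(1, "event_base_loopexit");

	/* or, typically from inside a callback: stop immediately */
	event_base_loopbreak(base);
}
#endif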

/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
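
/*
 * Usage sketch (editorial addition): a one-shot timer.  The event_once
 * family allocates and frees its own struct event_once wrapper, so the
 * caller supplies only the callback; the names below are illustrative.
 */
#if 0
static void
example_once_cb(int fd, short events, void *arg)
{
	event_msgx("one-shot timeout fired");
}

static void
example_schedule_once(struct event_base *base)
{
	struct timeval tv = { 5, 0 };

	if (event_base_once(base, -1, EV_TIMEOUT,
	    example_once_cb, NULL, &tv) == -1)
		event_errx(1, "event_base_once");
}
#endif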

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues / 2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues / 2;

	return (0);
}
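
/*
 * Usage sketch (editorial addition): the canonical set/base_set/add
 * sequence for a persistent read event.  The struct event must stay alive
 * for as long as it is added; EV_PERSIST keeps it registered across
 * callbacks.
 */
#if 0
static struct event example_ev;

static void
example_read_cb(int fd, short events, void *arg)
{
	char buf[512];

	(void)read(fd, buf, sizeof(buf));
}

static void
example_watch_fd(struct event_base *base, int fd)
{
	event_set(&example_ev, fd, EV_READ | EV_PERSIST,
	    example_read_cb, NULL);
	if (event_base_set(base, &example_ev) == -1)
		event_errx(1, "event_base_set");
	if (event_add(&example_ev, NULL) == -1)		/* no timeout */
		event_err(1, "event_add");
}
#endif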

/*
 * Sets the priority of an event - changing the priority fails if the
 * event is already active.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
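
/*
 * Usage sketch (editorial addition): asking how much wall-clock time is
 * left before a pending timeout fires.  As the code above shows, the
 * expiry is computed on the monotonic clock and remapped to
 * gettimeofday() time before being returned.
 */
#if 0
static void
example_query_timeout(struct event *ev)
{
	struct timeval abstime;

	if (event_pending(ev, EV_TIMEOUT, &abstime))
		event_msgx("timeout due at %lld.%06ld",
		    (long long)abstime.tv_sec, abstime.tv_usec);
}
#endif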

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
		    1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %lld seconds, call %p",
			 (long long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	gettime(base, &now);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %lld seconds", (long long)tv->tv_sec));
	return (0);
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (_EVENT_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}

/*
 * Libevent glue for ASR.
 */
struct event_asr {
	struct event ev;
	struct asr_query *async;
	void (*cb)(struct asr_result *, void *);
	void *arg;
};

static void
event_asr_dispatch(int fd __attribute__((__unused__)),
    short ev __attribute__((__unused__)), void *arg)
{
	struct event_asr *eva = arg;
	struct asr_result ar;
	struct timeval tv;

	event_del(&eva->ev);

	if (asr_run(eva->async, &ar)) {
		eva->cb(&ar, eva->arg);
		free(eva);
	} else {
		event_set(&eva->ev, ar.ar_fd,
		    ar.ar_cond == ASR_WANT_READ ? EV_READ : EV_WRITE,
		    event_asr_dispatch, eva);
		tv.tv_sec = ar.ar_timeout / 1000;
		tv.tv_usec = (ar.ar_timeout % 1000) * 1000;
		event_add(&eva->ev, &tv);
	}
}

struct event_asr *
event_asr_run(struct asr_query *async, void (*cb)(struct asr_result *, void *),
    void *arg)
{
	struct event_asr *eva;
	struct timeval tv;

	eva = calloc(1, sizeof *eva);
	if (eva == NULL)
		return (NULL);
	eva->async = async;
	eva->cb = cb;
	eva->arg = arg;
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	evtimer_set(&eva->ev, event_asr_dispatch, eva);
	evtimer_add(&eva->ev, &tv);
	return (eva);
}
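
/*
 * Usage sketch (editorial addition): driving an asynchronous resolver
 * query from the event loop.  Assumes the asr getaddrinfo_async()
 * interface; the callback, host, and service names are illustrative.
 */
#if 0
static void
example_resolve_cb(struct asr_result *ar, void *arg)
{
	if (ar->ar_gai_errno)
		event_msgx("resolve failed: %s",
		    gai_strerror(ar->ar_gai_errno));
	else
		freeaddrinfo(ar->ar_addrinfo);
}

static void
example_resolve(const char *host)
{
	struct asr_query *q;

	if ((q = getaddrinfo_async(host, "www", NULL, NULL)) == NULL)
		event_err(1, "getaddrinfo_async");
	if (event_asr_run(q, example_resolve_cb, NULL) == NULL)
		event_err(1, "event_asr_run");
}
#endif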

void
event_asr_abort(struct event_asr *eva)
{
	asr_abort(eva->async);
	event_del(&eva->ev);
	free(eva);
}