Bug Summary

File: src/lib/librthread/rthread.c
Warning: line 296, column 2
Value stored to 'e' is never read
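
The warning comes from the analyzer's dead-store check: the value written to 'e' at line 296 is overwritten on every later path before it is ever read, so the store can never be observed. Below is a minimal, self-contained illustration of the same pattern; the file and function names are hypothetical and not taken from rthread.c.

/* dead_store_demo.c - hypothetical illustration of a dead store */
#include <errno.h>
#include <stddef.h>

int
lookup(const char *key, int *out)
{
        int e;

        e = 0;                  /* flagged: value stored here is never read */
        if (key == NULL)
                e = EINVAL;     /* every branch overwrites e ... */
        else if (out == NULL)
                e = EFAULT;
        else {
                *out = 42;
                e = 0;          /* ... before it is read at the return */
        }
        return (e);
}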

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name rthread.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/lib/librthread/obj -resource-dir /usr/local/lib/clang/13.0.0 -include namespace.h -I /usr/src/lib/librthread -I /usr/src/lib/librthread/../libc/arch/amd64 -I /usr/src/lib/librthread/../libc/include -D FUTEX -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/lib/librthread/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/lib/librthread/rthread.c
1/* $OpenBSD: rthread.c,v 1.99 2017/11/04 22:53:57 jca Exp $ */
2/*
3 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18/*
19 * The heart of rthreads. Basic functions like creating and joining
20 * threads.
21 */
22
23#include <sys/types.h>
24#ifndef NO_PIC
25#include <elf.h>
26#pragma weak _DYNAMIC
27#endif
28
29#include <stdlib.h>
30#include <unistd.h>
31#include <signal.h>
32#include <stdio.h>
33#include <string.h>
34#include <errno.h>
35#include <dlfcn.h>
36#include <tib.h>
37
38#include <pthread.h>
39
40#include "cancel.h" /* in libc/include */
41#include "rthread.h"
42#include "rthread_cb.h"
43
44/*
45 * Call nonstandard functions via names in the reserved namespace:
46 * dlctl() -> _dlctl()
47 * getthrid -> _thread_sys_getthrid
48 */
49typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
50REDIRECT_SYSCALL(getthrid);
51
52/* weak stub to be overridden by ld.so */
53int dlctl(void *handle, int cmd, void *data) { return 0; }
54
55/*
56 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
57 * stubs _thread_sys_* directly.
58 */
59REDIRECT_SYSCALL(sigaction);
60REDIRECT_SYSCALL(sigprocmask);
61REDIRECT_SYSCALL(thrkill);
62
63static int concurrency_level; /* not used */
64
65int _threads_ready;
66int _post_threaded;
67size_t _thread_pagesize;
68struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
69_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
70static struct pthread_queue _thread_gc_list
71 = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
72static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
73static struct pthread _initial_thread;
74
75struct pthread_attr _rthread_attr_default = {
76 .stack_addr = NULL,
77 .stack_size = RTHREAD_STACK_SIZE_DEF,
78/* .guard_size set in _rthread_init */
79 .detach_state = PTHREAD_CREATE_JOINABLE,
80 .contention_scope = PTHREAD_SCOPE_SYSTEM,
81 .sched_policy = SCHED_OTHER,
82 .sched_param = { .sched_priority = 0 },
83 .sched_inherit = PTHREAD_INHERIT_SCHED,
84};
85
86/*
87 * internal support functions
88 */
89
90static void
91_rthread_start(void *v)
92{
93 pthread_t thread = v;
94 void *retval;
95
96 retval = thread->fn(thread->arg);
97 pthread_exit(retval);
98}
99
100static void
101sigthr_handler(__unused int sig)
102{
103 struct tib *tib = TIB_GET();
104 pthread_t self = tib->tib_thread;
105
106 /*
107 * Do nothing unless
108 * 1) pthread_cancel() has been called on this thread,
109 * 2) cancelation is enabled for it, and
110 * 3) we're not already in cancelation processing
111 */
112 if (!tib->tib_canceled || tib->tib_cantcancel)
113 return;
114
115 /*
116 * If delaying cancels inside complex ops (pthread_cond_wait,
117 * pthread_join, etc), just mark that this has happened to
118 * prevent a race with going to sleep
119 */
120 if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
121 self->delayed_cancel = 1;
122 return;
123 }
124
125 /*
126 * otherwise, if in a cancel point or async cancels are
127 * enabled, then exit
128 */
129 if (tib->tib_cancel_point ||
130 (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
131 pthread_exit(PTHREAD_CANCELED);
132}
133
134
135/*
136 * A few basic callbacks for libc. The first couple are only used
137 * on archs where there isn't a fast TCB_GET()
138 */
139#ifndef TCB_HAVE_MD_GET
140static int *
141multi_threaded_errnoptr(void)
142{
143 return (&TIB_GET()->tib_errno);
144}
145
146static void *
147multi_threaded_tcb(void)
148{
149 return (TCB_GET());
150}
151#endif /* TCB_HAVE_MD_GET */
152
153static void
154_rthread_free(pthread_t thread)
155{
156 _spinlock(&_thread_gc_lock);
157 TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
158 _spinunlock(&_thread_gc_lock);
159}
160
161static void
162_thread_release(pthread_t thread)
163{
164 _spinlock(&_thread_lock);
165 LIST_REMOVE(thread, threads);
166 _spinunlock(&_thread_lock);
167
168 _spinlock(&thread->flags_lock);
169 if (thread->flags & THREAD_DETACHED) {
170 _spinunlock(&thread->flags_lock);
171 _rthread_free(thread);
172 } else {
173 thread->flags |= THREAD_DONE;
174 _spinunlock(&thread->flags_lock);
175 _sem_post(&thread->donesem);
176 }
177}
178
179static void
180_thread_key_zero(int key)
181{
182 pthread_t thread;
183 struct rthread_storage *rs;
184
185 LIST_FOREACH(thread, &_thread_list, threads) {
186 for (rs = thread->local_storage; rs; rs = rs->next) {
187 if (rs->keyid == key)
188 rs->data = NULL;
189 }
190 }
191}
192
193void
194_rthread_init(void)
195{
196 pthread_t thread = pthread_self();
197 struct sigaction sa;
198
199 if (_threads_ready)
200 return;
201
202 LIST_INSERT_HEAD(&_thread_list, thread, threads);
203
204 _thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
205 _rthread_attr_default.guard_size = _thread_pagesize;
206 thread->attr = _rthread_attr_default;
207
208 /* get libc to start using our callbacks */
209 {
210 struct thread_callbacks cb = { 0 };
211
212#ifndef TCB_HAVE_MD_GET
213 cb.tc_errnoptr = multi_threaded_errnoptr;
214 cb.tc_tcb = multi_threaded_tcb;
215#endif
216 cb.tc_fork = _thread_fork;
217 cb.tc_vfork = _thread_vfork;
218 cb.tc_thread_release = _thread_release;
219 cb.tc_thread_key_zero = _thread_key_zero;
220 _thread_set_callbacks(&cb, sizeof(cb));
221 }
222
223#ifndef NO_PIC
224 if (_DYNAMIC) {
225 dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
226 }
227#endif
228
229 /*
230 * Set the handler on the signal used for cancelation and
231 * suspension, and make sure it's unblocked
232 */
233 memset(&sa, 0, sizeof(sa));
234 sigemptyset(&sa.sa_mask);
235 sa.sa_handler = sigthr_handler;
236 sigaction(SIGTHR, &sa, NULL);
237 sigaddset(&sa.sa_mask, SIGTHR);
238 sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
239
240 _threads_ready = 1;
241
242 _malloc_init(1);
243
244 _rthread_debug(1, "rthread init\n");
245}
246
247static void
248_rthread_reaper(void)
249{
250 pthread_t thread;
251
252restart:
253 _spinlock(&_thread_gc_lock);
254 TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
255 if (thread->tib->tib_tid != 0)
256 continue;
257 TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
258 _spinunlock(&_thread_gc_lock);
259 if (thread != &_initial_thread) {
260 _rthread_debug(3, "rthread reaping %p stack %p\n",
261 (void *)thread, (void *)thread->stack);
262 _rthread_free_stack(thread->stack);
263 _dl_free_tib(thread->tib, sizeof(*thread));
264 } else {
265 /* initial thread isn't part of TIB allocation */
266 _rthread_debug(3, "rthread reaping %p (initial)\n",
267 (void *)thread);
268 _dl_free_tib(thread->tib, 0);
269 }
270 goto restart;
271 }
272 _spinunlock(&_thread_gc_lock);
273}
274
275/*
276 * real pthread functions
277 */
278
279int
280pthread_join(pthread_t thread, void **retval)
281{
282 int e;
283 struct tib *tib = TIB_GET();
284 pthread_t self;
285 PREP_CANCEL_POINT(tib);
286
287 if (_post_threaded) {
288#define GREATSCOTT "great scott! serious repercussions on future events!\n"
289 write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
290 abort();
291 }
292 if (!_threads_ready)
293 _rthread_init();
294 self = tib->tib_thread;
295
296 e = 0;
Value stored to 'e' is never read
297 ENTER_DELAYED_CANCEL_POINT(tib, self);
298 if (thread == NULL)
299 e = EINVAL;
300 else if (thread == self)
301 e = EDEADLK;
302 else if (thread->flags & THREAD_DETACHED)
303 e = EINVAL;
304 else if ((e = _sem_wait(&thread->donesem, 0, NULL,
305 &self->delayed_cancel)) == 0) {
306 if (retval)
307 *retval = thread->retval;
308
309 /*
310 * We should be the last having a ref to this thread,
311 * but someone stupid or evil might have detached it;
312 * in that case the thread will clean up itself
313 */
314 if ((thread->flags & THREAD_DETACHED) == 0)
315 _rthread_free(thread);
316 }
317
318 LEAVE_CANCEL_POINT_INNER(tib, e);
319 _rthread_reaper();
320 return (e);
321}
322
323int
324pthread_detach(pthread_t thread)
325{
326 int rc = 0;
327
328 _spinlock(&thread->flags_lock);
329 if (thread->flags & THREAD_DETACHED) {
330 rc = EINVAL;
331 _spinunlock(&thread->flags_lock);
332 } else if (thread->flags & THREAD_DONE) {
333 _spinunlock(&thread->flags_lock);
334 _rthread_free(thread);
335 } else {
336 thread->flags |= THREAD_DETACHED;
337 _spinunlock(&thread->flags_lock);
338 }
339 _rthread_reaper();
340 return (rc);
341}
342
343int
344pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
345 void *(*start_routine)(void *), void *arg)
346{
347 extern int __isthreaded;
348 struct tib *tib;
349 pthread_t thread;
350 struct __tfork param;
351 int rc;
352
353 if (!_threads_ready)
354 _rthread_init();
355
356 _rthread_reaper();
357
358 tib = _dl_allocate_tib(sizeof(*thread));
359 if (tib == NULL)
360 return (ENOMEM);
361 thread = tib->tib_thread;
362 memset(thread, 0, sizeof(*thread));
363 thread->tib = tib;
364 thread->donesem.lock = _SPINLOCK_UNLOCKED;
365 thread->flags_lock = _SPINLOCK_UNLOCKED;
366 thread->fn = start_routine;
367 thread->arg = arg;
368 tib->tib_tid = -1;
369
370 thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
371 if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
372 pthread_t self = pthread_self();
373
374 thread->attr.sched_policy = self->attr.sched_policy;
375 thread->attr.sched_param = self->attr.sched_param;
376 }
377 if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
378 thread->flags |= THREAD_DETACHED;
379
380 thread->stack = _rthread_alloc_stack(thread);
381 if (!thread->stack) {
382 rc = errno;
383 goto fail1;
384 }
385
386 param.tf_tcb = TIB_TO_TCB(tib);
387 param.tf_tid = &tib->tib_tid;
388 param.tf_stack = thread->stack->sp;
389
390 _spinlock(&_thread_lock);
391 LIST_INSERT_HEAD(&_thread_list, thread, threads);
392 _spinunlock(&_thread_lock);
393
394 /* we're going to be multi-threaded real soon now */
395 __isthreaded = 1;
396 rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
397 if (rc != -1) {
398 /* success */
399 *threadp = thread;
400 return (0);
401 }
402
403 rc = errno;
404
405 _spinlock(&_thread_lock);
406 LIST_REMOVE(thread, threads);
407 _spinunlock(&_thread_lock);
408 _rthread_free_stack(thread->stack);
409fail1:
410 _dl_free_tib(tib, sizeof(*thread));
411
412 return (rc);
413}
414
415int
416pthread_kill(pthread_t thread, int sig)
417{
418 struct tib *tib = thread->tib;
419
420 if (sig == SIGTHR)
421 return (EINVAL);
422 if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
423 return (errno);
424 return (0);
425}
426
427int
428pthread_cancel(pthread_t thread)
429{
430 struct tib *tib = thread->tib;
431 pid_t tid = tib->tib_tid;
432
433 if (tib->tib_canceled == 0 && tid != 0 &&
434 (tib->tib_cantcancel & CANCEL_DYING) == 0) {
435 tib->tib_canceled = 1;
436
437 if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
438 thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
439 return (0);
440 }
441 }
442 return (0);
443}
444
445void
446pthread_testcancel(void)
447{
448 struct tib *tib = TIB_GET();
449
450 if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
451 pthread_exit(PTHREAD_CANCELED);
452}
453
454int
455pthread_setcancelstate(int state, int *oldstatep)
456{
457 struct tib *tib = TIB_GET();
458 int oldstate;
459
460 oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
461 PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
462 if (state == PTHREAD_CANCEL_ENABLE) {
463 tib->tib_cantcancel &= ~CANCEL_DISABLED;
464 } else if (state == PTHREAD_CANCEL_DISABLE) {
465 tib->tib_cantcancel |= CANCEL_DISABLED;
466 } else {
467 return (EINVAL);
468 }
469 if (oldstatep)
470 *oldstatep = oldstate;
471
472 return (0);
473}
474DEF_STD(pthread_setcancelstate);
475
476int
477pthread_setcanceltype(int type, int *oldtypep)
478{
479 struct tib *tib = TIB_GET();
480 int oldtype;
481
482 oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
483 PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
484 if (type == PTHREAD_CANCEL_DEFERRED) {
485 tib->tib_thread_flags &=~ TIB_THREAD_ASYNC_CANCEL;
486 } else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
487 tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
488 } else {
489 return (EINVAL);
490 }
491 if (oldtypep)
492 *oldtypep = oldtype;
493
494 return (0);
495}
496
497void
498pthread_cleanup_push(void (*fn)(void *), void *arg)
499{
500 struct rthread_cleanup_fn *clfn;
501 pthread_t self = pthread_self();
502
503 clfn = calloc(1, sizeof(*clfn));
504 if (!clfn)
505 return;
506 clfn->fn = fn;
507 clfn->arg = arg;
508 clfn->next = self->cleanup_fns;
509 self->cleanup_fns = clfn;
510}
511
512void
513pthread_cleanup_pop(int execute)
514{
515 struct rthread_cleanup_fn *clfn;
516 pthread_t self = pthread_self();
517
518 clfn = self->cleanup_fns;
519 if (clfn) {
520 self->cleanup_fns = clfn->next;
521 if (execute)
522 clfn->fn(clfn->arg);
523 free(clfn);
524 }
525}
526
527int
528pthread_getconcurrency(void)
529{
530 return (concurrency_level);
531}
532
533int
534pthread_setconcurrency(int new_level)
535{
536 if (new_level < 0)
537 return (EINVAL);
538 concurrency_level = new_level;
539 return (0);
540}
541
542/*
543 * compat debug stuff
544 */
545void
546_thread_dump_info(void)
547{
548 pthread_t thread;
549
550 _spinlock(&_thread_lock);
551 LIST_FOREACH(thread, &_thread_list, threads)
552 printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
553 thread->tib->tib_thread_flags, thread->name);
554 _spinunlock(&_thread_lock);
555}
556
557#ifndef NO_PIC
558/*
559 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
560 * the function called via atexit() to invoke all destructors. The latter
561 * two call shared-object destructors, which may need to call dlclose(),
562 * so this lock needs to permit recursive locking.
563 * The specific code here was extracted from _rthread_mutex_lock() and
564 * pthread_mutex_unlock() and simplified to use the static variables.
565 */
566void
567_rthread_dl_lock(int what)
568{
569 static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
570 static pthread_t owner = NULL;
571 static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
572 static int count = 0;
573
574 if (what == 0) {
575 pthread_t self = pthread_self();
576
577 /* lock, possibly recursive */
578 _spinlock(&lock);
579 if (owner == NULL) {
580 owner = self;
581 } else if (owner != self) {
582 TAILQ_INSERT_TAIL(&lockers, self, waiting);
583 while (owner != self) {
584 __thrsleep(self, 0, NULL, &lock, NULL);
585 _spinlock(&lock);
586 }
587 }
588 count++;
589 _spinunlock(&lock);
590 } else if (what == 1) {
591 /* unlock, possibly recursive */
592 if (--count == 0) {
593 pthread_t next;
594
595 _spinlock(&lock);
596 owner = next = TAILQ_FIRST(&lockers);
597 if (next != NULL)
598 TAILQ_REMOVE(&lockers, next, waiting);
599 _spinunlock(&lock);
600 if (next != NULL)
601 __thrwakeup(next, 1);
602 }
603 } else {
604 /* reinit: used in child after fork to clear the queue */
605 lock = _SPINLOCK_UNLOCKED;
606 if (--count == 0)
607 owner = NULL;
608 TAILQ_INIT(&lockers);
609 }
610}
611#endif
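
Because every branch of the if/else chain at lines 298-304 stores to 'e' before LEAVE_CANCEL_POINT_INNER() and the final return read it, the initialization at line 296 is redundant and could simply be dropped. The excerpt below is a rough, untested sketch of the flagged region of pthread_join() with that store removed; it is not an upstream OpenBSD change, and the elided block is unchanged from the original.

        self = tib->tib_thread;

        /* no "e = 0;" here: every branch below assigns e before it is read */
        ENTER_DELAYED_CANCEL_POINT(tib, self);
        if (thread == NULL)
                e = EINVAL;
        else if (thread == self)
                e = EDEADLK;
        else if (thread->flags & THREAD_DETACHED)
                e = EINVAL;
        else if ((e = _sem_wait(&thread->donesem, 0, NULL,
            &self->delayed_cancel)) == 0) {
                /* copy retval and release the thread as in the original */
        }

        LEAVE_CANCEL_POINT_INNER(tib, e);
        _rthread_reaper();
        return (e);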