File: kern/kern_synch.c
Warning: line 460, column 7: Although the value stored to 'sig' is used in the enclosing expression, the value is never actually read from 'sig'
1 | /* $OpenBSD: kern_synch.c,v 1.200 2023/09/13 14:25:49 claudio Exp $ */ |
2 | /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (c) 1982, 1986, 1990, 1991, 1993 |
6 | * The Regents of the University of California. All rights reserved. |
7 | * (c) UNIX System Laboratories, Inc. |
8 | * All or some portions of this file are derived from material licensed |
9 | * to the University of California by American Telephone and Telegraph |
10 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
11 | * the permission of UNIX System Laboratories, Inc. |
12 | * |
13 | * Redistribution and use in source and binary forms, with or without |
14 | * modification, are permitted provided that the following conditions |
15 | * are met: |
16 | * 1. Redistributions of source code must retain the above copyright |
17 | * notice, this list of conditions and the following disclaimer. |
18 | * 2. Redistributions in binary form must reproduce the above copyright |
19 | * notice, this list of conditions and the following disclaimer in the |
20 | * documentation and/or other materials provided with the distribution. |
21 | * 3. Neither the name of the University nor the names of its contributors |
22 | * may be used to endorse or promote products derived from this software |
23 | * without specific prior written permission. |
24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
27 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
28 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
29 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
30 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
31 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
32 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
33 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
34 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
35 | * SUCH DAMAGE. |
36 | * |
37 | * @(#)kern_synch.c 8.6 (Berkeley) 1/21/94 |
38 | */ |
39 | |
40 | #include <sys/param.h> |
41 | #include <sys/systm.h> |
42 | #include <sys/proc.h> |
43 | #include <sys/kernel.h> |
44 | #include <sys/signalvar.h> |
45 | #include <sys/sched.h> |
46 | #include <sys/timeout.h> |
47 | #include <sys/mount.h> |
48 | #include <sys/syscallargs.h> |
49 | #include <sys/refcnt.h> |
50 | #include <sys/atomic.h> |
51 | #include <sys/tracepoint.h> |
52 | |
53 | #include <ddb/db_output.h> |
54 | |
55 | #include <machine/spinlock.h> |
56 | |
57 | #ifdef DIAGNOSTIC |
58 | #include <sys/syslog.h> |
59 | #endif |
60 | |
61 | #ifdef KTRACE |
62 | #include <sys/ktrace.h> |
63 | #endif |
64 | |
65 | int sleep_signal_check(void); |
66 | int thrsleep(struct proc *, struct sys___thrsleep_args *); |
67 | int thrsleep_unlock(void *); |
68 | |
69 | /* |
70 | * We're only looking at 7 bits of the address; everything is |
71 | * aligned to 4, lots of things are aligned to greater powers |
72 | * of 2. Shift right by 8, i.e. drop the bottom 256 worth. |
73 | */ |
74 | #define TABLESIZE 128 |
75 | #define LOOKUP(x) (((long)(x) >> 8) & (TABLESIZE - 1)) |
76 | TAILQ_HEAD(slpque,proc) slpque[TABLESIZE]; |
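As a worked example of the hash (the address is hypothetical): for ident == 0xffff80000012a4c0, shifting right by 8 gives 0xffff80000012a4, and masking with TABLESIZE - 1 (0x7f) keeps the low 7 bits, 0x24, so a proc sleeping on that ident is queued on slpque[36].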
77 | |
78 | void |
79 | sleep_queue_init(void) |
80 | { |
81 | int i; |
82 | |
83 | for (i = 0; i < TABLESIZE; i++) |
84 | TAILQ_INIT(&slpque[i]); |
85 | } |
86 | |
87 | /* |
88 | * Global sleep channel for threads that do not want to |
89 | * receive wakeup(9) broadcasts. |
90 | */ |
91 | int nowake; |
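A minimal sketch of how a caller might use the nowake channel (the function, flags, and duration here are illustrative assumptions, not taken from this file): because no wakeup(9) is ever posted on &nowake, only the timeout or, with PCATCH, a signal can end the sleep.

	/* Hypothetical: pause for about one second, interruptible by signals. */
	int
	example_pause(void)
	{
		return tsleep_nsec(&nowake, PWAIT | PCATCH, "pause", SEC_TO_NSEC(1));
	}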
92 | |
93 | /* |
94 | * During autoconfiguration or after a panic, a sleep will simply |
95 | * lower the priority briefly to allow interrupts, then return. |
96 | * The priority to be used (safepri) is machine-dependent, thus this |
97 | * value is initialized and maintained in the machine-dependent layers. |
98 | * This priority will typically be 0, or the lowest priority |
99 | * that is safe for use on the interrupt stack; it can be made |
100 | * higher to block network software interrupts after panics. |
101 | */ |
102 | extern int safepri; |
103 | |
104 | /* |
105 | * General sleep call. Suspends the current process until a wakeup is |
106 | * performed on the specified identifier. The process will then be made |
107 | * runnable with the specified priority. Sleeps at most timo/hz seconds |
108 | * (0 means no timeout). If pri includes the PCATCH flag, signals are |
109 | * checked before and after sleeping, else signals are not checked. |
110 | * Returns 0 if awakened, EWOULDBLOCK if the timeout expires. If PCATCH |
111 | * is set and a signal needs to be delivered, ERESTART is returned if |
112 | * the current system call should be restarted if possible, and EINTR |
113 | * is returned if the system call should be interrupted by the signal. |
114 | */ |
115 | int |
116 | tsleep(const volatile void *ident, int priority, const char *wmesg, int timo) |
117 | { |
118 | #ifdef MULTIPROCESSOR |
119 | int hold_count; |
120 | #endif |
121 | |
122 | KASSERT((priority & ~(PRIMASK | PCATCH)) == 0); |
123 | KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0); |
124 | |
125 | #ifdef MULTIPROCESSOR |
126 | KASSERT(ident == &nowake || timo || _kernel_lock_held()); |
127 | #endif |
128 | |
129 | #ifdef DDB |
130 | if (cold == 2) |
131 | db_stack_dump(); |
132 | #endif |
133 | if (cold || panicstr) { |
134 | int s; |
135 | /* |
136 | * After a panic, or during autoconfiguration, |
137 | * just give interrupts a chance, then just return; |
138 | * don't run any other procs or panic below, |
139 | * in case this is the idle process and already asleep. |
140 | */ |
141 | s = splhigh(); |
142 | splx(safepri); |
143 | #ifdef MULTIPROCESSOR |
144 | if (_kernel_lock_held()) { |
145 | hold_count = __mp_release_all(&kernel_lock); |
146 | __mp_acquire_count(&kernel_lock, hold_count); |
147 | } |
148 | #endif |
149 | splx(s); |
150 | return (0); |
151 | } |
152 | |
153 | sleep_setup(ident, priority, wmesg); |
154 | return sleep_finish(timo, 1); |
155 | } |
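To make the sleep/wakeup contract concrete, here is a rough sketch of the usual pairing (the softc fields and wait message are hypothetical): the waiter re-checks its condition in a loop, since a wakeup only makes the thread runnable and the condition may have changed again by the time it actually runs.

	/* Waiter, relying on the kernel lock that plain tsleep asserts: */
	while (sc->sc_ready == 0)
		tsleep_nsec(&sc->sc_ready, PWAIT, "scready", INFSLP);

	/* Waker, elsewhere: */
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);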
156 | |
157 | int |
158 | tsleep_nsec(const volatile void *ident, int priority, const char *wmesg, |
159 | uint64_t nsecs) |
160 | { |
161 | uint64_t to_ticks; |
162 | |
163 | if (nsecs == INFSLP) |
164 | return tsleep(ident, priority, wmesg, 0); |
165 | #ifdef DIAGNOSTIC |
166 | if (nsecs == 0) { |
167 | log(LOG_WARNING, |
168 | "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n", |
169 | __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid, |
170 | wmesg); |
171 | } |
172 | #endif |
173 | /* |
174 | * We want to sleep at least nsecs nanoseconds worth of ticks. |
175 | * |
176 | * - Clamp nsecs to prevent arithmetic overflow. |
177 | * |
178 | * - Round nsecs up to account for any nanoseconds that do not |
179 | * divide evenly into tick_nsec, otherwise we'll lose them to |
180 | * integer division in the next step. We add (tick_nsec - 1) |
181 | * to keep from introducing a spurious tick if there are no |
182 | * such nanoseconds, i.e. nsecs % tick_nsec == 0. |
183 | * |
184 | * - Divide the rounded value to a count of ticks. We divide |
185 | * by (tick_nsec + 1) to discard the extra tick introduced if, |
186 | * before rounding, nsecs % tick_nsec == 1. |
187 | * |
188 | * - Finally, add a tick to the result. We need to wait out |
189 | * the current tick before we can begin counting our interval, |
190 | * as we do not know how much time has elapsed since the |
191 | * current tick began. |
192 | */ |
193 | nsecs = MIN(nsecs, UINT64_MAX - tick_nsec); |
194 | to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1; |
195 | if (to_ticks > INT_MAX) |
196 | to_ticks = INT_MAX; |
197 | return tsleep(ident, priority, wmesg, (int)to_ticks); |
198 | } |
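A worked instance of the conversion above, assuming hz = 100 so tick_nsec = 10,000,000: a request of nsecs = 25,000,000 (25 ms) gives (25,000,000 + 9,999,999) / 10,000,001 = 3, plus the trailing tick for the partially elapsed current tick, so to_ticks = 4. Waiting four ticks guarantees at least three full tick periods (30 ms) of sleep, which covers the requested 25 ms even if the call lands just before a tick boundary.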
199 | |
200 | /* |
201 | * Same as tsleep, but if we have a mutex provided, then once we've |
202 | * entered the sleep queue we drop the mutex. After sleeping we re-lock. |
203 | */ |
204 | int |
205 | msleep(const volatile void *ident, struct mutex *mtx, int priority, |
206 | const char *wmesg, int timo) |
207 | { |
208 | int error, spl; |
209 | #ifdef MULTIPROCESSOR |
210 | int hold_count; |
211 | #endif |
212 | |
213 | KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0); |
214 | KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0); |
215 | KASSERT(mtx != NULL); |
216 | |
217 | #ifdef DDB |
218 | if (cold == 2) |
219 | db_stack_dump(); |
220 | #endif |
221 | if (cold || panicstr) { |
222 | /* |
223 | * After a panic, or during autoconfiguration, |
224 | * just give interrupts a chance, then just return; |
225 | * don't run any other procs or panic below, |
226 | * in case this is the idle process and already asleep. |
227 | */ |
228 | spl = MUTEX_OLDIPL(mtx); |
229 | MUTEX_OLDIPL(mtx) = safepri; |
230 | mtx_leave(mtx); |
231 | #ifdef MULTIPROCESSOR |
232 | if (_kernel_lock_held()) { |
233 | hold_count = __mp_release_all(&kernel_lock); |
234 | __mp_acquire_count(&kernel_lock, hold_count); |
235 | } |
236 | #endif |
237 | if ((priority & PNORELOCK) == 0) { |
238 | mtx_enter(mtx); |
239 | MUTEX_OLDIPL(mtx) = spl; |
240 | } else |
241 | splx(spl); |
242 | return (0); |
243 | } |
244 | |
245 | sleep_setup(ident, priority, wmesg); |
246 | |
247 | mtx_leave(mtx); |
248 | /* signal may stop the process, release mutex before that */ |
249 | error = sleep_finish(timo, 1); |
250 | |
251 | if ((priority & PNORELOCK) == 0) |
252 | mtx_enter(mtx); |
253 | |
254 | return error; |
255 | } |
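A minimal sketch of the intended msleep() pattern (the mutex, flag, and wait message are hypothetical): the mutex protects the condition, msleep_nsec() enqueues the thread before dropping it, and the condition is re-checked once the mutex is held again.

	mtx_enter(&sc->sc_mtx);
	while (sc->sc_done == 0)
		msleep_nsec(&sc->sc_done, &sc->sc_mtx, PWAIT, "scdone", INFSLP);
	mtx_leave(&sc->sc_mtx);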
256 | |
257 | int |
258 | msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority, |
259 | const char *wmesg, uint64_t nsecs) |
260 | { |
261 | uint64_t to_ticks; |
262 | |
263 | if (nsecs == INFSLP) |
264 | return msleep(ident, mtx, priority, wmesg, 0); |
265 | #ifdef DIAGNOSTIC |
266 | if (nsecs == 0) { |
267 | log(LOG_WARNING, |
268 | "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n", |
269 | __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid, |
270 | wmesg); |
271 | } |
272 | #endif |
273 | nsecs = MIN(nsecs, UINT64_MAX - tick_nsec); |
274 | to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1; |
275 | if (to_ticks > INT_MAX) |
276 | to_ticks = INT_MAX; |
277 | return msleep(ident, mtx, priority, wmesg, (int)to_ticks); |
278 | } |
279 | |
280 | /* |
281 | * Same as tsleep, but if we have a rwlock provided, then once we've |
282 | * entered the sleep queue we drop it. After sleeping we re-lock. |
283 | */ |
284 | int |
285 | rwsleep(const volatile void *ident, struct rwlock *rwl, int priority, |
286 | const char *wmesg, int timo) |
287 | { |
288 | int error, status; |
289 | |
290 | KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0); |
291 | KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0); |
292 | KASSERT(ident != rwl); |
293 | rw_assert_anylock(rwl); |
294 | status = rw_status(rwl); |
295 | |
296 | sleep_setup(ident, priority, wmesg); |
297 | |
298 | rw_exit(rwl); |
299 | /* signal may stop the process, release rwlock before that */ |
300 | error = sleep_finish(timo, 1); |
301 | |
302 | if ((priority & PNORELOCK) == 0) |
303 | rw_enter(rwl, status); |
304 | |
305 | return error; |
306 | } |
307 | |
308 | int |
309 | rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority, |
310 | const char *wmesg, uint64_t nsecs) |
311 | { |
312 | uint64_t to_ticks; |
313 | |
314 | if (nsecs == INFSLP) |
315 | return rwsleep(ident, rwl, priority, wmesg, 0); |
316 | #ifdef DIAGNOSTIC |
317 | if (nsecs == 0) { |
318 | log(LOG_WARNING, |
319 | "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n", |
320 | __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid, |
321 | wmesg); |
322 | } |
323 | #endif |
324 | nsecs = MIN(nsecs, UINT64_MAX - tick_nsec); |
325 | to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1; |
326 | if (to_ticks > INT_MAX) |
327 | to_ticks = INT_MAX; |
328 | return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks); |
329 | } |
330 | |
331 | void |
332 | sleep_setup(const volatile void *ident, int prio, const char *wmesg) |
333 | { |
334 | struct proc *p = curproc; |
335 | int s; |
336 | |
337 | #ifdef DIAGNOSTIC |
338 | if (p->p_flag & P_CANTSLEEP) |
339 | panic("sleep: %s failed insomnia", p->p_p->ps_comm); |
340 | if (ident == NULL) |
341 | panic("tsleep: no ident"); |
342 | if (p->p_stat != SONPROC) |
343 | panic("tsleep: not SONPROC"); |
344 | #endif |
345 | |
346 | SCHED_LOCK(s); |
347 | |
348 | TRACEPOINT(sched, sleep, NULL); |
349 | |
350 | p->p_wchan = ident; |
351 | p->p_wmesg = wmesg; |
352 | p->p_slptime = 0; |
353 | p->p_slppri = prio & PRIMASK; |
354 | atomic_setbits_int(&p->p_flag, P_WSLEEP); |
355 | TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq); |
356 | if (prio & PCATCH) |
357 | atomic_setbits_int(&p->p_flag, P_SINTR); |
358 | p->p_stat = SSLEEP; |
359 | |
360 | SCHED_UNLOCK(s); |
361 | } |
362 | |
363 | int |
364 | sleep_finish(int timo, int do_sleep) |
365 | { |
366 | struct proc *p = curproc; |
367 | int s, catch, error = 0, error1 = 0; |
368 | |
369 | catch = p->p_flag & P_SINTR; |
370 | |
371 | if (timo != 0) { |
372 | KASSERT((p->p_flag & P_TIMEOUT) == 0); |
373 | timeout_add(&p->p_sleep_to, timo); |
374 | } |
375 | |
376 | if (catch != 0) { |
377 | /* |
378 | * We put ourselves on the sleep queue and start our |
379 | * timeout before calling sleep_signal_check(), as we could |
380 | * stop there, and a wakeup or a SIGCONT (or both) could |
381 | * occur while we were stopped. A SIGCONT would cause |
382 | * us to be marked as SSLEEP without resuming us, thus |
383 | * we must be ready for sleep when sleep_signal_check() is |
384 | * called. |
385 | */ |
386 | if ((error = sleep_signal_check()) != 0) { |
387 | catch = 0; |
388 | do_sleep = 0; |
389 | } |
390 | } |
391 | |
392 | SCHED_LOCK(s); |
393 | /* |
394 | * If the wakeup happens while going to sleep, p->p_wchan |
395 | * will be NULL. In that case unwind immediately but still |
396 | * check for possible signals and timeouts. |
397 | */ |
398 | if (p->p_wchan == NULL) |
399 | do_sleep = 0; |
400 | atomic_clearbits_int(&p->p_flag, P_WSLEEP); |
401 | |
402 | if (do_sleep) { |
403 | KASSERT(p->p_stat == SSLEEP || p->p_stat == SSTOP); |
404 | p->p_ru.ru_nvcsw++; |
405 | mi_switch(); |
406 | } else { |
407 | KASSERT(p->p_stat == SONPROC || p->p_stat == SSLEEP || |
408 | p->p_stat == SSTOP); |
409 | unsleep(p); |
410 | p->p_stat = SONPROC; |
411 | } |
412 | |
413 | #ifdef DIAGNOSTIC |
414 | if (p->p_stat != SONPROC) |
415 | panic("sleep_finish !SONPROC"); |
416 | #endif |
417 | |
418 | p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri; |
419 | SCHED_UNLOCK(s); |
420 | |
421 | /* |
422 | * Even though this belongs to the signal handling part of sleep, |
423 | * we need to clear it before the ktrace. |
424 | */ |
425 | atomic_clearbits_int(&p->p_flag, P_SINTR); |
426 | |
427 | if (timo != 0) { |
428 | if (p->p_flag & P_TIMEOUT) { |
429 | error1 = EWOULDBLOCK; |
430 | } else { |
431 | /* This can sleep. It must not use timeouts. */ |
432 | timeout_del_barrier(&p->p_sleep_to); |
433 | } |
434 | atomic_clearbits_int(&p->p_flag, P_TIMEOUT); |
435 | } |
436 | |
437 | /* Check if the thread was woken up because of an unwind or signal */ |
438 | if (catch != 0) |
439 | error = sleep_signal_check(); |
440 | |
441 | /* Signal errors are higher priority than timeouts. */ |
442 | if (error == 0 && error1 != 0) |
443 | error = error1; |
444 | |
445 | return error; |
446 | } |
447 | |
448 | /* |
449 | * Check and handle signals and suspensions around a sleep cycle. |
450 | */ |
451 | int |
452 | sleep_signal_check(void) |
453 | { |
454 | struct proc *p = curproc; |
455 | struct sigctx ctx; |
456 | int err, sig; |
457 | |
458 | if ((err = single_thread_check(p, 1)) != 0) |
459 | return err; |
460 | if ((sig = cursig(p, &ctx)) != 0) { |
Although the value stored to 'sig' is used in the enclosing expression, the value is never actually read from 'sig'
461 | if (ctx.sig_intr) |
462 | return EINTR; |
463 | else |
464 | return ERESTART; |
465 | } |
466 | return 0; |
467 | } |
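The analyzer's point is that the value assigned to sig on line 460 is only compared against zero and never read afterwards. One possible way to drop the dead store, assuming no later code will need the signal number, is to discard cursig()'s return value and branch on the comparison alone; this is a sketch, not the file's current code.

	int
	sleep_signal_check(void)
	{
		struct proc *p = curproc;
		struct sigctx ctx;
		int err;

		if ((err = single_thread_check(p, 1)) != 0)
			return err;
		if (cursig(p, &ctx) != 0)
			return ctx.sig_intr ? EINTR : ERESTART;
		return 0;
	}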
468 | |
469 | int |
470 | wakeup_proc(struct proc *p, const volatile void *chan, int flags) |
471 | { |
472 | int awakened = 0; |
473 | |
474 | SCHED_ASSERT_LOCKED(); |
475 | |
476 | if (p->p_wchan != NULL && |
477 | ((chan == NULL) || (p->p_wchan == chan))) { |
478 | awakened = 1; |
479 | if (flags) |
480 | atomic_setbits_int(&p->p_flag, flags); |
481 | if (p->p_stat == SSLEEP) |
482 | setrunnable(p); |
483 | else if (p->p_stat == SSTOP) |
484 | unsleep(p); |
485 | #ifdef DIAGNOSTIC |
486 | else |
487 | panic("wakeup: p_stat is %d", (int)p->p_stat); |
488 | #endif |
489 | } |
490 | |
491 | return awakened; |
492 | } |
493 | |
494 | |
495 | /* |
496 | * Implement timeout for tsleep. |
497 | * If process hasn't been awakened (wchan non-zero), |
498 | * set timeout flag and undo the sleep. If proc |
499 | * is stopped, just unsleep so it will remain stopped. |
500 | */ |
501 | void |
502 | endtsleep(void *arg) |
503 | { |
504 | struct proc *p = arg; |
505 | int s; |
506 | |
507 | SCHED_LOCK(s); |
508 | wakeup_proc(p, NULL, P_TIMEOUT); |
509 | SCHED_UNLOCK(s); |
510 | } |
511 | |
512 | /* |
513 | * Remove a process from its wait queue |
514 | */ |
515 | void |
516 | unsleep(struct proc *p) |
517 | { |
518 | SCHED_ASSERT_LOCKED(); |
519 | |
520 | if (p->p_wchan != NULL) { |
521 | TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq); |
522 | p->p_wchan = NULL; |
523 | TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET, |
524 | p->p_p->ps_pid); |
525 | } |
526 | } |
527 | |
528 | /* |
529 | * Make a number of processes sleeping on the specified identifier runnable. |
530 | */ |
531 | void |
532 | wakeup_n(const volatile void *ident, int n) |
533 | { |
534 | struct slpque *qp; |
535 | struct proc *p; |
536 | struct proc *pnext; |
537 | int s; |
538 | |
539 | SCHED_LOCK(s); |
540 | qp = &slpque[LOOKUP(ident)]; |
541 | for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) { |
542 | pnext = TAILQ_NEXT(p, p_runq); |
543 | #ifdef DIAGNOSTIC |
544 | if (p->p_stat != SSLEEP && p->p_stat != SSTOP) |
545 | panic("wakeup: p_stat is %d", (int)p->p_stat); |
546 | #endif |
547 | if (wakeup_proc(p, ident, 0)) |
548 | --n; |
549 | } |
550 | SCHED_UNLOCK(s); |
551 | } |
552 | |
553 | /* |
554 | * Make all processes sleeping on the specified identifier runnable. |
555 | */ |
556 | void |
557 | wakeup(const volatile void *chan) |
558 | { |
559 | wakeup_n(chan, -1); |
560 | } |
561 | |
562 | int |
563 | sys_sched_yield(struct proc *p, void *v, register_t *retval) |
564 | { |
565 | struct proc *q; |
566 | uint8_t newprio; |
567 | int s; |
568 | |
569 | SCHED_LOCK(s); |
570 | /* |
571 | * If one of the threads of a multi-threaded process called |
572 | * sched_yield(2), drop its priority to ensure its siblings |
573 | * can make some progress. |
574 | */ |
575 | newprio = p->p_usrpri; |
576 | TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) |
577 | newprio = max(newprio, q->p_runpri); |
578 | setrunqueue(p->p_cpu, p, newprio); |
579 | p->p_ru.ru_nvcsw++; |
580 | mi_switch(); |
581 | SCHED_UNLOCK(s); |
582 | |
583 | return (0); |
584 | } |
585 | |
586 | int |
587 | thrsleep_unlock(void *lock) |
588 | { |
589 | static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED; |
590 | _atomic_lock_t *atomiclock = lock; |
591 | |
592 | if (!lock) |
593 | return 0; |
594 | |
595 | return copyout(&unlocked, atomiclock, sizeof(unlocked)); |
596 | } |
597 | |
598 | struct tslpentry { |
599 | TAILQ_ENTRY(tslpentry) tslp_link; |
600 | long tslp_ident; |
601 | }; |
602 | |
603 | /* thrsleep queue shared between processes */ |
604 | static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue); |
605 | static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk"); |
606 | |
607 | int |
608 | thrsleep(struct proc *p, struct sys___thrsleep_args *v) |
609 | { |
610 | struct sys___thrsleep_args /* { |
611 | syscallarg(const volatile void *) ident; |
612 | syscallarg(clockid_t) clock_id; |
613 | syscallarg(const struct timespec *) tp; |
614 | syscallarg(void *) lock; |
615 | syscallarg(const int *) abort; |
616 | } */ *uap = v; |
617 | long ident = (long)SCARG(uap, ident); |
618 | struct tslpentry entry; |
619 | struct tslpqueue *queue; |
620 | struct rwlock *qlock; |
621 | struct timespec *tsp = (struct timespec *)SCARG(uap, tp); |
622 | void *lock = SCARG(uap, lock); |
623 | uint64_t nsecs = INFSLP; |
624 | int abort = 0, error; |
625 | clockid_t clock_id = SCARG(uap, clock_id); |
626 | |
627 | if (ident == 0) |
628 | return (EINVAL); |
629 | if (tsp != NULL) { |
630 | struct timespec now; |
631 | |
632 | if ((error = clock_gettime(p, clock_id, &now))) |
633 | return (error); |
634 | #ifdef KTRACE |
635 | if (KTRPOINT(p, KTR_STRUCT)) |
636 | ktrabstimespec(p, tsp); |
637 | #endif |
638 | |
639 | if (timespeccmp(tsp, &now, <=)) { |
640 | /* already passed: still do the unlock */ |
641 | if ((error = thrsleep_unlock(lock))) |
642 | return (error); |
643 | return (EWOULDBLOCK); |
644 | } |
645 | |
646 | timespecsub(tsp, &now, tsp); |
647 | nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP); |
648 | } |
649 | |
650 | if (ident == -1) { |
651 | queue = &thrsleep_queue; |
652 | qlock = &thrsleep_lock; |
653 | } else { |
654 | queue = &p->p_p->ps_tslpqueue; |
655 | qlock = &p->p_p->ps_lock; |
656 | } |
657 | |
658 | /* Interlock with wakeup. */ |
659 | entry.tslp_ident = ident; |
660 | rw_enter_write(qlock); |
661 | TAILQ_INSERT_TAIL(queue, &entry, tslp_link); |
662 | rw_exit_write(qlock); |
663 | |
664 | error = thrsleep_unlock(lock); |
665 | |
666 | if (error == 0 && SCARG(uap, abort) != NULL) |
667 | error = copyin(SCARG(uap, abort), &abort, sizeof(abort)); |
668 | |
669 | rw_enter_write(qlock); |
670 | if (error != 0) |
671 | goto out; |
672 | if (abort != 0) { |
673 | error = EINTR; |
674 | goto out; |
675 | } |
676 | if (entry.tslp_ident != 0) { |
677 | error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep", |
678 | nsecs); |
679 | } |
680 | |
681 | out: |
682 | if (entry.tslp_ident != 0) |
683 | TAILQ_REMOVE(queue, &entry, tslp_link); |
684 | rw_exit_write(qlock); |
685 | |
686 | if (error == ERESTART) |
687 | error = ECANCELED; |
688 | |
689 | return (error); |
690 | |
691 | } |
692 | |
693 | int |
694 | sys___thrsleep(struct proc *p, void *v, register_t *retval) |
695 | { |
696 | struct sys___thrsleep_args /* { |
697 | syscallarg(const volatile void *) ident; |
698 | syscallarg(clockid_t) clock_id; |
699 | syscallarg(struct timespec *) tp; |
700 | syscallarg(void *) lock; |
701 | syscallarg(const int *) abort; |
702 | } */ *uap = v; |
703 | struct timespec ts; |
704 | int error; |
705 | |
706 | if (SCARG(uap, tp) != NULL) { |
707 | if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) { |
708 | *retval = error; |
709 | return 0; |
710 | } |
711 | if (!timespecisvalid(&ts)) { |
712 | *retval = EINVAL; |
713 | return 0; |
714 | } |
715 | SCARG(uap, tp) = &ts; |
716 | } |
717 | |
718 | *retval = thrsleep(p, uap); |
719 | return 0; |
720 | } |
721 | |
722 | int |
723 | sys___thrwakeup(struct proc *p, void *v, register_t *retval) |
724 | { |
725 | struct sys___thrwakeup_args /* { |
726 | syscallarg(const volatile void *) ident; |
727 | syscallarg(int) n; |
728 | } */ *uap = v; |
729 | struct tslpentry *entry, *tmp; |
730 | struct tslpqueue *queue; |
731 | struct rwlock *qlock; |
732 | long ident = (long)SCARG(uap, ident); |
733 | int n = SCARG(uap, n); |
734 | int found = 0; |
735 | |
736 | if (ident == 0) |
737 | *retval = EINVAL; |
738 | else { |
739 | if (ident == -1) { |
740 | queue = &thrsleep_queue; |
741 | qlock = &thrsleep_lock; |
742 | /* |
743 | * Wake up all waiters with ident -1. This is needed |
744 | * because ident -1 can be shared by multiple userspace |
745 | * lock state machines concurrently. The implementation |
746 | * has no way to direct the wakeup to a particular |
747 | * state machine. |
748 | */ |
749 | n = 0; |
750 | } else { |
751 | queue = &p->p_p->ps_tslpqueue; |
752 | qlock = &p->p_p->ps_lock; |
753 | } |
754 | |
755 | rw_enter_write(qlock); |
756 | TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) { |
757 | if (entry->tslp_ident == ident) { |
758 | TAILQ_REMOVE(queue, entry, tslp_link); |
759 | entry->tslp_ident = 0; |
760 | wakeup_one(entry); |
761 | if (++found == n) |
762 | break; |
763 | } |
764 | } |
765 | rw_exit_write(qlock); |
766 | |
767 | if (ident == -1) |
768 | *retval = 0; |
769 | else |
770 | *retval = found ? 0 : ESRCH; |
771 | } |
772 | |
773 | return (0); |
774 | } |
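Viewed from userland, the pair behaves like a futex-style park/unpark primitive. A rough sketch of how a thread library might drive it (the argument order follows the syscall argument structs above; the flag variable and loop are illustrative assumptions):

	volatile int parked = 1;

	/* Waiter: block until another thread wakes this address. */
	while (parked)
		__thrsleep(&parked, CLOCK_MONOTONIC, NULL, NULL, NULL);

	/* Waker: clear the flag first, then wake one sleeper on the address. */
	parked = 0;
	__thrwakeup(&parked, 1);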
775 | |
776 | void |
777 | refcnt_init(struct refcnt *r) |
778 | { |
779 | refcnt_init_trace(r, 0); |
780 | } |
781 | |
782 | void |
783 | refcnt_init_trace(struct refcnt *r, int idx) |
784 | { |
785 | r->r_traceidx = idx; |
786 | atomic_store_int(&r->r_refs, 1); |
787 | TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1); |
788 | } |
789 | |
790 | void |
791 | refcnt_take(struct refcnt *r) |
792 | { |
793 | u_int refs; |
794 | |
795 | refs = atomic_inc_int_nv(&r->r_refs); |
796 | KASSERT(refs != 0); |
797 | TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1); |
798 | (void)refs; |
799 | } |
800 | |
801 | int |
802 | refcnt_rele(struct refcnt *r) |
803 | { |
804 | u_int refs; |
805 | |
806 | membar_exit_before_atomic(); |
807 | refs = atomic_dec_int_nv(&r->r_refs); |
808 | KASSERT(refs != ~0); |
809 | TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1); |
810 | if (refs == 0) { |
811 | membar_enter_after_atomic(); |
812 | return (1); |
813 | } |
814 | return (0); |
815 | } |
816 | |
817 | void |
818 | refcnt_rele_wake(struct refcnt *r) |
819 | { |
820 | if (refcnt_rele(r)) |
821 | wakeup_one(r); |
822 | } |
823 | |
824 | void |
825 | refcnt_finalize(struct refcnt *r, const char *wmesg) |
826 | { |
827 | u_int refs; |
828 | |
829 | membar_exit_before_atomic(); |
830 | refs = atomic_dec_int_nv(&r->r_refs); |
831 | KASSERT(refs != ~0); |
832 | TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1); |
833 | while (refs) { |
834 | sleep_setup(r, PWAIT, wmesg); |
835 | refs = atomic_load_int(&r->r_refs); |
836 | sleep_finish(0, refs); |
837 | } |
838 | TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0); |
839 | /* Order subsequent loads and stores after refs == 0 load. */ |
840 | membar_sync(); |
841 | } |
842 | |
843 | int |
844 | refcnt_shared(struct refcnt *r) |
845 | { |
846 | u_int refs; |
847 | |
848 | refs = atomic_load_int(&r->r_refs); |
849 | TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0); |
850 | return (refs > 1); |
851 | } |
852 | |
853 | unsigned int |
854 | refcnt_read(struct refcnt *r) |
855 | { |
856 | u_int refs; |
857 | |
858 | refs = atomic_load_int(&r->r_refs); |
859 | TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0); |
860 | return (refs); |
861 | } |
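A compact sketch of the reference-count life cycle implemented above (the object, field, and wait-message names are hypothetical): refcnt_init() creates the count with the creator's reference, other contexts bracket their use with refcnt_take()/refcnt_rele_wake(), and the destructor calls refcnt_finalize() to drop its own reference and sleep until all others are gone.

	struct widget {
		struct refcnt	w_refs;
		/* ... */
	};

	refcnt_init(&w->w_refs);		/* count starts at 1, owned by the creator */

	refcnt_take(&w->w_refs);		/* another context keeps w alive */
	/* ... use w ... */
	refcnt_rele_wake(&w->w_refs);		/* drop it, waking a finalizer if it was last */

	refcnt_finalize(&w->w_refs, "widgetrm");	/* wait for the count to reach 0 */
	free(w, M_DEVBUF, sizeof(*w));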
862 | |
863 | void |
864 | cond_init(struct cond *c) |
865 | { |
866 | atomic_store_int(&c->c_wait, 1); |
867 | } |
868 | |
869 | void |
870 | cond_signal(struct cond *c) |
871 | { |
872 | atomic_store_int(&c->c_wait, 0); |
873 | |
874 | wakeup_one(c); |
875 | } |
876 | |
877 | void |
878 | cond_wait(struct cond *c, const char *wmesg) |
879 | { |
880 | unsigned int wait; |
881 | |
882 | wait = atomic_load_int(&c->c_wait); |
883 | while (wait) { |
884 | sleep_setup(c, PWAIT, wmesg); |
885 | wait = atomic_load_int(&c->c_wait); |
886 | sleep_finish(0, wait); |
887 | } |
888 | } |
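The cond functions above implement a one-shot completion: c_wait starts at 1, the waiter loops through sleep_setup()/sleep_finish() until it observes 0, and the signaler clears the flag before issuing the wakeup. A rough usage sketch (the hand-off function is hypothetical):

	struct cond c;

	cond_init(&c);
	hypothetical_enqueue_work(&work, &c);	/* another context will signal c */
	cond_wait(&c, "workdone");		/* block until cond_signal(&c) */

	/* In the completing context, once the work is finished: */
	cond_signal(&c);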