Bug Summary

File: kern/kern_sig.c
Warning: line 1594, column 37
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
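This is the analyzer's dead-store check (deadcode.DeadStores, "dead nested assignment"): in coredump() below, the condition at line 1594 stores suser()'s return value into 'error' so the || expression can test it, but on every path that follows 'error' is either never read or first overwritten (e.g. by the vn_open() call at line 1645), so the stored value itself is dead. A minimal standalone sketch of the flagged pattern, with suser() stubbed out and hypothetical names (not the kernel code itself):

    int suser(void);    /* stub standing in for the kernel's suser(9) */

    int
    flagged_pattern(int sugid, int nosuidcoredump)
    {
            int error;

            /*
             * The store to 'error' only feeds the enclosing ||
             * expression; 'error' is reassigned below before any read,
             * so the analyzer reports the store as dead.
             */
            if ((sugid && (error = suser())) ||
                (sugid && nosuidcoredump))
                    return 1;

            error = 0;      /* first real use of 'error' after the dead store */
            return error;
    }

One way to quiet the warning without changing behavior would be to test the return value directly, e.g. "(sugid && suser(p) != 0)", since the stored value is never consumed; whether that is the preferred fix is a judgment call for the OpenBSD developers.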

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name kern_sig.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/kern/kern_sig.c
1/* $OpenBSD: kern_sig.c,v 1.292 2022/01/02 21:01:20 tb Exp $ */
2/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
3
4/*
5 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
6 * Copyright (c) 1982, 1986, 1989, 1991, 1993
7 * The Regents of the University of California. All rights reserved.
8 * (c) UNIX System Laboratories, Inc.
9 * All or some portions of this file are derived from material licensed
10 * to the University of California by American Telephone and Telegraph
11 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12 * the permission of UNIX System Laboratories, Inc.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 */
40
41#include <sys/param.h>
42#include <sys/signalvar.h>
43#include <sys/resourcevar.h>
44#include <sys/queue.h>
45#include <sys/namei.h>
46#include <sys/vnode.h>
47#include <sys/event.h>
48#include <sys/proc.h>
49#include <sys/systm.h>
50#include <sys/acct.h>
51#include <sys/fcntl.h>
52#include <sys/filedesc.h>
53#include <sys/kernel.h>
54#include <sys/wait.h>
55#include <sys/ktrace.h>
56#include <sys/stat.h>
57#include <sys/core.h>
58#include <sys/malloc.h>
59#include <sys/pool.h>
60#include <sys/ptrace.h>
61#include <sys/sched.h>
62#include <sys/user.h>
63#include <sys/syslog.h>
64#include <sys/ttycom.h>
65#include <sys/pledge.h>
66#include <sys/witness.h>
67#include <sys/exec_elf.h>
68
69#include <sys/mount.h>
70#include <sys/syscallargs.h>
71
72#include <uvm/uvm_extern.h>
73#include <machine/tcb.h>
74
75int filt_sigattach(struct knote *kn);
76void filt_sigdetach(struct knote *kn);
77int filt_signal(struct knote *kn, long hint);
78
79const struct filterops sig_filtops = {
80 .f_flags = 0,
81 .f_attach = filt_sigattach,
82 .f_detach = filt_sigdetach,
83 .f_event = filt_signal,
84};
85
86/*
87 * The array below categorizes the signals and their default actions.
88 */
89const int sigprop[NSIG] = {
90 0, /* unused */
91 SA_KILL, /* SIGHUP */
92 SA_KILL, /* SIGINT */
93 SA_KILL|SA_CORE, /* SIGQUIT */
94 SA_KILL|SA_CORE, /* SIGILL */
95 SA_KILL|SA_CORE, /* SIGTRAP */
96 SA_KILL|SA_CORE, /* SIGABRT */
97 SA_KILL|SA_CORE, /* SIGEMT */
98 SA_KILL|SA_CORE, /* SIGFPE */
99 SA_KILL, /* SIGKILL */
100 SA_KILL|SA_CORE, /* SIGBUS */
101 SA_KILL|SA_CORE, /* SIGSEGV */
102 SA_KILL|SA_CORE, /* SIGSYS */
103 SA_KILL, /* SIGPIPE */
104 SA_KILL, /* SIGALRM */
105 SA_KILL, /* SIGTERM */
106 SA_IGNORE, /* SIGURG */
107 SA_STOP, /* SIGSTOP */
108 SA_STOP|SA_TTYSTOP, /* SIGTSTP */
109 SA_IGNORE|SA_CONT, /* SIGCONT */
110 SA_IGNORE, /* SIGCHLD */
111 SA_STOP|SA_TTYSTOP, /* SIGTTIN */
112 SA_STOP|SA_TTYSTOP, /* SIGTTOU */
113 SA_IGNORE, /* SIGIO */
114 SA_KILL, /* SIGXCPU */
115 SA_KILL, /* SIGXFSZ */
116 SA_KILL, /* SIGVTALRM */
117 SA_KILL, /* SIGPROF */
118 SA_IGNORE, /* SIGWINCH */
119 SA_IGNORE, /* SIGINFO */
120 SA_KILL, /* SIGUSR1 */
121 SA_KILL, /* SIGUSR2 */
122 SA_IGNORE, /* SIGTHR */
123};
124
125#define CONTSIGMASK (sigmask(SIGCONT))
126#define STOPSIGMASK (sigmask(SIGSTOP) | sigmask(SIGTSTP) | \
127 sigmask(SIGTTIN) | sigmask(SIGTTOU))
128
129void setsigvec(struct proc *, int, struct sigaction *);
130
131void proc_stop(struct proc *p, int);
132void proc_stop_sweep(void *);
133void *proc_stop_si;
134
135void postsig_done(struct proc *, int, sigset_t, int);
136void postsig(struct proc *, int);
137int cansignal(struct proc *, struct process *, int);
138
139struct pool sigacts_pool; /* memory pool for sigacts structures */
140
141void sigio_del(struct sigiolst *);
142void sigio_unlink(struct sigio_ref *, struct sigiolst *);
143struct mutex sigio_lock = MUTEX_INITIALIZER(IPL_HIGH);
144
145/*
146 * Can thread p send the signal signum to process qr?
147 */
148int
149cansignal(struct proc *p, struct process *qr, int signum)
150{
151 struct process *pr = p->p_p;
152 struct ucred *uc = p->p_ucred;
153 struct ucred *quc = qr->ps_ucred;
154
155 if (uc->cr_uid == 0)
156 return (1); /* root can always signal */
157
158 if (pr == qr)
159 return (1); /* process can always signal itself */
160
161 /* optimization: if the same creds then the tests below will pass */
162 if (uc == quc)
163 return (1);
164
165 if (signum == SIGCONT && qr->ps_session == pr->ps_session)
166 return (1); /* SIGCONT in session */
167
168 /*
169 * Using kill(), only certain signals can be sent to setugid
170 * child processes
171 */
172 if (qr->ps_flags & PS_SUGID) {
173 switch (signum) {
174 case 0:
175 case SIGKILL:
176 case SIGINT:
177 case SIGTERM:
178 case SIGALRM:
179 case SIGSTOP:
180 case SIGTTIN:
181 case SIGTTOU:
182 case SIGTSTP:
183 case SIGHUP:
184 case SIGUSR1:
185 case SIGUSR2:
186 if (uc->cr_ruid == quc->cr_ruid ||
187 uc->cr_uid == quc->cr_ruid)
188 return (1);
189 }
190 return (0);
191 }
192
193 if (uc->cr_ruid == quc->cr_ruid ||
194 uc->cr_ruid == quc->cr_svuid ||
195 uc->cr_uid == quc->cr_ruid ||
196 uc->cr_uid == quc->cr_svuid)
197 return (1);
198 return (0);
199}
200
201/*
202 * Initialize signal-related data structures.
203 */
204void
205signal_init(void)
206{
207 proc_stop_si = softintr_establish(IPL_SOFTCLOCK, proc_stop_sweep,
208 NULL);
209 if (proc_stop_si == NULL)
210 panic("signal_init failed to register softintr");
211
212 pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE,
213 PR_WAITOK, "sigapl", NULL);
214}
215
216/*
217 * Initialize a new sigaltstack structure.
218 */
219void
220sigstkinit(struct sigaltstack *ss)
221{
222 ss->ss_flags = SS_DISABLE;
223 ss->ss_size = 0;
224 ss->ss_sp = NULL;
225}
226
227/*
228 * Create an initial sigacts structure, using the same signal state
229 * as pr.
230 */
231struct sigacts *
232sigactsinit(struct process *pr)
233{
234 struct sigacts *ps;
235
236 ps = pool_get(&sigacts_pool, PR_WAITOK);
237 memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
238 return (ps);
239}
240
241/*
242 * Release a sigacts structure.
243 */
244void
245sigactsfree(struct process *pr)
246{
247 struct sigacts *ps = pr->ps_sigacts;
248
249 pr->ps_sigacts = NULL;
250
251 pool_put(&sigacts_pool, ps);
252}
253
254int
255sys_sigaction(struct proc *p, void *v, register_t *retval)
256{
257 struct sys_sigaction_args /* {
258 syscallarg(int) signum;
259 syscallarg(const struct sigaction *) nsa;
260 syscallarg(struct sigaction *) osa;
261 } */ *uap = v;
262 struct sigaction vec;
263#ifdef KTRACE
264 struct sigaction ovec;
265#endif
266 struct sigaction *sa;
267 const struct sigaction *nsa;
268 struct sigaction *osa;
269 struct sigacts *ps = p->p_p->ps_sigacts;
270 int signum;
271 int bit, error;
272
273 signum = SCARG(uap, signum);
274 nsa = SCARG(uap, nsa);
275 osa = SCARG(uap, osa);
276
277 if (signum <= 0 || signum >= NSIG ||
278 (nsa && (signum == SIGKILL || signum == SIGSTOP)))
279 return (EINVAL);
280 sa = &vec;
281 if (osa) {
282 sa->sa_handler = ps->ps_sigact[signum];
283 sa->sa_mask = ps->ps_catchmask[signum];
284 bit = sigmask(signum);
285 sa->sa_flags = 0;
286 if ((ps->ps_sigonstack & bit) != 0)
287 sa->sa_flags |= SA_ONSTACK;
288 if ((ps->ps_sigintr & bit) == 0)
289 sa->sa_flags |= SA_RESTART;
290 if ((ps->ps_sigreset & bit) != 0)
291 sa->sa_flags |= SA_RESETHAND;
292 if ((ps->ps_siginfo & bit) != 0)
293 sa->sa_flags |= SA_SIGINFO;
294 if (signum == SIGCHLD) {
295 if ((ps->ps_sigflags & SAS_NOCLDSTOP) != 0)
296 sa->sa_flags |= SA_NOCLDSTOP;
297 if ((ps->ps_sigflags & SAS_NOCLDWAIT) != 0)
298 sa->sa_flags |= SA_NOCLDWAIT;
299 }
300 if ((sa->sa_mask & bit) == 0)
301 sa->sa_flags |= SA_NODEFER;
302 sa->sa_mask &= ~bit;
303 error = copyout(sa, osa, sizeof (vec));
304 if (error)
305 return (error);
306#ifdef KTRACE
307 if (KTRPOINT(p, KTR_STRUCT))
308 ovec = vec;
309#endif
310 }
311 if (nsa) {
312 error = copyin(nsa, sa, sizeof (vec));
313 if (error)
314 return (error);
315#ifdef KTRACE
316 if (KTRPOINT(p, KTR_STRUCT))
317 ktrsigaction(p, sa);
318#endif
319 setsigvec(p, signum, sa);
320 }
321#ifdef KTRACE
322 if (osa && KTRPOINT(p, KTR_STRUCT))
323 ktrsigaction(p, &ovec);
324#endif
325 return (0);
326}
327
328void
329setsigvec(struct proc *p, int signum, struct sigaction *sa)
330{
331 struct sigacts *ps = p->p_p->ps_sigacts;
332 int bit;
333 int s;
334
335 bit = sigmask(signum);
336 /*
337 * Change setting atomically.
338 */
339 s = splhigh();
340 ps->ps_sigact[signum] = sa->sa_handler;
341 if ((sa->sa_flags & SA_NODEFER) == 0)
342 sa->sa_mask |= sigmask(signum);
343 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
344 if (signum == SIGCHLD) {
345 if (sa->sa_flags & SA_NOCLDSTOP)
346 atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
347 else
348 atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
349 /*
350 * If the SA_NOCLDWAIT flag is set or the handler
351 * is SIG_IGN we reparent the dying child to PID 1
352 * (init) which will reap the zombie. Because we use
353 * init to do our dirty work we never set SAS_NOCLDWAIT
354 * for PID 1.
355 * XXX exit1 rework means this is unnecessary?
356 */
357 if (initprocess->ps_sigacts != ps &&
358 ((sa->sa_flags & SA_NOCLDWAIT) ||
359 sa->sa_handler == SIG_IGN))
360 atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
361 else
362 atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
363 }
364 if ((sa->sa_flags & SA_RESETHAND) != 0)
365 ps->ps_sigreset |= bit;
366 else
367 ps->ps_sigreset &= ~bit;
368 if ((sa->sa_flags & SA_SIGINFO) != 0)
369 ps->ps_siginfo |= bit;
370 else
371 ps->ps_siginfo &= ~bit;
372 if ((sa->sa_flags & SA_RESTART) == 0)
373 ps->ps_sigintr |= bit;
374 else
375 ps->ps_sigintr &= ~bit;
376 if ((sa->sa_flags & SA_ONSTACK) != 0)
377 ps->ps_sigonstack |= bit;
378 else
379 ps->ps_sigonstack &= ~bit;
380 /*
381 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
382 * and for signals set to SIG_DFL where the default is to ignore.
383 * However, don't put SIGCONT in ps_sigignore,
384 * as we have to restart the process.
385 */
386 if (sa->sa_handler == SIG_IGN ||
387 (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
388 atomic_clearbits_int(&p->p_siglist, bit);
389 atomic_clearbits_int(&p->p_p->ps_siglist, bit);
390 if (signum != SIGCONT)
391 ps->ps_sigignore |= bit; /* easier in psignal */
392 ps->ps_sigcatch &= ~bit;
393 } else {
394 ps->ps_sigignore &= ~bit;
395 if (sa->sa_handler == SIG_DFL)
396 ps->ps_sigcatch &= ~bit;
397 else
398 ps->ps_sigcatch |= bit;
399 }
400 splx(s);
401}
402
403/*
404 * Initialize signal state for process 0;
405 * set to ignore signals that are ignored by default.
406 */
407void
408siginit(struct sigacts *ps)
409{
410 int i;
411
412 for (i = 0; i < NSIG; i++)
413 if (sigprop[i] & SA_IGNORE && i != SIGCONT)
414 ps->ps_sigignore |= sigmask(i);
415 ps->ps_sigflags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
416}
417
418/*
419 * Reset signals for an exec by the specified thread.
420 */
421void
422execsigs(struct proc *p)
423{
424 struct sigacts *ps;
425 int nc, mask;
426
427 ps = p->p_p->ps_sigacts;
428
429 /*
430 * Reset caught signals. Held signals remain held
431 * through p_sigmask (unless they were caught,
432 * and are now ignored by default).
433 */
434 while (ps->ps_sigcatch) {
435 nc = ffs((long)ps->ps_sigcatch);
436 mask = sigmask(nc);
437 ps->ps_sigcatch &= ~mask;
438 if (sigprop[nc] & SA_IGNORE) {
439 if (nc != SIGCONT)
440 ps->ps_sigignore |= mask;
441 atomic_clearbits_int(&p->p_siglist, mask);
442 atomic_clearbits_int(&p->p_p->ps_siglist, mask);
443 }
444 ps->ps_sigact[nc] = SIG_DFL;
445 }
446 /*
447 * Reset stack state to the user stack.
448 * Clear set of signals caught on the signal stack.
449 */
450 sigstkinit(&p->p_sigstk);
451 atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
452 if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
453 ps->ps_sigact[SIGCHLD] = SIG_DFL;
454}
455
456/*
457 * Manipulate signal mask.
458 * Note that we receive new mask, not pointer,
459 * and return old mask as return value;
460 * the library stub does the rest.
461 */
462int
463sys_sigprocmask(struct proc *p, void *v, register_t *retval)
464{
465 struct sys_sigprocmask_args /* {
466 syscallarg(int) how;
467 syscallarg(sigset_t) mask;
468 } */ *uap = v;
469 int error = 0;
470 sigset_t mask;
471
472 KASSERT(p == curproc);
473
474 *retval = p->p_sigmask;
475 mask = SCARG(uap, mask) &~ sigcantmask;
476
477 switch (SCARG(uap, how)) {
478 case SIG_BLOCK:
479 atomic_setbits_int(&p->p_sigmask, mask);
480 break;
481 case SIG_UNBLOCK:
482 atomic_clearbits_int(&p->p_sigmask, mask);
483 break;
484 case SIG_SETMASK:
485 p->p_sigmask = mask;
486 break;
487 default:
488 error = EINVAL;
489 break;
490 }
491 return (error);
492}
493
494int
495sys_sigpending(struct proc *p, void *v, register_t *retval)
496{
497
498 *retval = p->p_siglist | p->p_p->ps_siglist;
499 return (0);
500}
501
502/*
503 * Temporarily replace calling proc's signal mask for the duration of a
504 * system call. Original signal mask will be restored by userret().
505 */
506void
507dosigsuspend(struct proc *p, sigset_t newmask)
508{
509 KASSERT(p == curproc);
510
511 p->p_oldmask = p->p_sigmask;
512 atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
513 p->p_sigmask = newmask;
514}
515
516/*
517 * Suspend thread until signal, providing mask to be set
518 * in the meantime. Note nonstandard calling convention:
519 * libc stub passes mask, not pointer, to save a copyin.
520 */
521int
522sys_sigsuspend(struct proc *p, void *v, register_t *retval)
523{
524 struct sys_sigsuspend_args /* {
525 syscallarg(int) mask;
526 } */ *uap = v;
527
528 dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
529 while (tsleep_nsec(&nowake, PPAUSE|PCATCH, "sigsusp", INFSLP) == 0)
530 continue;
531 /* always return EINTR rather than ERESTART... */
532 return (EINTR);
533}
534
535int
536sigonstack(size_t stack)
537{
538 const struct sigaltstack *ss = &curproc->p_sigstk;
539
540 return (ss->ss_flags & SS_DISABLE ? 0 :
541 (stack - (size_t)ss->ss_sp < ss->ss_size));
542}
543
544int
545sys_sigaltstack(struct proc *p, void *v, register_t *retval)
546{
547 struct sys_sigaltstack_args /* {
548 syscallarg(const struct sigaltstack *) nss;
549 syscallarg(struct sigaltstack *) oss;
550 } */ *uap = v;
551 struct sigaltstack ss;
552 const struct sigaltstack *nss;
553 struct sigaltstack *oss;
554 int onstack = sigonstack(PROC_STACK(p));
555 int error;
556
557 nss = SCARG(uap, nss);
558 oss = SCARG(uap, oss);
559
560 if (oss != NULL) {
561 ss = p->p_sigstk;
562 if (onstack)
563 ss.ss_flags |= SS_ONSTACK;
564 if ((error = copyout(&ss, oss, sizeof(ss))))
565 return (error);
566 }
567 if (nss == NULL)
568 return (0);
569 error = copyin(nss, &ss, sizeof(ss));
570 if (error)
571 return (error);
572 if (onstack)
573 return (EPERM);
574 if (ss.ss_flags & ~SS_DISABLE)
575 return (EINVAL);
576 if (ss.ss_flags & SS_DISABLE) {
577 p->p_sigstk.ss_flags = ss.ss_flags;
578 return (0);
579 }
580 if (ss.ss_size < MINSIGSTKSZ)
581 return (ENOMEM);
582
583 error = uvm_map_remap_as_stack(p, (vaddr_t)ss.ss_sp, ss.ss_size);
584 if (error)
585 return (error);
586
587 p->p_sigstk = ss;
588 return (0);
589}
590
591int
592sys_kill(struct proc *cp, void *v, register_t *retval)
593{
594 struct sys_kill_args /* {
595 syscallarg(int) pid;
596 syscallarg(int) signum;
597 } */ *uap = v;
598 struct process *pr;
599 int pid = SCARG(uap, pid);
600 int signum = SCARG(uap, signum);
601 int error;
602 int zombie = 0;
603
604 if ((error = pledge_kill(cp, pid)) != 0)
605 return (error);
606 if (((u_int)signum) >= NSIG)
607 return (EINVAL);
608 if (pid > 0) {
609 if ((pr = prfind(pid)) == NULL) {
610 if ((pr = zombiefind(pid)) == NULL)
611 return (ESRCH);
612 else
613 zombie = 1;
614 }
615 if (!cansignal(cp, pr, signum))
616 return (EPERM);
617
618 /* kill single process */
619 if (signum && !zombie)
620 prsignal(pr, signum);
621 return (0);
622 }
623 switch (pid) {
624 case -1: /* broadcast signal */
625 return (killpg1(cp, signum, 0, 1));
626 case 0: /* signal own process group */
627 return (killpg1(cp, signum, 0, 0));
628 default: /* negative explicit process group */
629 return (killpg1(cp, signum, -pid, 0));
630 }
631}
632
633int
634sys_thrkill(struct proc *cp, void *v, register_t *retval)
635{
636 struct sys_thrkill_args /* {
637 syscallarg(pid_t) tid;
638 syscallarg(int) signum;
639 syscallarg(void *) tcb;
640 } */ *uap = v;
641 struct proc *p;
642 int tid = SCARG(uap, tid);
643 int signum = SCARG(uap, signum);
644 void *tcb;
645
646 if (((u_int)signum) >= NSIG)
647 return (EINVAL);
648 if (tid > THREAD_PID_OFFSET) {
649 if ((p = tfind(tid - THREAD_PID_OFFSET)) == NULL)
650 return (ESRCH);
651
652 /* can only kill threads in the same process */
653 if (p->p_p != cp->p_p)
654 return (ESRCH);
655 } else if (tid == 0)
656 p = cp;
657 else
658 return (EINVAL);
659
660 /* optionally require the target thread to have the given tcb addr */
661 tcb = SCARG(uap, tcb);
662 if (tcb != NULL && tcb != TCB_GET(p))
663 return (ESRCH);
664
665 if (signum)
666 ptsignal(p, signum, STHREAD);
667 return (0);
668}
669
670/*
671 * Common code for kill process group/broadcast kill.
672 * cp is calling process.
673 */
674int
675killpg1(struct proc *cp, int signum, int pgid, int all)
676{
677 struct process *pr;
678 struct pgrp *pgrp;
679 int nfound = 0;
680
681 if (all) {
682 /*
683 * broadcast
684 */
685 LIST_FOREACH(pr, &allprocess, ps_list) {
686 if (pr->ps_pid <= 1 ||
687 pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
688 pr == cp->p_p || !cansignal(cp, pr, signum))
689 continue;
690 nfound++;
691 if (signum)
692 prsignal(pr, signum);
693 }
694 } else {
695 if (pgid == 0)
696 /*
697 * zero pgid means send to my process group.
698 */
699 pgrp = cp->p_p->ps_pgrp;
700 else {
701 pgrp = pgfind(pgid);
702 if (pgrp == NULL)
703 return (ESRCH);
704 }
705 LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
706 if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
707 !cansignal(cp, pr, signum))
708 continue;
709 nfound++;
710 if (signum)
711 prsignal(pr, signum);
712 }
713 }
714 return (nfound ? 0 : ESRCH);
715}
716
717#define CANDELIVER(uid, euid, pr) \
718 (euid == 0 || \
719 (uid) == (pr)->ps_ucred->cr_ruid || \
720 (uid) == (pr)->ps_ucred->cr_svuid || \
721 (uid) == (pr)->ps_ucred->cr_uid || \
722 (euid) == (pr)->ps_ucred->cr_ruid || \
723 (euid) == (pr)->ps_ucred->cr_svuid || \
724 (euid) == (pr)->ps_ucred->cr_uid)
725
726#define CANSIGIO(cr, pr) \
727 CANDELIVER((cr)->cr_ruid, (cr)->cr_uid, (pr))
729/*
730 * Send a signal to a process group. If checktty is 1,
731 * limit to members which have a controlling terminal.
732 */
733void
734pgsignal(struct pgrp *pgrp, int signum, int checkctty)
735{
736 struct process *pr;
737
738 if (pgrp)
739 LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
740 if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
741 prsignal(pr, signum);
742}
743
744/*
745 * Send a SIGIO or SIGURG signal to a process or process group using stored
746 * credentials rather than those of the current process.
747 */
748void
749pgsigio(struct sigio_ref *sir, int sig, int checkctty)
750{
751 struct process *pr;
752 struct sigio *sigio;
753
754 if (sir->sir_sigio == NULL)
755 return;
756
757 KERNEL_LOCK();
758 mtx_enter(&sigio_lock);
759 sigio = sir->sir_sigio;
760 if (sigio == NULL)
761 goto out;
762 if (sigio->sio_pgid > 0) {
763 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc))
764 prsignal(sigio->sio_proc, sig);
765 } else if (sigio->sio_pgid < 0) {
766 LIST_FOREACH(pr, &sigio->sio_pgrp->pg_members, ps_pglist) {
767 if (CANSIGIO(sigio->sio_ucred, pr) &&
768 (checkctty == 0 || (pr->ps_flags & PS_CONTROLT)))
769 prsignal(pr, sig);
770 }
771 }
772out:
773 mtx_leave(&sigio_lock);
774 KERNEL_UNLOCK();
775}
776
777/*
778 * Recalculate the signal mask and reset the signal disposition after
779 * usermode frame for delivery is formed.
780 */
781void
782postsig_done(struct proc *p, int signum, sigset_t catchmask, int reset)
783{
784 KERNEL_ASSERT_LOCKED();
785
786 p->p_ru.ru_nsignals++;
787 atomic_setbits_int(&p->p_sigmask, catchmask);
788 if (reset != 0) {
789 sigset_t mask = sigmask(signum);
790 struct sigacts *ps = p->p_p->ps_sigacts;
791
792 ps->ps_sigcatch &= ~mask;
793 if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
794 ps->ps_sigignore |= mask;
795 ps->ps_sigact[signum] = SIG_DFL;
796 }
797}
798
799/*
800 * Send a signal caused by a trap to the current thread
801 * If it will be caught immediately, deliver it with correct code.
802 * Otherwise, post it normally.
803 */
804void
805trapsignal(struct proc *p, int signum, u_long trapno, int code,
806 union sigval sigval)
807{
808 struct process *pr = p->p_p;
809 struct sigacts *ps = pr->ps_sigacts;
810 int mask;
811
812 KERNEL_LOCK();
813 switch (signum) {
814 case SIGILL:
815 case SIGBUS:
816 case SIGSEGV:
817 pr->ps_acflag |= ATRAP;
818 break;
819 }
820
821 mask = sigmask(signum);
822 if ((pr->ps_flags & PS_TRACED) == 0 &&
823 (ps->ps_sigcatch & mask) != 0 &&
824 (p->p_sigmask & mask) == 0) {
825 siginfo_t si;
826 sigset_t catchmask = ps->ps_catchmask[signum];
827 int info = (ps->ps_siginfo & mask) != 0;
828 int onstack = (ps->ps_sigonstack & mask) != 0;
829 int reset = (ps->ps_sigreset & mask) != 0;
830
831 initsiginfo(&si, signum, trapno, code, sigval);
832#ifdef KTRACE
833 if (KTRPOINT(p, KTR_PSIG)) {
834 ktrpsig(p, signum, ps->ps_sigact[signum],
835 p->p_sigmask, code, &si);
836 }
837#endif
838 if (sendsig(ps->ps_sigact[signum], signum, p->p_sigmask, &si,
839 info, onstack)) {
840 sigexit(p, SIGILL);
841 /* NOTREACHED */
842 }
843 postsig_done(p, signum, catchmask, reset);
844 } else {
845 p->p_sisig = signum;
846 p->p_sitrapno = trapno; /* XXX for core dump/debugger */
847 p->p_sicode = code;
848 p->p_sigval = sigval;
849
850 /*
851 * Signals like SIGBUS and SIGSEGV should not, when
852 * generated by the kernel, be ignorable or blockable.
853 * If it is and we're not being traced, then just kill
854 * the process.
855 * After vfs_shutdown(9), init(8) cannot receive signals
856 * because new code pages of the signal handler cannot be
857 * mapped from halted storage. init(8) may not die or the
858 * kernel panics. Better loop between signal handler and
859 * page fault trap until the machine is halted.
860 */
861 if ((pr->ps_flags & PS_TRACED) == 0 &&
862 (sigprop[signum] & SA_KILL) &&
863 ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)) &&
864 pr->ps_pid != 1)
865 sigexit(p, signum);
866 ptsignal(p, signum, STHREAD);
867 }
868 KERNEL_UNLOCK();
869}
870
871/*
872 * Send the signal to the process. If the signal has an action, the action
873 * is usually performed by the target process rather than the caller; we add
874 * the signal to the set of pending signals for the process.
875 *
876 * Exceptions:
877 * o When a stop signal is sent to a sleeping process that takes the
878 * default action, the process is stopped without awakening it.
879 * o SIGCONT restarts stopped processes (or puts them back to sleep)
880 * regardless of the signal action (eg, blocked or ignored).
881 *
882 * Other ignored signals are discarded immediately.
883 */
884void
885psignal(struct proc *p, int signum)
886{
887 ptsignal(p, signum, SPROCESS);
888}
889
890/*
891 * type = SPROCESS process signal, can be diverted (sigwait())
892 * type = STHREAD thread signal, but should be propagated if unhandled
893 * type = SPROPAGATED propagated to this thread, so don't propagate again
894 */
895void
896ptsignal(struct proc *p, int signum, enum signal_type type)
897{
898 int s, prop;
899 sig_t action;
900 int mask;
901 int *siglist;
902 struct process *pr = p->p_p;
903 struct proc *q;
904 int wakeparent = 0;
905
906 KERNEL_ASSERT_LOCKED();
907
908#ifdef DIAGNOSTIC
909 if ((u_int)signum >= NSIG || signum == 0)
910 panic("psignal signal number");
911#endif
912
913 /* Ignore signal if the target process is exiting */
914 if (pr->ps_flags & PS_EXITING)
915 return;
916
917 mask = sigmask(signum);
918
919 if (type == SPROCESS) {
920 /* Accept SIGKILL to coredumping processes */
921 if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
922 atomic_setbits_int(&pr->ps_siglist, mask);
923 return;
924 }
925
926 /*
927 * If the current thread can process the signal
928 * immediately (it's unblocked) then have it take it.
929 */
930 q = curproc;
931 if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
932 (q->p_sigmask & mask) == 0)
933 p = q;
934 else {
935 /*
936 * A process-wide signal can be diverted to a
937 * different thread that's in sigwait() for this
938 * signal. If there isn't such a thread, then
939 * pick a thread that doesn't have it blocked so
940 * that the stop/kill consideration isn't
941 * delayed. Otherwise, mark it pending on the
942 * main thread.
943 */
944 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
945 /* ignore exiting threads */
946 if (q->p_flag & P_WEXIT)
947 continue;
948
949 /* skip threads that have the signal blocked */
950 if ((q->p_sigmask & mask) != 0)
951 continue;
952
953 /* okay, could send to this thread */
954 p = q;
955
956 /*
957 * sigsuspend, sigwait, ppoll/pselect, etc?
958 * Definitely go to this thread, as it's
959 * already blocked in the kernel.
960 */
961 if (q->p_flag & P_SIGSUSPEND)
962 break;
963 }
964 }
965 }
966
967 if (type != SPROPAGATED)
968 KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);
969
970 prop = sigprop[signum];
971
972 /*
973 * If proc is traced, always give parent a chance.
974 */
975 if (pr->ps_flags & PS_TRACED) {
976 action = SIG_DFL;
977 } else {
978 /*
979 * If the signal is being ignored,
980 * then we forget about it immediately.
981 * (Note: we don't set SIGCONT in ps_sigignore,
982 * and if it is set to SIG_IGN,
983 * action will be SIG_DFL here.)
984 */
985 if (pr->ps_sigacts->ps_sigignore & mask)
986 return;
987 if (p->p_sigmask & mask) {
988 action = SIG_HOLD;
989 } else if (pr->ps_sigacts->ps_sigcatch & mask) {
990 action = SIG_CATCH;
991 } else {
992 action = SIG_DFL;
993
994 if (prop & SA_KILL && pr->ps_nice > NZERO)
995 pr->ps_nice = NZERO;
996
997 /*
998 * If sending a tty stop signal to a member of an
999 * orphaned process group, discard the signal here if
1000 * the action is default; don't stop the process below
1001 * if sleeping, and don't clear any pending SIGCONT.
1002 */
1003 if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
1004 return;
1005 }
1006 }
1007 /*
1008 * If delivered to process, mark as pending there. Continue and stop
1009 * signals will be propagated to all threads. So they are always
1010 * marked at thread level.
1011 */
1012 siglist = (type == SPROCESS) ? &pr->ps_siglist : &p->p_siglist;
1013 if (prop & SA_CONT) {
1014 siglist = &p->p_siglist;
1015 atomic_clearbits_int(siglist, STOPSIGMASK);
1016 }
1017 if (prop & SA_STOP) {
1018 siglist = &p->p_siglist;
1019 atomic_clearbits_int(siglist, CONTSIGMASK);
1020 atomic_clearbits_int(&p->p_flag, P_CONTINUED);
1021 }
1022 atomic_setbits_int(siglist, mask);
1023
1024 /*
1025 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
1026 */
1027 if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
1028 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
1029 if (q != p)
1030 ptsignal(q, signum, SPROPAGATED);
1031
1032 /*
1033 * Defer further processing for signals which are held,
1034 * except that stopped processes must be continued by SIGCONT.
1035 */
1036 if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
1037 return;
1038
1039 SCHED_LOCK(s);
1040
1041 switch (p->p_stat) {
1042
1043 case SSLEEP:
1044 /*
1045 * If process is sleeping uninterruptibly
1046 * we can't interrupt the sleep... the signal will
1047 * be noticed when the process returns through
1048 * trap() or syscall().
1049 */
1050 if ((p->p_flag & P_SINTR) == 0)
1051 goto out;
1052 /*
1053 * Process is sleeping and traced... make it runnable
1054 * so it can discover the signal in cursig() and stop
1055 * for the parent.
1056 */
1057 if (pr->ps_flags & PS_TRACED)
1058 goto run;
1059 /*
1060 * If SIGCONT is default (or ignored) and process is
1061 * asleep, we are finished; the process should not
1062 * be awakened.
1063 */
1064 if ((prop & SA_CONT) && action == SIG_DFL) {
1065 atomic_clearbits_int(siglist, mask);
1066 goto out;
1067 }
1068 /*
1069 * When a sleeping process receives a stop
1070 * signal, process immediately if possible.
1071 */
1072 if ((prop & SA_STOP) && action == SIG_DFL) {
1073 /*
1074 * If a child holding parent blocked,
1075 * stopping could cause deadlock.
1076 */
1077 if (pr->ps_flags & PS_PPWAIT)
1078 goto out;
1079 atomic_clearbits_int(siglist, mask);
1080 pr->ps_xsig = signum;
1081 proc_stop(p, 0);
1082 goto out;
1083 }
1084 /*
1085 * All other (caught or default) signals
1086 * cause the process to run.
1087 */
1088 goto runfast;
1089 /* NOTREACHED */
1090
1091 case SSTOP:
1092 /*
1093 * If traced process is already stopped,
1094 * then no further action is necessary.
1095 */
1096 if (pr->ps_flags & PS_TRACED)
1097 goto out;
1098
1099 /*
1100 * Kill signal always sets processes running.
1101 */
1102 if (signum == SIGKILL) {
1103 atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
1104 goto runfast;
1105 }
1106
1107 if (prop & SA_CONT) {
1108 /*
1109 * If SIGCONT is default (or ignored), we continue the
1110 * process but don't leave the signal in p_siglist, as
1111 * it has no further action. If SIGCONT is held, we
1112 * continue the process and leave the signal in
1113 * p_siglist. If the process catches SIGCONT, let it
1114 * handle the signal itself. If it isn't waiting on
1115 * an event, then it goes back to run state.
1116 * Otherwise, process goes back to sleep state.
1117 */
1118 atomic_setbits_int(&p->p_flag, P_CONTINUED);
1119 atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
1120 wakeparent = 1;
1121 if (action == SIG_DFL)
1122 atomic_clearbits_int(siglist, mask);
1123 if (action == SIG_CATCH)
1124 goto runfast;
1125 if (p->p_wchan == NULL)
1126 goto run;
1127 p->p_stat = SSLEEP;
1128 goto out;
1129 }
1130
1131 if (prop & SA_STOP) {
1132 /*
1133 * Already stopped, don't need to stop again.
1134 * (If we did the shell could get confused.)
1135 */
1136 atomic_clearbits_int(siglist, mask);
1137 goto out;
1138 }
1139
1140 /*
1141 * If process is sleeping interruptibly, then simulate a
1142 * wakeup so that when it is continued, it will be made
1143 * runnable and can look at the signal. But don't make
1144 * the process runnable, leave it stopped.
1145 */
1146 if (p->p_flag & P_SINTR)
1147 unsleep(p);
1148 goto out;
1149
1150 case SONPROC:
1151 signotify(p);
1152 /* FALLTHROUGH */
1153 default:
1154 /*
1155 * SRUN, SIDL, SDEAD do nothing with the signal,
1156 * other than kicking ourselves if we are running.
1157 * It will either never be noticed, or noticed very soon.
1158 */
1159 goto out;
1160 }
1161 /* NOTREACHED */
1162
1163runfast:
1164 /*
1165 * Raise priority to at least PUSER.
1166 */
1167 if (p->p_usrpri > PUSER)
1168 p->p_usrpri = PUSER;
1169run:
1170 setrunnable(p);
1171out:
1172 SCHED_UNLOCK(s);
1173 if (wakeparent)
1174 wakeup(pr->ps_pptr);
1175}
1176
1177/*
1178 * Determine signal that should be delivered to process p, the current
1179 * process, 0 if none.
1180 *
1181 * If the current process has received a signal (should be caught or cause
1182 * termination, should interrupt current syscall), return the signal number.
1183 * Stop signals with default action are processed immediately, then cleared;
1184 * they aren't returned. This is checked after each entry to the system for
1185 * a syscall or trap. The normal call sequence is
1186 *
1187 * while (signum = cursig(curproc))
1188 * postsig(signum);
1189 *
1190 * Assumes that if the P_SINTR flag is set, we're holding both the
1191 * kernel and scheduler locks.
1192 */
1193int
1194cursig(struct proc *p)
1195{
1196 struct process *pr = p->p_p;
1197 int sigpending, signum, mask, prop;
1198 int dolock = (p->p_flag & P_SINTR) == 0;
1199 int s;
1200
1201 KERNEL_ASSERT_LOCKED();
1202
1203 sigpending = (p->p_siglist | pr->ps_siglist);
1204 if (sigpending == 0)
1205 return 0;
1206
1207 if (!ISSET(pr->ps_flags, PS_TRACED) && SIGPENDING(p) == 0)
1208 return 0;
1209
1210 for (;;) {
1211 mask = SIGPENDING(p);
1212 if (pr->ps_flags & PS_PPWAIT)
1213 mask &= ~STOPSIGMASK;
1214 if (mask == 0) /* no signal to send */
1215 return (0);
1216 signum = ffs((long)mask);
1217 mask = sigmask(signum);
1218 atomic_clearbits_int(&p->p_siglist, mask);
1219 atomic_clearbits_int(&pr->ps_siglist, mask);
1220
1221 /*
1222 * We should see pending but ignored signals
1223 * only if PS_TRACED was on when they were posted.
1224 */
1225 if (mask & pr->ps_sigacts->ps_sigignore &&
1226 (pr->ps_flags & PS_TRACED) == 0)
1227 continue;
1228
1229 /*
1230 * If traced, always stop, and stay stopped until released
1231 * by the debugger. If our parent process is waiting for
1232 * us, don't hang as we could deadlock.
1233 */
1234 if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
1235 signum != SIGKILL) {
1236 pr->ps_xsig = signum;
1237
1238 single_thread_set(p, SINGLE_SUSPEND, 0);
1239
1240 if (dolock)
1241 SCHED_LOCK(s);
1242 proc_stop(p, 1);
1243 if (dolock)
1244 SCHED_UNLOCK(s);
1245
1246 single_thread_clear(p, 0);
1247
1248 /*
1249 * If we are no longer being traced, or the parent
1250 * didn't give us a signal, look for more signals.
1251 */
1252 if ((pr->ps_flags & PS_TRACED) == 0 ||
1253 pr->ps_xsig == 0)
1254 continue;
1255
1256 /*
1257 * If the new signal is being masked, look for other
1258 * signals.
1259 */
1260 signum = pr->ps_xsig;
1261 mask = sigmask(signum);
1262 if ((p->p_sigmask & mask) != 0)
1263 continue;
1264
1265 /* take the signal! */
1266 atomic_clearbits_int(&p->p_siglist, mask);
1267 atomic_clearbits_int(&pr->ps_siglist, mask);
1268 }
1269
1270 prop = sigprop[signum];
1271
1272 /*
1273 * Decide whether the signal should be returned.
1274 * Return the signal's number, or fall through
1275 * to clear it from the pending mask.
1276 */
1277 switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
1278 case (long)SIG_DFL:
1279 /*
1280 * Don't take default actions on system processes.
1281 */
1282 if (pr->ps_pid <= 1) {
1283#ifdef DIAGNOSTIC
1284 /*
1285 * Are you sure you want to ignore SIGSEGV
1286 * in init? XXX
1287 */
1288 printf("Process (pid %d) got signal"
1289 " %d\n", pr->ps_pid, signum);
1290#endif
1291 break; /* == ignore */
1292 }
1293 /*
1294 * If there is a pending stop signal to process
1295 * with default action, stop here,
1296 * then clear the signal. However,
1297 * if process is member of an orphaned
1298 * process group, ignore tty stop signals.
1299 */
1300 if (prop & SA_STOP) {
1301 if (pr->ps_flags & PS_TRACED ||
1302 (pr->ps_pgrp->pg_jobc == 0 &&
1303 prop & SA_TTYSTOP))
1304 break; /* == ignore */
1305 pr->ps_xsig = signum;
1306 if (dolock)
1307 SCHED_LOCK(s);
1308 proc_stop(p, 1);
1309 if (dolock)
1310 SCHED_UNLOCK(s);
1311 break;
1312 } else if (prop & SA_IGNORE) {
1313 /*
1314 * Except for SIGCONT, shouldn't get here.
1315 * Default action is to ignore; drop it.
1316 */
1317 break; /* == ignore */
1318 } else
1319 goto keep;
1320 /* NOTREACHED */
1321 case (long)SIG_IGN:
1322 /*
1323 * Masking above should prevent us ever trying
1324 * to take action on an ignored signal other
1325 * than SIGCONT, unless process is traced.
1326 */
1327 if ((prop & SA_CONT) == 0 &&
1328 (pr->ps_flags & PS_TRACED) == 0)
1329 printf("%s\n", __func__);
1330 break; /* == ignore */
1331 default:
1332 /*
1333 * This signal has an action, let
1334 * postsig() process it.
1335 */
1336 goto keep;
1337 }
1338 }
1339 /* NOTREACHED */
1340
1341keep:
1342 atomic_setbits_int(&p->p_siglist, mask); /* leave the signal for later */
1343 return (signum);
1344}
1345
1346/*
1347 * Put the argument process into the stopped state and notify the parent
1348 * via wakeup. Signals are handled elsewhere. The process must not be
1349 * on the run queue.
1350 */
1351void
1352proc_stop(struct proc *p, int sw)
1353{
1354 struct process *pr = p->p_p;
1355
1356#ifdef MULTIPROCESSOR
1357 SCHED_ASSERT_LOCKED();
1358#endif
1359
1360 p->p_stat = SSTOP;
1361 atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
1362 atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
1363 atomic_setbits_int(&p->p_flag, P_SUSPSIG);
1364 /*
1365 * We need this soft interrupt to be handled fast.
1366 * Extra calls to softclock don't hurt.
1367 */
1368 softintr_schedule(proc_stop_si);
1369 if (sw)
1370 mi_switch();
1371}
1372
1373/*
1374 * Called from a soft interrupt to send signals to the parents of stopped
1375 * processes.
1376 * We can't do this in proc_stop because it's called with nasty locks held
1377 * and we would need recursive scheduler lock to deal with that.
1378 */
1379void
1380proc_stop_sweep(void *v)
1381{
1382 struct process *pr;
1383
1384 LIST_FOREACH(pr, &allprocess, ps_list) {
1385 if ((pr->ps_flags & PS_STOPPED) == 0)
1386 continue;
1387 atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);
1388
1389 if ((pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDSTOP) == 0)
1390 prsignal(pr->ps_pptr, SIGCHLD);
1391 wakeup(pr->ps_pptr);
1392 }
1393}
1394
1395/*
1396 * Take the action for the specified signal
1397 * from the current set of pending signals.
1398 */
1399void
1400postsig(struct proc *p, int signum)
1401{
1402 struct process *pr = p->p_p;
1403 struct sigacts *ps = pr->ps_sigacts;
1404 sig_t action;
1405 u_long trapno;
1406 int mask, returnmask;
1407 sigset_t catchmask;
1408 siginfo_t si;
1409 union sigval sigval;
1410 int s, code, info, onstack, reset;
1411
1412 KASSERT(signum != 0);
1413 KERNEL_ASSERT_LOCKED();
1414
1415 mask = sigmask(signum);
1416 atomic_clearbits_int(&p->p_siglist, mask);
1417 action = ps->ps_sigact[signum];
1418 catchmask = ps->ps_catchmask[signum];
1419 info = (ps->ps_siginfo & mask) != 0;
1420 onstack = (ps->ps_sigonstack & mask) != 0;
1421 reset = (ps->ps_sigreset & mask) != 0;
1422 sigval.sival_ptr = NULL;
1423
1424 if (p->p_sisig != signum) {
1425 trapno = 0;
1426 code = SI_USER;
1427 sigval.sival_ptr = NULL;
1428 } else {
1429 trapno = p->p_sitrapno;
1430 code = p->p_sicode;
1431 sigval = p->p_sigval;
1432 }
1433 initsiginfo(&si, signum, trapno, code, sigval);
1434
1435#ifdef KTRACE
1436 if (KTRPOINT(p, KTR_PSIG)) {
1437 ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
1438 p->p_oldmask : p->p_sigmask, code, &si);
1439 }
1440#endif
1441 if (action == SIG_DFL) {
1442 /*
1443 * Default action, where the default is to kill
1444 * the process. (Other cases were ignored above.)
1445 */
1446 sigexit(p, signum);
1447 /* NOTREACHED */
1448 } else {
1449 /*
1450 * If we get here, the signal must be caught.
1451 */
1452#ifdef DIAGNOSTIC
1453 if (action == SIG_IGN || (p->p_sigmask & mask))
1454 panic("postsig action");
1455#endif
1456 /*
1457 * Set the new mask value and also defer further
1458 * occurrences of this signal.
1459 *
1460 * Special case: user has done a sigpause. Here the
1461 * current mask is not of interest, but rather the
1462 * mask from before the sigpause is what we want
1463 * restored after the signal processing is completed.
1464 */
1465#ifdef MULTIPROCESSOR
1466 s = splsched();
1467#else
1468 s = splhigh();
1469#endif
1470 if (p->p_flag & P_SIGSUSPEND) {
1471 atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
1472 returnmask = p->p_oldmask;
1473 } else {
1474 returnmask = p->p_sigmask;
1475 }
1476 if (p->p_sisig == signum) {
1477 p->p_sisig = 0;
1478 p->p_sitrapno = 0;
1479 p->p_sicode = SI_USER;
1480 p->p_sigval.sival_ptr = NULL;
1481 }
1482
1483 if (sendsig(action, signum, returnmask, &si, info, onstack)) {
1484 sigexit(p, SIGILL);
1485 /* NOTREACHED */
1486 }
1487 postsig_done(p, signum, catchmask, reset);
1488 splx(s);
1489 }
1490}
1491
1492/*
1493 * Force the current process to exit with the specified signal, dumping core
1494 * if appropriate. We bypass the normal tests for masked and caught signals,
1495 * allowing unrecoverable failures to terminate the process without changing
1496 * signal state. Mark the accounting record with the signal termination.
1497 * If dumping core, save the signal number for the debugger. Calls exit and
1498 * does not return.
1499 */
1500void
1501sigexit(struct proc *p, int signum)
1502{
1503 /* Mark process as going away */
1504 atomic_setbits_int(&p->p_flag, P_WEXIT);
1505
1506 p->p_p->ps_acflag |= AXSIG;
1507 if (sigprop[signum] & SA_CORE) {
1508 p->p_sisig = signum;
1509
1510 /* if there are other threads, pause them */
1511 if (P_HASSIBLING(p))
1512 single_thread_set(p, SINGLE_SUSPEND, 1);
1513
1514 if (coredump(p) == 0)
1515 signum |= WCOREFLAG;
1516 }
1517 exit1(p, 0, signum, EXIT_NORMAL);
1518 /* NOTREACHED */
1519}
1520
1521/*
1522 * Send uncatchable SIGABRT for coredump.
1523 */
1524void
1525sigabort(struct proc *p)
1526{
1527 struct sigaction sa;
1528
1529 memset(&sa, 0, sizeof sa);
1530 sa.sa_handler = SIG_DFL;
1531 setsigvec(p, SIGABRT, &sa);
1532 atomic_clearbits_int(&p->p_sigmask, sigmask(SIGABRT));
1533 psignal(p, SIGABRT);
1534}
1535
1536/*
1537 * Return 1 if `sig', a given signal, is ignored or masked for `p', a given
1538 * thread, and 0 otherwise.
1539 */
1540int
1541sigismasked(struct proc *p, int sig)
1542{
1543 struct process *pr = p->p_p;
1544
1545 if ((pr->ps_sigacts->ps_sigignore & sigmask(sig)) ||
1546 (p->p_sigmask & sigmask(sig)))
1547 return 1;
1548
1549 return 0;
1550}
1551
1552int nosuidcoredump = 1;
1553
1554struct coredump_iostate {
1555 struct proc *io_proc;
1556 struct vnode *io_vp;
1557 struct ucred *io_cred;
1558 off_t io_offset;
1559};
1560
1561/*
1562 * Dump core, into a file named "progname.core", unless the process was
1563 * setuid/setgid.
1564 */
1565int
1566coredump(struct proc *p)
1567{
1568#ifdef SMALL_KERNEL
1569 return EPERM;
1570#else
1571 struct process *pr = p->p_p;
1572 struct vnode *vp;
1573 struct ucred *cred = p->p_ucred;
1574 struct vmspace *vm = p->p_vmspace;
1575 struct nameidata nd;
1576 struct vattr vattr;
1577 struct coredump_iostate io;
1578 int error, len, incrash = 0;
1579 char *name;
1580 const char *dir = "/var/crash";
1581
1582 atomic_setbits_int(&pr->ps_flags, PS_COREDUMP);
1583
1584 /* Don't dump if will exceed file size limit. */
1585 if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
1586 return (EFBIG);
1587
1588 name = pool_get(&namei_pool, PR_WAITOK);
1589
1590 /*
1591 * If the process has inconsistent uids, nosuidcoredump
1592 * determines coredump placement policy.
1593 */
1594 if (((pr->ps_flags & PS_SUGID) && (error = suser(p))) ||
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
1595 ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
1596 if (nosuidcoredump == 3) {
1597 /*
1598 * If the program directory does not exist, dumps of
1599 * that core will silently fail.
1600 */
1601 len = snprintf(name, MAXPATHLEN, "%s/%s/%u.core",
1602 dir, pr->ps_comm, pr->ps_pid);
1603 incrash = KERNELPATH;
1604 } else if (nosuidcoredump == 2) {
1605 len = snprintf(name, MAXPATHLEN, "%s/%s.core",
1606 dir, pr->ps_comm);
1607 incrash = KERNELPATH;
1608 } else {
1609 pool_put(&namei_pool, name);
1610 return (EPERM);
1611 }
1612 } else
1613 len = snprintf(name, MAXPATHLEN, "%s.core", pr->ps_comm);
1614
1615 if (len >= MAXPATHLEN) {
1616 pool_put(&namei_pool, name);
1617 return (EACCES);
1618 }
1619
1620 /*
1621 * Control the UID used to write out. The normal case uses
1622 * the real UID. If the sugid case is going to write into the
1623 * controlled directory, we do so as root.
1624 */
1625 if (incrash == 0) {
1626 cred = crdup(cred);
1627 cred->cr_uid = cred->cr_ruid;
1628 cred->cr_gid = cred->cr_rgid;
1629 } else {
1630 if (p->p_fd->fd_rdir) {
1631 vrele(p->p_fd->fd_rdir);
1632 p->p_fd->fd_rdir = NULL;
1633 }
1634 p->p_ucred = crdup(p->p_ucred);
1635 crfree(cred);
1636 cred = p->p_ucred;
1637 crhold(cred);
1638 cred->cr_uid = 0;
1639 cred->cr_gid = 0;
1640 }
1641
1642 /* incrash should be 0 or KERNELPATH only */
1643 NDINIT(&nd, 0, incrash, UIO_SYSSPACE, name, p);
1644
1645 error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
1646 S_IRUSR | S_IWUSR);
1647
1648 if (error)
1649 goto out;
1650
1651 /*
1652 * Don't dump to non-regular files, files with links, or files
1653 * owned by someone else.
1654 */
1655 vp = nd.ni_vp;
1656 if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
1657 VOP_UNLOCK(vp);
1658 vn_close(vp, FWRITE, cred, p);
1659 goto out;
1660 }
1661 if (vp->v_type != VREG || vattr.va_nlink != 1 ||
1662 vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
1663 vattr.va_uid != cred->cr_uid) {
1664 error = EACCES;
1665 VOP_UNLOCK(vp);
1666 vn_close(vp, FWRITE, cred, p);
1667 goto out;
1668 }
1669 VATTR_NULL(&vattr);
1670 vattr.va_size = 0;
1671 VOP_SETATTR(vp, &vattr, cred, p);
1672 pr->ps_acflag |= ACORE;
1673
1674 io.io_proc = p;
1675 io.io_vp = vp;
1676 io.io_cred = cred;
1677 io.io_offset = 0;
1678 VOP_UNLOCK(vp);
1679 vref(vp);
1680 error = vn_close(vp, FWRITE, cred, p);
1681 if (error == 0)
1682 error = coredump_elf(p, &io);
1683 vrele(vp);
1684out:
1685 crfree(cred);
1686 pool_put(&namei_pool, name);
1687 return (error);
1688#endif
1689}
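About the warning at line 1594: the assignment `error = suser(p)' exists only to steer the `&&'; every path into the block either returns EPERM or overwrites `error' at the vn_open() call, so the stored value is never read and the deadcode checker fires. A minimal reproduction of the pattern and an equivalent form without the dead store (hypothetical helper check(); a sketch, not a proposed patch):

/* check() stands in for suser(): returns 0 on success, else an errno. */
int check(void);

int
flagged_form(int cond)
{
	int error;

	/* value used by the `if', but `error' itself is never read again */
	if (cond && (error = check()))
		return 1;
	return 0;
}

int
quiet_form(int cond)
{
	/* same control flow, no dead store */
	if (cond && check() != 0)
		return 1;
	return 0;
}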
1690
1691#ifndef SMALL_KERNEL
1692int
1693coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
1694{
1695 struct coredump_iostate *io = cookie;
1696 off_t coffset = 0;
1697 size_t csize;
1698 int chunk, error;
1699
1700 csize = len;
1701 do {
1702 if (sigmask(SIGKILL) &
1703 (io->io_proc->p_siglist | io->io_proc->p_p->ps_siglist))
1704 return (EINTR);
1705
1706 /* Rest of the loop sleeps with lock held, so... */
1707 yield();
1708
1709 chunk = MIN(csize, MAXPHYS);
1710 error = vn_rdwr(UIO_WRITE, io->io_vp,
1711 (caddr_t)data + coffset, chunk,
1712 io->io_offset + coffset, segflg,
1713 IO_UNIT, io->io_cred, NULL, io->io_proc);
1714 if (error) {
1715 struct process *pr = io->io_proc->p_p;
1716
1717 if (error == ENOSPC)
1718 log(LOG_ERR,
1719 "coredump of %s(%d) failed, filesystem full\n",
1720 pr->ps_comm, pr->ps_pid);
1721 else
1722 log(LOG_ERR,
1723 "coredump of %s(%d), write failed: errno %d\n",
1724 pr->ps_comm, pr->ps_pid, error);
1725 return (error);
1726 }
1727
1728 coffset += chunk;
1729 csize -= chunk;
1730 } while (csize > 0);
1731
1732 io->io_offset += len;
1733 return (0);
1734}
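The same bounded-chunk pattern in a self-contained userland form (hypothetical chunked_write() built on write(2); unlike the kernel's IO_UNIT transfers, write(2) may be short, so the offset advances by the actual byte count):

#include <sys/types.h>
#include <unistd.h>

#define CHUNKSZ	(64 * 1024)		/* stands in for MAXPHYS */

/* Write len bytes from data in bounded chunks; 0 on success, -1 on error. */
int
chunked_write(int fd, const char *data, size_t len)
{
	size_t off = 0;

	while (off < len) {
		size_t chunk = len - off < CHUNKSZ ? len - off : CHUNKSZ;
		ssize_t n = write(fd, data + off, chunk);

		if (n == -1)
			return -1;	/* caller inspects errno */
		off += (size_t)n;
	}
	return 0;
}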
1735
1736void
1737coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
1738{
1739 struct coredump_iostate *io = cookie;
1740
1741 uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
1742}
1743
1744#endif /* !SMALL_KERNEL */
1745
1746/*
1747 * Nonexistent system call-- signal process (may want to handle it).
1748 * Flag error in case process won't see signal immediately (blocked or ignored).
1749 */
1750int
1751sys_nosys(struct proc *p, void *v, register_t *retval)
1752{
1753
1754 ptsignal(p, SIGSYS, STHREAD);
1755 return (ENOSYS);
1756}
1757
1758int
1759sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
1760{
1761 static int sigwaitsleep;
1762 struct sys___thrsigdivert_args /* {
1763 syscallarg(sigset_t) sigmask;
1764 syscallarg(siginfo_t *) info;
1765 syscallarg(const struct timespec *) timeout;
1766 } */ *uap = v;
1767 sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
1768 siginfo_t si;
1769 uint64_t nsecs = INFSLP;
1770 int timeinvalid = 0;
1771 int error = 0;
1772
1773 memset(&si, 0, sizeof(si));
1774
1775 if (SCARG(uap, timeout) != NULL) {
1776 struct timespec ts;
1777 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
1778 return (error);
1779#ifdef KTRACE
1780 if (KTRPOINT(p, KTR_STRUCT))
1781 ktrreltimespec(p, &ts);
1782#endif
1783 if (!timespecisvalid(&ts))
1784 timeinvalid = 1;
1785 else
1786 nsecs = TIMESPEC_TO_NSEC(&ts);
1787 }
1788
1789 dosigsuspend(p, p->p_sigmask &~ mask);
1790 for (;;) {
1791 si.si_signo = cursig(p);
1792 if (si.si_signo != 0) {
1793 sigset_t smask = sigmask(si.si_signo);
1794 if (smask & mask) {
1795 atomic_clearbits_int(&p->p_siglist, smask);
1796 error = 0;
1797 break;
1798 }
1799 }
1800
1801 /* per-POSIX, delay this error until after the above */
1802 if (timeinvalid)
1803 error = EINVAL;
1804 /* per-POSIX, return immediately if timeout is zero-valued */
1805 if (nsecs == 0)
1806 error = EAGAIN;
1807
1808 if (error != 0)
1809 break;
1810
1811 error = tsleep_nsec(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
1812 nsecs);
1813 }
1814
1815 if (error == 0) {
1816 *retval = si.si_signo;
1817 if (SCARG(uap, info) != NULL)
1818 error = copyout(&si, SCARG(uap, info), sizeof(si));
1819 } else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
1820 /*
1821 * Restarting is wrong if there's a timeout, as it'll be
1822 * for the same interval again.
1823 */
1824 error = EINTR;
1825 }
1826
1827 return (error);
1828}
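Assuming sigwait(3) in libc is a thin wrapper over this syscall (as it is on OpenBSD), the calling pattern from user space looks like this; the signal must be blocked first so that cursig() finds it pending instead of delivering it to a handler:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* block before waiting */

	raise(SIGUSR1);				/* now pending */

	if (sigwait(&set, &sig) == 0)		/* consumes the pending signal */
		printf("got signal %d\n", sig);
	return 0;
}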
1829
1830void
1831initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
1832{
1833 memset(si, 0, sizeof(*si));
1834
1835 si->si_signo = sig;
1836 si->si_code = code;
1837 if (code == SI_USER) {
1838 si->si_value = val;
1839 } else {
1840 switch (sig) {
1841 case SIGSEGV:
1842 case SIGILL:
1843 case SIGBUS:
1844 case SIGFPE:
1845 si->si_addr = val.sival_ptr;
1846 si->si_trapno = trapno;
1847 break;
1848 case SIGXFSZ:
1849 break;
1850 }
1851 }
1852}
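User code observes these fields through an SA_SIGINFO handler: for the fault signals listed above, si_addr carries the faulting address that initsiginfo() stored. An illustrative sketch (demo only; printing from a signal handler is not async-signal-safe):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
segv_handler(int signo, siginfo_t *si, void *ctx)
{
	/* si_signo, si_code and si_addr are the fields filled in above */
	printf("SIGSEGV code=%d addr=%p\n", si->si_code, si->si_addr);
	_exit(1);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;	/* request the siginfo_t argument */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* fault; si_addr will be near NULL */
	return 0;
}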
1853
1854int
1855filt_sigattach(struct knote *kn)
1856{
1857 struct process *pr = curproc->p_p;
1858 int s;
1859
1860 if (kn->kn_id >= NSIG)
1861 return EINVAL;
1862
1863 kn->kn_ptr.p_process = pr;
1864 kn->kn_flags |= EV_CLEAR; /* automatically set */
1865
1866 s = splhigh();
1867 klist_insert_locked(&pr->ps_klist, kn);
1868 splx(s);
1869
1870 return (0);
1871}
1872
1873void
1874filt_sigdetach(struct knote *kn)
1875{
1876 struct process *pr = kn->kn_ptr.p_process;
1877 int s;
1878
1879 s = splhigh();
1880 klist_remove_locked(&pr->ps_klist, kn);
1881 splx(s);
1882}
1883
1884/*
1885 * signal knotes are shared with proc knotes, so we apply a mask to
1886 * the hint in order to differentiate them from process hints. This
1887 * could be avoided by using a signal-specific knote list, but probably
1888 * isn't worth the trouble.
1889 */
1890int
1891filt_signal(struct knote *kn, long hint)
1892{
1893
1894 if (hint & NOTE_SIGNAL) {
1895 hint &= ~NOTE_SIGNAL;
1896
1897 if (kn->kn_id == hint)
1898 kn->kn_data++;
1899 }
1900 return (kn->kn_data != 0);
1901}
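kn_data therefore accumulates a delivery count that a kevent(2) consumer reads back; because filt_sigattach() forces EV_CLEAR, the count resets after each retrieval. A minimal userland sketch of EVFILT_SIGNAL (the filter records deliveries even when the disposition is SIG_IGN):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent ev;
	int kq = kqueue();

	signal(SIGUSR1, SIG_IGN);	/* kqueue still counts deliveries */
	EV_SET(&ev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &ev, 1, NULL, 0, NULL);

	kill(getpid(), SIGUSR1);
	kill(getpid(), SIGUSR1);

	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
		printf("signal %lu delivered %lld time(s)\n",
		    (unsigned long)ev.ident, (long long)ev.data);
	return 0;
}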
1902
1903void
1904userret(struct proc *p)
1905{
1906 int signum;
1907
1908 /* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
1909 if (p->p_flag & P_PROFPEND) {
1910 atomic_clearbits_int(&p->p_flag, P_PROFPEND);
1911 KERNEL_LOCK();
1912 psignal(p, SIGPROF);
1913 KERNEL_UNLOCK();
1914 }
1915 if (p->p_flag & P_ALRMPEND) {
1916 atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
1917 KERNEL_LOCK();
1918 psignal(p, SIGVTALRM);
1919 KERNEL_UNLOCK();
1920 }
1921
1922 if (SIGPENDING(p) != 0) {
1923 KERNEL_LOCK();
1924 while ((signum = cursig(p)) != 0)
1925 postsig(p, signum);
1926 KERNEL_UNLOCK();
1927 }
1928
1929 /*
1930 * If P_SIGSUSPEND is still set here, then we still need to restore
1931 * the original sigmask before returning to userspace. Also, this
1932 * might unmask some pending signals, so we need to check a second
1933 * time for signals to post.
1934 */
1935 if (p->p_flag & P_SIGSUSPEND) {
1936 atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
1937 p->p_sigmask = p->p_oldmask;
1938
1939 KERNEL_LOCK();
1940 while ((signum = cursig(p)) != 0)
1941 postsig(p, signum);
1942 KERNEL_UNLOCK();
1943 }
1944
1945 if (p->p_flag & P_SUSPSINGLE)
1946 single_thread_check(p, 0);
1947
1948 WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
1949
1950 p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
1951}
1952
1953int
1954single_thread_check_locked(struct proc *p, int deep, int s)
1955{
1956 struct process *pr = p->p_p;
1957
1958 SCHED_ASSERT_LOCKED();
1959
1960 if (pr->ps_single != NULL && pr->ps_single != p) {
1961 do {
1962 /* if we're in deep, we need to unwind to the edge */
1963 if (deep) {
1964 if (pr->ps_flags & PS_SINGLEUNWIND)
1965 return (ERESTART);
1966 if (pr->ps_flags & PS_SINGLEEXIT)
1967 return (EINTR);
1968 }
1969
1970 if (pr->ps_single == NULL)
1971 continue;
1972
1973 if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
1974 wakeup(&pr->ps_singlecount);
1975
1976 if (pr->ps_flags & PS_SINGLEEXIT) {
1977 SCHED_UNLOCK(s);
1978 KERNEL_LOCK();
1979 exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
1980 /* NOTREACHED */
1981 }
1982
1983 /* not exiting and don't need to unwind, so suspend */
1984 p->p_stat = SSTOP;
1985 mi_switch();
1986 } while (pr->ps_single != NULL);
1987 }
1988
1989 return (0);
1990}
1991
1992int
1993single_thread_check(struct proc *p, int deep)
1994{
1995 int s, error;
1996
1997 SCHED_LOCK(s);
1998 error = single_thread_check_locked(p, deep, s);
1999 SCHED_UNLOCK(s);
2000
2001 return error;
2002}
2003
2004/*
2005 * Stop other threads in the process. The mode controls how and
2006 * where the other threads should stop:
2007 * - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
2008 * (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
2009 * - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
2010 * or released as with SINGLE_SUSPEND
2011 * - SINGLE_EXIT: unwind to kernel boundary and exit
2012 */
2013int
2014single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
2015{
2016 struct process *pr = p->p_p;
2017 struct proc *q;
2018 int error, s;
2019
2020 KASSERT(curproc == p);
2021
2022 SCHED_LOCK(s);
2023 error = single_thread_check_locked(p, (mode == SINGLE_UNWIND), s);
2024 if (error) {
2025 SCHED_UNLOCK(s);
2026 return error;
2027 }
2028
2029 switch (mode) {
2030 case SINGLE_SUSPEND:
2031 break;
2032 case SINGLE_UNWIND:
2033 atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
2034 break;
2035 case SINGLE_EXIT:
2036 atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
2037 atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
2038 break;
2039#ifdef DIAGNOSTIC
2040 default:
2041 panic("single_thread_mode = %d", mode);
2042#endif
2043 }
2044 pr->ps_singlecount = 0;
2045 membar_producer();
2046 pr->ps_single = p;
2047 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
2048 if (q == p)
2049 continue;
2050 if (q->p_flag & P_WEXIT) {
2051 if (mode == SINGLE_EXIT) {
2052 if (q->p_stat == SSTOP) {
2053 setrunnable(q);
2054 atomic_inc_int(&pr->ps_singlecount);
2055 }
2056 }
2057 continue;
2058 }
2059 atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
2060 switch (q->p_stat) {
2061 case SIDL:
2062 case SRUN:
2063 atomic_inc_int(&pr->ps_singlecount);
2064 break;
2065 case SSLEEP:
2066 /* if it's not interruptible, then just have to wait */
2067 if (q->p_flag & P_SINTR) {
2068 /* merely need to suspend? just stop it */
2069 if (mode == SINGLE_SUSPEND) {
2070 q->p_stat = SSTOP;
2071 break;
2072 }
2073 /* need to unwind or exit, so wake it */
2074 setrunnable(q);
2075 }
2076 atomic_inc_int(&pr->ps_singlecount);
2077 break;
2078 case SSTOP:
2079 if (mode == SINGLE_EXIT) {
2080 setrunnable(q);
2081 atomic_inc_int(&pr->ps_singlecount);
2082 }
2083 break;
2084 case SDEAD:
2085 break;
2086 case SONPROC:
2087 atomic_inc_int(&pr->ps_singlecount);
2088 signotify(q);
2089 break;
2090 }
2091 }
2092 SCHED_UNLOCK(s);
2093
2094 if (wait)
2095 single_thread_wait(pr, 1);
2096
2097 return 0;
2098}
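Pieced together from the functions in this file, the caller-side protocol looks as follows (a schematic sketch, not independently compilable kernel code; sigexit() above uses exactly this SINGLE_SUSPEND sequence around coredump()):

/*
 * Schematic caller of the single-thread API defined in this file.
 * Error handling reduced to the essentials.
 */
void
operate_single_threaded(struct proc *p)
{
	/* stop all sibling threads and wait for them to park */
	if (single_thread_set(p, SINGLE_SUSPEND, 1) != 0)
		return;		/* interrupted: unwind or exit requested */

	/* ... the process image is now quiescent; e.g. dump core ... */

	/* let the siblings resume (or continue to exit) */
	single_thread_clear(p, 0);
}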
2099
2100/*
2101 * Wait for other threads to stop. If recheck is false, the function
2102 * returns non-zero when the caller needs to restart the check, and 0
2103 * otherwise. If recheck is true, the return value is always 0.
2104 */
2105int
2106single_thread_wait(struct process *pr, int recheck)
2107{
2108 struct sleep_state sls;
2109 int wait;
2110
2111 /* wait until they're all suspended */
2112 wait = pr->ps_singlecount > 0;
2113 while (wait) {
2114 sleep_setup(&sls, &pr->ps_singlecount, PWAIT32, "suspend", 0);
2115 wait = pr->ps_singlecount > 0;
2116 sleep_finish(&sls, wait);
2117 if (!recheck)
2118 break;
2119 }
2120
2121 return wait;
2122}
2123
2124void
2125single_thread_clear(struct proc *p, int flag)
2126{
2127 struct process *pr = p->p_p;
2128 struct proc *q;
2129 int s;
2130
2131 KASSERT(pr->ps_single == p);
2132 KASSERT(curproc == p);
2133
2134 SCHED_LOCK(s);
2135 pr->ps_single = NULL;
2136 atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
2137 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
2138 if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
2139 continue;
2140 atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);
2141
2142 /*
2143 * if the thread was only stopped for single threading
2144 * then clearing that either makes it runnable or puts
2145 * it back into some sleep queue
2146 */
2147 if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
2148 if (q->p_wchan == NULL)
2149 setrunnable(q);
2150 else
2151 q->p_stat = SSLEEP;
2152 }
2153 }
2154 SCHED_UNLOCK(s);
2155}
2156
2157void
2158sigio_del(struct sigiolst *rmlist)
2159{
2160 struct sigio *sigio;
2161
2162 while ((sigio = LIST_FIRST(rmlist)) != NULL) {
2163 LIST_REMOVE(sigio, sio_pgsigio);
2164 crfree(sigio->sio_ucred);
2165 free(sigio, M_SIGIO, sizeof(*sigio));
2166 }
2167}
2168
2169void
2170sigio_unlink(struct sigio_ref *sir, struct sigiolst *rmlist)
2171{
2172 struct sigio *sigio;
2173
2174 MUTEX_ASSERT_LOCKED(&sigio_lock);
2175
2176 sigio = sir->sir_sigio;
2177 if (sigio != NULL) {
2178 KASSERT(sigio->sio_myref == sir);
2179 sir->sir_sigio = NULL;
2180
2181 if (sigio->sio_pgid > 0)
2182 sigio->sio_proc = NULL;
2183 else
2184 sigio->sio_pgrp = NULL;
2185 LIST_REMOVE(sigio, sio_pgsigio);
2186
2187 LIST_INSERT_HEAD(rmlist, sigio, sio_pgsigio);
2188 }
2189}
2190
2191void
2192sigio_free(struct sigio_ref *sir)
2193{
2194 struct sigiolst rmlist;
2195
2196 if (sir->sir_sigio == NULL)
2197 return;
2198
2199 LIST_INIT(&rmlist);
2200
2201 mtx_enter(&sigio_lock);
2202 sigio_unlink(sir, &rmlist);
2203 mtx_leave(&sigio_lock);
2204
2205 sigio_del(&rmlist);
2206}
2207
2208void
2209sigio_freelist(struct sigiolst *sigiolst)
2210{
2211 struct sigiolst rmlist;
2212 struct sigio *sigio;
2213
2214 if (LIST_EMPTY(sigiolst))
2215 return;
2216
2217 LIST_INIT(&rmlist);
2218
2219 mtx_enter(&sigio_lock);
2220 while ((sigio = LIST_FIRST(sigiolst)) != NULL)
2221 sigio_unlink(sigio->sio_myref, &rmlist);
2222 mtx_leave(&sigio_lock);
2223
2224 sigio_del(&rmlist);
2225}
2226
2227int
2228sigio_setown(struct sigio_ref *sir, u_long cmd, caddr_t data)
2229{
2230 struct sigiolst rmlist;
2231 struct proc *p = curproc;
2232 struct pgrp *pgrp = NULL;
2233 struct process *pr = NULL;
2234 struct sigio *sigio;
2235 int error;
2236 pid_t pgid = *(int *)data;
2237
2238 if (pgid == 0) {
2239 sigio_free(sir);
2240 return (0);
2241 }
2242
2243 if (cmd == TIOCSPGRP) {
2244 if (pgid < 0)
2245 return (EINVAL);
2246 pgid = -pgid;
2247 }
2248
2249 sigio = malloc(sizeof(*sigio), M_SIGIO, M_WAITOK);
2250 sigio->sio_pgid = pgid;
2251 sigio->sio_ucred = crhold(p->p_ucred);
2252 sigio->sio_myref = sir;
2253
2254 LIST_INIT(&rmlist);
2255
2256 /*
2257 * The kernel lock, and not sleeping between prfind()/pgfind() and
2258 * linking of the sigio ensure that the process or process group does
2259 * not disappear unexpectedly.
2260 */
2261 KERNEL_LOCK();
2262 mtx_enter(&sigio_lock);
2263
2264 if (pgid > 0) {
2265 pr = prfind(pgid);
2266 if (pr == NULL) {
2267 error = ESRCH;
2268 goto fail;
2269 }
2270
2271 /*
2272 * Policy - Don't allow a process to FSETOWN a process
2273 * in another session.
2274 *
2275 * Remove this test to allow maximum flexibility or
2276 * restrict FSETOWN to the current process or process
2277 * group for maximum safety.
2278 */
2279 if (pr->ps_session != p->p_p->ps_session) {
2280 error = EPERM;
2281 goto fail;
2282 }
2283
2284 if ((pr->ps_flags & PS_EXITING) != 0) {
2285 error = ESRCH;
2286 goto fail;
2287 }
2288 } else /* if (pgid < 0) */ {
2289 pgrp = pgfind(-pgid);
2290 if (pgrp == NULL) {
2291 error = ESRCH;
2292 goto fail;
2293 }
2294
2295 /*
2296 * Policy - Don't allow a process to FSETOWN a process
2297 * in another session.
2298 *
2299 * Remove this test to allow maximum flexibility or
2300 * restrict FSETOWN to the current process or process
2301 * group for maximum safety.
2302 */
2303 if (pgrp->pg_session != p->p_p->ps_session) {
2304 error = EPERM;
2305 goto fail;
2306 }
2307 }
2308
2309 if (pgid > 0) {
2310 sigio->sio_proc = pr;
2311 LIST_INSERT_HEAD(&pr->ps_sigiolst, sigio, sio_pgsigio);
2312 } else {
2313 sigio->sio_pgrp = pgrp;
2314 LIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
2315 }
2316
2317 sigio_unlink(sir, &rmlist);
2318 sir->sir_sigio = sigio;
2319
2320 mtx_leave(&sigio_lock);
2321 KERNEL_UNLOCK();
2322
2323 sigio_del(&rmlist);
2324
2325 return (0);
2326
2327fail:
2328 mtx_leave(&sigio_lock);
2329 KERNEL_UNLOCK();
2330
2331 crfree(sigio->sio_ucred);
2332 free(sigio, M_SIGIO, sizeof(*sigio));
2333
2334 return (error);
2335}
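This function is reached from fcntl(F_SETOWN) and the TIOCSPGRP ioctl. A minimal userland consumer that takes the pgid > 0 branch above and asks for SIGIO on a socket (standard fcntl(2)/O_ASYNC interfaces, not this file's internals):

#include <sys/types.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
on_sigio(int signo)
{
	/* I/O is now possible on the owned descriptor */
}

int
main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	signal(SIGIO, on_sigio);
	fcntl(s, F_SETOWN, getpid());			/* pgid > 0 path */
	fcntl(s, F_SETFL, fcntl(s, F_GETFL) | O_ASYNC);	/* request SIGIO */
	pause();
	return 0;
}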
2336
2337void
2338sigio_getown(struct sigio_ref *sir, u_long cmd, caddr_t data)
2339{
2340 struct sigio *sigio;
2341 pid_t pgid = 0;
2342
2343 mtx_enter(&sigio_lock);
2344 sigio = sir->sir_sigio;
2345 if (sigio != NULL)
2346 pgid = sigio->sio_pgid;
2347 mtx_leave(&sigio_lock);
2348
2349 if (cmd == TIOCGPGRP)
2350 pgid = -pgid;
2351
2352 *(int *)data = pgid;
2353}
2354
2355void
2356sigio_copy(struct sigio_ref *dst, struct sigio_ref *src)
2357{
2358 struct sigiolst rmlist;
2359 struct sigio *newsigio, *sigio;
2360
2361 sigio_free(dst);
2362
2363 if (src->sir_sigio == NULL)
2364 return;
2365
2366 newsigio = malloc(sizeof(*newsigio), M_SIGIO, M_WAITOK);
2367 LIST_INIT(&rmlist);
2368
2369 mtx_enter(&sigio_lock);
2370
2371 sigio = src->sir_sigio;
2372 if (sigio == NULL) {
2373 mtx_leave(&sigio_lock);
2374 free(newsigio, M_SIGIO, sizeof(*newsigio));
2375 return;
2376 }
2377
2378 newsigio->sio_pgid = sigio->sio_pgid;
2379 newsigio->sio_ucred = crhold(sigio->sio_ucred);
2380 newsigio->sio_myref = dst;
2381 if (newsigio->sio_pgid > 0) {
2382 newsigio->sio_proc = sigio->sio_proc;
2383 LIST_INSERT_HEAD(&newsigio->sio_proc->ps_sigiolst, newsigio,
2384 sio_pgsigio);
2385 } else {
2386 newsigio->sio_pgrp = sigio->sio_pgrp;
2387 LIST_INSERT_HEAD(&newsigio->sio_pgrp->pg_sigiolst, newsigio,
2388 sio_pgsigio);
2389 }
2390
2391 sigio_unlink(dst, &rmlist);
2392 dst->sir_sigio = newsigio;
2393
2394 mtx_leave(&sigio_lock);
2395
2396 sigio_del(&rmlist);
2397}