Bug Summary

File: kern/kern_sig.c
Warning: line 1649, column 37
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
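The deadcode.DeadStores checker fires when the result of an assignment feeds the enclosing expression but the variable itself is never read afterwards. A minimal sketch of the flagged pattern, using a hypothetical helper check() in place of the kernel's suser():

	int check(void);	/* hypothetical helper, stands in for suser() */

	int
	example(int flags)
	{
		int error;

		/* the value stored to 'error' steers the && condition ... */
		if ((flags & 0x1) && (error = check()))
			return (1);	/* ... but 'error' itself is never read */
		return (0);
	}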

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name kern_sig.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/kern/kern_sig.c
1/* $OpenBSD: kern_sig.c,v 1.320 2023/10/06 08:58:13 claudio Exp $ */
2/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
3
4/*
5 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
6 * Copyright (c) 1982, 1986, 1989, 1991, 1993
7 * The Regents of the University of California. All rights reserved.
8 * (c) UNIX System Laboratories, Inc.
9 * All or some portions of this file are derived from material licensed
10 * to the University of California by American Telephone and Telegraph
11 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12 * the permission of UNIX System Laboratories, Inc.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 */
40
41#include <sys/param.h>
42#include <sys/signalvar.h>
43#include <sys/queue.h>
44#include <sys/namei.h>
45#include <sys/vnode.h>
46#include <sys/event.h>
47#include <sys/proc.h>
48#include <sys/systm.h>
49#include <sys/acct.h>
50#include <sys/fcntl.h>
51#include <sys/filedesc.h>
52#include <sys/wait.h>
53#include <sys/ktrace.h>
54#include <sys/stat.h>
55#include <sys/malloc.h>
56#include <sys/pool.h>
57#include <sys/sched.h>
58#include <sys/user.h>
59#include <sys/syslog.h>
60#include <sys/ttycom.h>
61#include <sys/pledge.h>
62#include <sys/witness.h>
63#include <sys/exec_elf.h>
64
65#include <sys/mount.h>
66#include <sys/syscallargs.h>
67
68#include <uvm/uvm_extern.h>
69#include <machine/tcb.h>
70
71int nosuidcoredump = 1;
72
73int filt_sigattach(struct knote *kn);
74void filt_sigdetach(struct knote *kn);
75int filt_signal(struct knote *kn, long hint);
76
77const struct filterops sig_filtops = {
78 .f_flags = 0,
79 .f_attach = filt_sigattach,
80 .f_detach = filt_sigdetach,
81 .f_event = filt_signal,
82};
83
84/*
85 * The array below categorizes the signals and their default actions.
86 */
87const int sigprop[NSIG33] = {
88 0, /* unused */
89 SA_KILL0x01, /* SIGHUP */
90 SA_KILL0x01, /* SIGINT */
91 SA_KILL0x01|SA_CORE0x02, /* SIGQUIT */
92 SA_KILL0x01|SA_CORE0x02, /* SIGILL */
93 SA_KILL0x01|SA_CORE0x02, /* SIGTRAP */
94 SA_KILL0x01|SA_CORE0x02, /* SIGABRT */
95 SA_KILL0x01|SA_CORE0x02, /* SIGEMT */
96 SA_KILL0x01|SA_CORE0x02, /* SIGFPE */
97 SA_KILL0x01, /* SIGKILL */
98 SA_KILL0x01|SA_CORE0x02, /* SIGBUS */
99 SA_KILL0x01|SA_CORE0x02, /* SIGSEGV */
100 SA_KILL0x01|SA_CORE0x02, /* SIGSYS */
101 SA_KILL0x01, /* SIGPIPE */
102 SA_KILL0x01, /* SIGALRM */
103 SA_KILL0x01, /* SIGTERM */
104 SA_IGNORE0x10, /* SIGURG */
105 SA_STOP0x04, /* SIGSTOP */
106 SA_STOP0x04|SA_TTYSTOP0x08, /* SIGTSTP */
107 SA_IGNORE0x10|SA_CONT0x20, /* SIGCONT */
108 SA_IGNORE0x10, /* SIGCHLD */
109 SA_STOP0x04|SA_TTYSTOP0x08, /* SIGTTIN */
110 SA_STOP0x04|SA_TTYSTOP0x08, /* SIGTTOU */
111 SA_IGNORE0x10, /* SIGIO */
112 SA_KILL0x01, /* SIGXCPU */
113 SA_KILL0x01, /* SIGXFSZ */
114 SA_KILL0x01, /* SIGVTALRM */
115 SA_KILL0x01, /* SIGPROF */
116 SA_IGNORE0x10, /* SIGWINCH */
117 SA_IGNORE0x10, /* SIGINFO */
118 SA_KILL0x01, /* SIGUSR1 */
119 SA_KILL0x01, /* SIGUSR2 */
120 SA_IGNORE0x10, /* SIGTHR */
121};
122
123#define CONTSIGMASK (sigmask(SIGCONT))
124#define STOPSIGMASK (sigmask(SIGSTOP) | sigmask(SIGTSTP) | \
125 sigmask(SIGTTIN) | sigmask(SIGTTOU))
126
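/*
 * Illustration (not part of kern_sig.c): sigmask(n) expands to
 * (1U << ((n)-1)), so CONTSIGMASK and STOPSIGMASK above are plain
 * bit sets over signal numbers. For example:
 *
 *	sigset_t set = STOPSIGMASK;
 *	(set & sigmask(SIGTSTP)) != 0	-- true, SIGTSTP is a stop signal
 *	(set & sigmask(SIGCONT)) != 0	-- false, SIGCONT is in CONTSIGMASK
 */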
127void setsigvec(struct proc *, int, struct sigaction *);
128
129void proc_stop(struct proc *p, int);
130void proc_stop_sweep(void *);
131void *proc_stop_si;
132
133void setsigctx(struct proc *, int, struct sigctx *);
134void postsig_done(struct proc *, int, sigset_t, int);
135void postsig(struct proc *, int, struct sigctx *);
136int cansignal(struct proc *, struct process *, int);
137
138struct pool sigacts_pool; /* memory pool for sigacts structures */
139
140void sigio_del(struct sigiolst *);
141void sigio_unlink(struct sigio_ref *, struct sigiolst *);
142struct mutex sigio_lock = MUTEX_INITIALIZER(IPL_HIGH);
143
144/*
145 * Can thread p send the signal signum to process qr?
146 */
147int
148cansignal(struct proc *p, struct process *qr, int signum)
149{
150 struct process *pr = p->p_p;
151 struct ucred *uc = p->p_ucred;
152 struct ucred *quc = qr->ps_ucred;
153
154 if (uc->cr_uid == 0)
155 return (1); /* root can always signal */
156
157 if (pr == qr)
158 return (1); /* process can always signal itself */
159
160 /* optimization: if the same creds then the tests below will pass */
161 if (uc == quc)
162 return (1);
163
164 if (signum == SIGCONT19 && qr->ps_sessionps_pgrp->pg_session == pr->ps_sessionps_pgrp->pg_session)
165 return (1); /* SIGCONT in session */
166
167 /*
168 * Using kill(), only certain signals can be sent to setugid
169 * child processes
170 */
171 if (qr->ps_flags & PS_SUGID0x00000010) {
172 switch (signum) {
173 case 0:
174 case SIGKILL9:
175 case SIGINT2:
176 case SIGTERM15:
177 case SIGALRM14:
178 case SIGSTOP17:
179 case SIGTTIN21:
180 case SIGTTOU22:
181 case SIGTSTP18:
182 case SIGHUP1:
183 case SIGUSR130:
184 case SIGUSR231:
185 if (uc->cr_ruid == quc->cr_ruid ||
186 uc->cr_uid == quc->cr_ruid)
187 return (1);
188 }
189 return (0);
190 }
191
192 if (uc->cr_ruid == quc->cr_ruid ||
193 uc->cr_ruid == quc->cr_svuid ||
194 uc->cr_uid == quc->cr_ruid ||
195 uc->cr_uid == quc->cr_svuid)
196 return (1);
197 return (0);
198}
199
200/*
201 * Initialize signal-related data structures.
202 */
203void
204signal_init(void)
205{
206 proc_stop_si = softintr_establish(IPL_SOFTCLOCK0x1, proc_stop_sweep,
207 NULL((void *)0));
208 if (proc_stop_si == NULL((void *)0))
209 panic("signal_init failed to register softintr");
210
211 pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE0x0,
212 PR_WAITOK0x0001, "sigapl", NULL((void *)0));
213}
214
215/*
216 * Initialize a new sigaltstack structure.
217 */
218void
219sigstkinit(struct sigaltstack *ss)
220{
221 ss->ss_flags = SS_DISABLE0x0004;
222 ss->ss_size = 0;
223 ss->ss_sp = NULL((void *)0);
224}
225
226/*
227 * Create an initial sigacts structure, using the same signal state
228 * as pr.
229 */
230struct sigacts *
231sigactsinit(struct process *pr)
232{
233 struct sigacts *ps;
234
235 ps = pool_get(&sigacts_pool, PR_WAITOK0x0001);
236 memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
237 return (ps);
238}
239
240/*
241 * Release a sigacts structure.
242 */
243void
244sigactsfree(struct sigacts *ps)
245{
246 pool_put(&sigacts_pool, ps);
247}
248
249int
250sys_sigaction(struct proc *p, void *v, register_t *retval)
251{
252 struct sys_sigaction_args /* {
253 syscallarg(int) signum;
254 syscallarg(const struct sigaction *) nsa;
255 syscallarg(struct sigaction *) osa;
256 } */ *uap = v;
257 struct sigaction vec;
258#ifdef KTRACE1
259 struct sigaction ovec;
260#endif
261 struct sigaction *sa;
262 const struct sigaction *nsa;
263 struct sigaction *osa;
264 struct sigacts *ps = p->p_p->ps_sigacts;
265 int signum;
266 int bit, error;
267
268 signum = SCARG(uap, signum)((uap)->signum.le.datum);
269 nsa = SCARG(uap, nsa)((uap)->nsa.le.datum);
270 osa = SCARG(uap, osa)((uap)->osa.le.datum);
271
272 if (signum <= 0 || signum >= NSIG33 ||
273 (nsa && (signum == SIGKILL9 || signum == SIGSTOP17)))
274 return (EINVAL22);
275 sa = &vec;
276 if (osa) {
277 mtx_enter(&p->p_p->ps_mtx);
278 sa->sa_handler__sigaction_u.__sa_handler = ps->ps_sigact[signum];
279 sa->sa_mask = ps->ps_catchmask[signum];
280 bit = sigmask(signum)(1U << ((signum)-1));
281 sa->sa_flags = 0;
282 if ((ps->ps_sigonstack & bit) != 0)
283 sa->sa_flags |= SA_ONSTACK0x0001;
284 if ((ps->ps_sigintr & bit) == 0)
285 sa->sa_flags |= SA_RESTART0x0002;
286 if ((ps->ps_sigreset & bit) != 0)
287 sa->sa_flags |= SA_RESETHAND0x0004;
288 if ((ps->ps_siginfo & bit) != 0)
289 sa->sa_flags |= SA_SIGINFO0x0040;
290 if (signum == SIGCHLD20) {
291 if ((ps->ps_sigflags & SAS_NOCLDSTOP0x01) != 0)
292 sa->sa_flags |= SA_NOCLDSTOP0x0008;
293 if ((ps->ps_sigflags & SAS_NOCLDWAIT0x02) != 0)
294 sa->sa_flags |= SA_NOCLDWAIT0x0020;
295 }
296 mtx_leave(&p->p_p->ps_mtx);
297 if ((sa->sa_mask & bit) == 0)
298 sa->sa_flags |= SA_NODEFER0x0010;
299 sa->sa_mask &= ~bit;
300 error = copyout(sa, osa, sizeof (vec));
301 if (error)
302 return (error);
303#ifdef KTRACE1
304 if (KTRPOINT(p, KTR_STRUCT))
305 ovec = vec;
306#endif
307 }
308 if (nsa) {
309 error = copyin(nsa, sa, sizeof (vec));
310 if (error)
311 return (error);
312#ifdef KTRACE1
313 if (KTRPOINT(p, KTR_STRUCT))
314 ktrsigaction(p, sa);
315#endif
316 setsigvec(p, signum, sa);
317 }
318#ifdef KTRACE1
319 if (osa && KTRPOINT(p, KTR_STRUCT))
320 ktrsigaction(p, &ovec);
321#endif
322 return (0);
323}
324
325void
326setsigvec(struct proc *p, int signum, struct sigaction *sa)
327{
328 struct sigacts *ps = p->p_p->ps_sigacts;
329 int bit;
330
331 bit = sigmask(signum)(1U << ((signum)-1));
332
333 mtx_enter(&p->p_p->ps_mtx);
334 ps->ps_sigact[signum] = sa->sa_handler__sigaction_u.__sa_handler;
335 if ((sa->sa_flags & SA_NODEFER0x0010) == 0)
336 sa->sa_mask |= sigmask(signum)(1U << ((signum)-1));
337 ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask((1U << ((9)-1)) | (1U << ((17)-1)));
338 if (signum == SIGCHLD20) {
339 if (sa->sa_flags & SA_NOCLDSTOP0x0008)
340 atomic_setbits_intx86_atomic_setbits_u32(&ps->ps_sigflags, SAS_NOCLDSTOP0x01);
341 else
342 atomic_clearbits_intx86_atomic_clearbits_u32(&ps->ps_sigflags, SAS_NOCLDSTOP0x01);
343 /*
344 * If the SA_NOCLDWAIT flag is set or the handler
345 * is SIG_IGN we reparent the dying child to PID 1
346 * (init) which will reap the zombie. Because we use
347 * init to do our dirty work we never set SAS_NOCLDWAIT
348 * for PID 1.
349 * XXX exit1 rework means this is unnecessary?
350 */
351 if (initprocess->ps_sigacts != ps &&
352 ((sa->sa_flags & SA_NOCLDWAIT0x0020) ||
353 sa->sa_handler__sigaction_u.__sa_handler == SIG_IGN(void (*)(int))1))
354 atomic_setbits_intx86_atomic_setbits_u32(&ps->ps_sigflags, SAS_NOCLDWAIT0x02);
355 else
356 atomic_clearbits_intx86_atomic_clearbits_u32(&ps->ps_sigflags, SAS_NOCLDWAIT0x02);
357 }
358 if ((sa->sa_flags & SA_RESETHAND0x0004) != 0)
359 ps->ps_sigreset |= bit;
360 else
361 ps->ps_sigreset &= ~bit;
362 if ((sa->sa_flags & SA_SIGINFO0x0040) != 0)
363 ps->ps_siginfo |= bit;
364 else
365 ps->ps_siginfo &= ~bit;
366 if ((sa->sa_flags & SA_RESTART0x0002) == 0)
367 ps->ps_sigintr |= bit;
368 else
369 ps->ps_sigintr &= ~bit;
370 if ((sa->sa_flags & SA_ONSTACK0x0001) != 0)
371 ps->ps_sigonstack |= bit;
372 else
373 ps->ps_sigonstack &= ~bit;
374 /*
375 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
376 * and for signals set to SIG_DFL where the default is to ignore.
377 * However, don't put SIGCONT in ps_sigignore,
378 * as we have to restart the process.
379 */
380 if (sa->sa_handler__sigaction_u.__sa_handler == SIG_IGN(void (*)(int))1 ||
381 (sigprop[signum] & SA_IGNORE0x10 && sa->sa_handler__sigaction_u.__sa_handler == SIG_DFL(void (*)(int))0)) {
382 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_siglist, bit);
383 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_p->ps_siglist, bit);
384 if (signum != SIGCONT19)
385 ps->ps_sigignore |= bit; /* easier in psignal */
386 ps->ps_sigcatch &= ~bit;
387 } else {
388 ps->ps_sigignore &= ~bit;
389 if (sa->sa_handler__sigaction_u.__sa_handler == SIG_DFL(void (*)(int))0)
390 ps->ps_sigcatch &= ~bit;
391 else
392 ps->ps_sigcatch |= bit;
393 }
394 mtx_leave(&p->p_p->ps_mtx);
395}
396
397/*
398 * Initialize signal state for process 0;
399 * set to ignore signals that are ignored by default.
400 */
401void
402siginit(struct sigacts *ps)
403{
404 int i;
405
406 for (i = 0; i < NSIG33; i++)
407 if (sigprop[i] & SA_IGNORE0x10 && i != SIGCONT19)
408 ps->ps_sigignore |= sigmask(i)(1U << ((i)-1));
409 ps->ps_sigflags = SAS_NOCLDWAIT0x02 | SAS_NOCLDSTOP0x01;
410}
411
412/*
413 * Reset signals for an exec by the specified thread.
414 */
415void
416execsigs(struct proc *p)
417{
418 struct sigacts *ps;
419 int nc, mask;
420
421 ps = p->p_p->ps_sigacts;
422 mtx_enter(&p->p_p->ps_mtx);
423
424 /*
425 * Reset caught signals. Held signals remain held
426 * through p_sigmask (unless they were caught,
427 * and are now ignored by default).
428 */
429 while (ps->ps_sigcatch) {
430 nc = ffs((long)ps->ps_sigcatch);
431 mask = sigmask(nc)(1U << ((nc)-1));
432 ps->ps_sigcatch &= ~mask;
433 if (sigprop[nc] & SA_IGNORE0x10) {
434 if (nc != SIGCONT19)
435 ps->ps_sigignore |= mask;
436 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_siglist, mask);
437 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_p->ps_siglist, mask);
438 }
439 ps->ps_sigact[nc] = SIG_DFL(void (*)(int))0;
440 }
441 /*
442 * Reset stack state to the user stack.
443 * Clear set of signals caught on the signal stack.
444 */
445 sigstkinit(&p->p_sigstk);
446 atomic_clearbits_intx86_atomic_clearbits_u32(&ps->ps_sigflags, SAS_NOCLDWAIT0x02);
447 if (ps->ps_sigact[SIGCHLD20] == SIG_IGN(void (*)(int))1)
448 ps->ps_sigact[SIGCHLD20] = SIG_DFL(void (*)(int))0;
449 mtx_leave(&p->p_p->ps_mtx);
450}
451
452/*
453 * Manipulate signal mask.
454 * Note that we receive new mask, not pointer,
455 * and return old mask as return value;
456 * the library stub does the rest.
457 */
458int
459sys_sigprocmask(struct proc *p, void *v, register_t *retval)
460{
461 struct sys_sigprocmask_args /* {
462 syscallarg(int) how;
463 syscallarg(sigset_t) mask;
464 } */ *uap = v;
465 int error = 0;
466 sigset_t mask;
467
468 KASSERT(p == curproc);
469
470 *retval = p->p_sigmask;
471 mask = SCARG(uap, mask)((uap)->mask.le.datum) &~ sigcantmask((1U << ((9)-1)) | (1U << ((17)-1)));
472
473 switch (SCARG(uap, how)((uap)->how.le.datum)) {
474 case SIG_BLOCK1:
475 atomic_setbits_intx86_atomic_setbits_u32(&p->p_sigmask, mask);
476 break;
477 case SIG_UNBLOCK2:
478 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_sigmask, mask);
479 break;
480 case SIG_SETMASK3:
481 p->p_sigmask = mask;
482 break;
483 default:
484 error = EINVAL22;
485 break;
486 }
487 return (error);
488}
489
490int
491sys_sigpending(struct proc *p, void *v, register_t *retval)
492{
493 *retval = p->p_siglist | p->p_p->ps_siglist;
494 return (0);
495}
496
497/*
498 * Temporarily replace calling proc's signal mask for the duration of a
499 * system call. Original signal mask will be restored by userret().
500 */
501void
502dosigsuspend(struct proc *p, sigset_t newmask)
503{
504 KASSERT(p == curproc);
505
506 p->p_oldmask = p->p_sigmask;
507 atomic_setbits_intx86_atomic_setbits_u32(&p->p_flag, P_SIGSUSPEND0x00000008);
508 p->p_sigmask = newmask;
509}
510
511/*
512 * Suspend thread until signal, providing mask to be set
513 * in the meantime. Note nonstandard calling convention:
514 * libc stub passes mask, not pointer, to save a copyin.
515 */
516int
517sys_sigsuspend(struct proc *p, void *v, register_t *retval)
518{
519 struct sys_sigsuspend_args /* {
520 syscallarg(int) mask;
521 } */ *uap = v;
522
523 dosigsuspend(p, SCARG(uap, mask)((uap)->mask.le.datum) &~ sigcantmask((1U << ((9)-1)) | (1U << ((17)-1))));
524 while (tsleep_nsec(&nowake, PPAUSE40|PCATCH0x100, "sigsusp", INFSLP0xffffffffffffffffULL) == 0)
525 continue;
526 /* always return EINTR rather than ERESTART... */
527 return (EINTR4);
528}
529
530int
531sigonstack(size_t stack)
532{
533 const struct sigaltstack *ss = &curproc->p_sigstk;
534
535 return (ss->ss_flags & SS_DISABLE0x0004 ? 0 :
536 (stack - (size_t)ss->ss_sp < ss->ss_size));
537}
538
539int
540sys_sigaltstack(struct proc *p, void *v, register_t *retval)
541{
542 struct sys_sigaltstack_args /* {
543 syscallarg(const struct sigaltstack *) nss;
544 syscallarg(struct sigaltstack *) oss;
545 } */ *uap = v;
546 struct sigaltstack ss;
547 const struct sigaltstack *nss;
548 struct sigaltstack *oss;
549 int onstack = sigonstack(PROC_STACK(p)((p)->p_md.md_regs->tf_rsp));
550 int error;
551
552 nss = SCARG(uap, nss)((uap)->nss.le.datum);
553 oss = SCARG(uap, oss)((uap)->oss.le.datum);
554
555 if (oss != NULL((void *)0)) {
556 ss = p->p_sigstk;
557 if (onstack)
558 ss.ss_flags |= SS_ONSTACK0x0001;
559 if ((error = copyout(&ss, oss, sizeof(ss))))
560 return (error);
561 }
562 if (nss == NULL((void *)0))
563 return (0);
564 error = copyin(nss, &ss, sizeof(ss));
565 if (error)
566 return (error);
567 if (onstack)
568 return (EPERM1);
569 if (ss.ss_flags & ~SS_DISABLE0x0004)
570 return (EINVAL22);
571 if (ss.ss_flags & SS_DISABLE0x0004) {
572 p->p_sigstk.ss_flags = ss.ss_flags;
573 return (0);
574 }
575 if (ss.ss_size < MINSIGSTKSZ(3U << 12))
576 return (ENOMEM12);
577
578 error = uvm_map_remap_as_stack(p, (vaddr_t)ss.ss_sp, ss.ss_size);
579 if (error)
580 return (error);
581
582 p->p_sigstk = ss;
583 return (0);
584}
585
586int
587sys_kill(struct proc *cp, void *v, register_t *retval)
588{
589 struct sys_kill_args /* {
590 syscallarg(int) pid;
591 syscallarg(int) signum;
592 } */ *uap = v;
593 struct process *pr;
594 int pid = SCARG(uap, pid)((uap)->pid.le.datum);
595 int signum = SCARG(uap, signum)((uap)->signum.le.datum);
596 int error;
597 int zombie = 0;
598
599 if ((error = pledge_kill(cp, pid)) != 0)
600 return (error);
601 if (((u_int)signum) >= NSIG33)
602 return (EINVAL22);
603 if (pid > 0) {
604 if ((pr = prfind(pid)) == NULL((void *)0)) {
605 if ((pr = zombiefind(pid)) == NULL((void *)0))
606 return (ESRCH3);
607 else
608 zombie = 1;
609 }
610 if (!cansignal(cp, pr, signum))
611 return (EPERM1);
612
613 /* kill single process */
614 if (signum && !zombie)
615 prsignal(pr, signum)ptsignal((pr)->ps_mainproc, (signum), SPROCESS);
616 return (0);
617 }
618 switch (pid) {
619 case -1: /* broadcast signal */
620 return (killpg1(cp, signum, 0, 1));
621 case 0: /* signal own process group */
622 return (killpg1(cp, signum, 0, 0));
623 default: /* negative explicit process group */
624 return (killpg1(cp, signum, -pid, 0));
625 }
626}
627
628int
629sys_thrkill(struct proc *cp, void *v, register_t *retval)
630{
631 struct sys_thrkill_args /* {
632 syscallarg(pid_t) tid;
633 syscallarg(int) signum;
634 syscallarg(void *) tcb;
635 } */ *uap = v;
636 struct proc *p;
637 int tid = SCARG(uap, tid)((uap)->tid.le.datum);
638 int signum = SCARG(uap, signum)((uap)->signum.le.datum);
639 void *tcb;
640
641 if (((u_int)signum) >= NSIG33)
642 return (EINVAL22);
643
644 p = tid ? tfind_user(tid, cp->p_p) : cp;
645 if (p == NULL((void *)0))
646 return (ESRCH3);
647
648 /* optionally require the target thread to have the given tcb addr */
649 tcb = SCARG(uap, tcb)((uap)->tcb.le.datum);
650 if (tcb != NULL((void *)0) && tcb != TCB_GET(p)tcb_get(p))
651 return (ESRCH3);
652
653 if (signum)
654 ptsignal(p, signum, STHREAD);
655 return (0);
656}
657
658/*
659 * Common code for kill process group/broadcast kill.
660 * cp is calling process.
661 */
662int
663killpg1(struct proc *cp, int signum, int pgid, int all)
664{
665 struct process *pr;
666 struct pgrp *pgrp;
667 int nfound = 0;
668
669 if (all) {
670 /*
671 * broadcast
672 */
673 LIST_FOREACH(pr, &allprocess, ps_list) {
674 if (pr->ps_pid <= 1 ||
675 pr->ps_flags & (PS_SYSTEM0x00010000 | PS_NOBROADCASTKILL0x00080000) ||
676 pr == cp->p_p || !cansignal(cp, pr, signum))
677 continue;
678 nfound++;
679 if (signum)
680 prsignal(pr, signum)ptsignal((pr)->ps_mainproc, (signum), SPROCESS);
681 }
682 } else {
683 if (pgid == 0)
684 /*
685 * zero pgid means send to my process group.
686 */
687 pgrp = cp->p_p->ps_pgrp;
688 else {
689 pgrp = pgfind(pgid);
690 if (pgrp == NULL((void *)0))
691 return (ESRCH3);
692 }
693 LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
694 if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM0x00010000 ||
695 !cansignal(cp, pr, signum))
696 continue;
697 nfound++;
698 if (signum)
699 prsignal(pr, signum)ptsignal((pr)->ps_mainproc, (signum), SPROCESS);
700 }
701 }
702 return (nfound ? 0 : ESRCH3);
703}
704
705#define CANDELIVER(uid, euid, pr) \
706 (euid == 0 || \
707 (uid) == (pr)->ps_ucred->cr_ruid || \
708 (uid) == (pr)->ps_ucred->cr_svuid || \
709 (uid) == (pr)->ps_ucred->cr_uid || \
710 (euid) == (pr)->ps_ucred->cr_ruid || \
711 (euid) == (pr)->ps_ucred->cr_svuid || \
712 (euid) == (pr)->ps_ucred->cr_uid)
713
714#define CANSIGIO(cr, pr) \
715 CANDELIVER((cr)->cr_ruid, (cr)->cr_uid, (pr))
716
717/*
718 * Send a signal to a process group. If checktty is 1,
719 * limit to members which have a controlling terminal.
720 */
721void
722pgsignal(struct pgrp *pgrp, int signum, int checkctty)
723{
724 struct process *pr;
725
726 if (pgrp)
727 LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
728 if (checkctty == 0 || pr->ps_flags & PS_CONTROLT0x00000001)
729 prsignal(pr, signum)ptsignal((pr)->ps_mainproc, (signum), SPROCESS);
730}
731
732/*
733 * Send a SIGIO or SIGURG signal to a process or process group using stored
734 * credentials rather than those of the current process.
735 */
736void
737pgsigio(struct sigio_ref *sir, int sig, int checkctty)
738{
739 struct process *pr;
740 struct sigio *sigio;
741
742 if (sir->sir_sigio == NULL((void *)0))
743 return;
744
745 KERNEL_LOCK()_kernel_lock();
746 mtx_enter(&sigio_lock);
747 sigio = sir->sir_sigio;
748 if (sigio == NULL((void *)0))
749 goto out;
750 if (sigio->sio_pgid > 0) {
751 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc))
752 prsignal(sigio->sio_proc, sig);
753 } else if (sigio->sio_pgid < 0) {
754 LIST_FOREACH(pr, &sigio->sio_pgrp->pg_members, ps_pglist) {
755 if (CANSIGIO(sigio->sio_ucred, pr) &&
756 (checkctty == 0 || (pr->ps_flags & PS_CONTROLT)))
757 prsignal(pr, sig);
758 }
759 }
760out:
761 mtx_leave(&sigio_lock);
762 KERNEL_UNLOCK()_kernel_unlock();
763}
764
765/*
766 * Recalculate the signal mask and reset the signal disposition after
767 * usermode frame for delivery is formed.
768 */
769void
770postsig_done(struct proc *p, int signum, sigset_t catchmask, int reset)
771{
772 p->p_ru.ru_nsignals++;
773 atomic_setbits_intx86_atomic_setbits_u32(&p->p_sigmask, catchmask);
774 if (reset != 0) {
775 sigset_t mask = sigmask(signum)(1U << ((signum)-1));
776 struct sigacts *ps = p->p_p->ps_sigacts;
777
778 mtx_enter(&p->p_p->ps_mtx);
779 ps->ps_sigcatch &= ~mask;
780 if (signum != SIGCONT19 && sigprop[signum] & SA_IGNORE0x10)
781 ps->ps_sigignore |= mask;
782 ps->ps_sigact[signum] = SIG_DFL(void (*)(int))0;
783 mtx_leave(&p->p_p->ps_mtx);
784 }
785}
786
787/*
788 * Send a signal caused by a trap to the current thread
789 * If it will be caught immediately, deliver it with correct code.
790 * Otherwise, post it normally.
791 */
792void
793trapsignal(struct proc *p, int signum, u_long trapno, int code,
794 union sigval sigval)
795{
796 struct process *pr = p->p_p;
797 struct sigctx ctx;
798 int mask;
799
800 switch (signum) {
801 case SIGILL4:
802 case SIGBUS10:
803 case SIGSEGV11:
804 pr->ps_acflag |= ATRAP0x00000040;
805 break;
806 }
807
808 mask = sigmask(signum)(1U << ((signum)-1));
809 setsigctx(p, signum, &ctx);
810 if ((pr->ps_flags & PS_TRACED0x00000200) == 0 && ctx.sig_catch != 0 &&
811 (p->p_sigmask & mask) == 0) {
812 siginfo_t si;
813
814 initsiginfo(&si, signum, trapno, code, sigval);
815#ifdef KTRACE1
816 if (KTRPOINT(p, KTR_PSIG)) {
817 ktrpsig(p, signum, ctx.sig_action,
818 p->p_sigmask, code, &si);
819 }
820#endif
821 if (sendsig(ctx.sig_action, signum, p->p_sigmask, &si,
822 ctx.sig_info, ctx.sig_onstack)) {
823 KERNEL_LOCK()_kernel_lock();
824 sigexit(p, SIGILL4);
825 /* NOTREACHED */
826 }
827 postsig_done(p, signum, ctx.sig_catchmask, ctx.sig_reset);
828 } else {
829 p->p_sisig = signum;
830 p->p_sitrapno = trapno; /* XXX for core dump/debugger */
831 p->p_sicode = code;
832 p->p_sigval = sigval;
833
834 /*
835 * If traced, stop if signal is masked, and stay stopped
836 * until released by the debugger. If our parent process
837 * is waiting for us, don't hang as we could deadlock.
838 */
839 if (((pr->ps_flags & (PS_TRACED0x00000200 | PS_PPWAIT0x00000040)) == PS_TRACED0x00000200) &&
840 signum != SIGKILL9 && (p->p_sigmask & mask) != 0) {
841 int s;
842
843 single_thread_set(p, SINGLE_SUSPEND0x01 | SINGLE_NOWAIT0x20);
844 pr->ps_xsig = signum;
845
846 SCHED_LOCK(s);
847 proc_stop(p, 1);
848 SCHED_UNLOCK(s)do { __mp_unlock(&sched_lock); spllower(s); } while ( 0);
849
850 signum = pr->ps_xsig;
851 single_thread_clear(p, 0);
852
853 /*
854 * If we are no longer being traced, or the parent
855 * didn't give us a signal, skip sending the signal.
856 */
857 if ((pr->ps_flags & PS_TRACED0x00000200) == 0 ||
858 signum == 0)
859 return;
860
861 /* update signal info */
862 p->p_sisig = signum;
863 mask = sigmask(signum)(1U << ((signum)-1));
864 }
865
866 /*
867 * Signals like SIGBUS and SIGSEGV should not, when
868 * generated by the kernel, be ignorable or blockable.
869 * If it is and we're not being traced, then just kill
870 * the process.
871 * After vfs_shutdown(9), init(8) cannot receive signals
872 * because new code pages of the signal handler cannot be
873 * mapped from halted storage. init(8) may not die or the
874 * kernel panics. Better loop between signal handler and
875 * page fault trap until the machine is halted.
876 */
877 if ((pr->ps_flags & PS_TRACED0x00000200) == 0 &&
878 (sigprop[signum] & SA_KILL0x01) &&
879 ((p->p_sigmask & mask) || ctx.sig_ignore) &&
880 pr->ps_pid != 1) {
881 KERNEL_LOCK()_kernel_lock();
882 sigexit(p, signum);
883 /* NOTREACHED */
884 }
885 KERNEL_LOCK()_kernel_lock();
886 ptsignal(p, signum, STHREAD);
887 KERNEL_UNLOCK()_kernel_unlock();
888 }
889}
890
891/*
892 * Send the signal to the process. If the signal has an action, the action
893 * is usually performed by the target process rather than the caller; we add
894 * the signal to the set of pending signals for the process.
895 *
896 * Exceptions:
897 * o When a stop signal is sent to a sleeping process that takes the
898 * default action, the process is stopped without awakening it.
899 * o SIGCONT restarts stopped processes (or puts them back to sleep)
900 * regardless of the signal action (eg, blocked or ignored).
901 *
902 * Other ignored signals are discarded immediately.
903 */
904void
905psignal(struct proc *p, int signum)
906{
907 ptsignal(p, signum, SPROCESS);
908}
909
910/*
911 * type = SPROCESS process signal, can be diverted (sigwait())
912 * type = STHREAD thread signal, but should be propagated if unhandled
913 * type = SPROPAGATED propagated to this thread, so don't propagate again
914 */
915void
916ptsignal(struct proc *p, int signum, enum signal_type type)
917{
918 int s, prop;
919 sig_t action;
920 int mask;
921 int *siglist;
922 struct process *pr = p->p_p;
923 struct proc *q;
924 int wakeparent = 0;
925
926 KERNEL_ASSERT_LOCKED();
927
928#ifdef DIAGNOSTIC1
929 if ((u_int)signum >= NSIG33 || signum == 0)
930 panic("psignal signal number");
931#endif
932
933 /* Ignore signal if the target process is exiting */
934 if (pr->ps_flags & PS_EXITING0x00000008)
935 return;
936
937 mask = sigmask(signum)(1U << ((signum)-1));
938
939 if (type == SPROCESS) {
940 /* Accept SIGKILL to coredumping processes */
941 if (pr->ps_flags & PS_COREDUMP0x00000800 && signum == SIGKILL9) {
942 atomic_setbits_intx86_atomic_setbits_u32(&pr->ps_siglist, mask);
943 return;
944 }
945
946 /*
947 * If the current thread can process the signal
948 * immediately (it's unblocked) then have it take it.
949 */
950 q = curproc;
951 if (q != NULL((void *)0) && q->p_p == pr && (q->p_flag & P_WEXIT0x00002000) == 0 &&
952 (q->p_sigmask & mask) == 0)
953 p = q;
954 else {
955 /*
956 * A process-wide signal can be diverted to a
957 * different thread that's in sigwait() for this
958 * signal. If there isn't such a thread, then
959 * pick a thread that doesn't have it blocked so
960 * that the stop/kill consideration isn't
961 * delayed. Otherwise, mark it pending on the
962 * main thread.
963 */
964 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
965 /* ignore exiting threads */
966 if (q->p_flag & P_WEXIT0x00002000)
967 continue;
968
969 /* skip threads that have the signal blocked */
970 if ((q->p_sigmask & mask) != 0)
971 continue;
972
973 /* okay, could send to this thread */
974 p = q;
975
976 /*
977 * sigsuspend, sigwait, ppoll/pselect, etc?
978 * Definitely go to this thread, as it's
979 * already blocked in the kernel.
980 */
981 if (q->p_flag & P_SIGSUSPEND0x00000008)
982 break;
983 }
984 }
985 }
986
987 if (type != SPROPAGATED)
988 knote_locked(&pr->ps_klist, NOTE_SIGNAL0x08000000 | signum);
989
990 prop = sigprop[signum];
991
992 /*
993 * If proc is traced, always give parent a chance.
994 */
995 if (pr->ps_flags & PS_TRACED0x00000200) {
996 action = SIG_DFL(void (*)(int))0;
997 } else {
998 sigset_t sigcatch, sigignore;
999
1000 /*
1001 * If the signal is being ignored,
1002 * then we forget about it immediately.
1003 * (Note: we don't set SIGCONT in ps_sigignore,
1004 * and if it is set to SIG_IGN,
1005 * action will be SIG_DFL here.)
1006 */
1007 mtx_enter(&pr->ps_mtx);
1008 sigignore = pr->ps_sigacts->ps_sigignore;
1009 sigcatch = pr->ps_sigacts->ps_sigcatch;
1010 mtx_leave(&pr->ps_mtx);
1011
1012 if (sigignore & mask)
1013 return;
1014 if (p->p_sigmask & mask) {
1015 action = SIG_HOLD(void (*)(int))3;
1016 } else if (sigcatch & mask) {
1017 action = SIG_CATCH(void (*)(int))2;
1018 } else {
1019 action = SIG_DFL(void (*)(int))0;
1020
1021 if (prop & SA_KILL0x01 && pr->ps_nice > NZERO20)
1022 pr->ps_nice = NZERO20;
1023
1024 /*
1025 * If sending a tty stop signal to a member of an
1026 * orphaned process group, discard the signal here if
1027 * the action is default; don't stop the process below
1028 * if sleeping, and don't clear any pending SIGCONT.
1029 */
1030 if (prop & SA_TTYSTOP0x08 && pr->ps_pgrp->pg_jobc == 0)
1031 return;
1032 }
1033 }
1034 /*
1035 * If delivered to process, mark as pending there. Continue and stop
1036 * signals will be propagated to all threads. So they are always
1037 * marked at thread level.
1038 */
1039 siglist = (type == SPROCESS) ? &pr->ps_siglist : &p->p_siglist;
1040 if (prop & SA_CONT0x20) {
1041 siglist = &p->p_siglist;
1042 atomic_clearbits_int(siglist, STOPSIGMASK);
1043 }
1044 if (prop & SA_STOP0x04) {
1045 siglist = &p->p_siglist;
1046 atomic_clearbits_intx86_atomic_clearbits_u32(siglist, CONTSIGMASK((1U << ((19)-1))));
1047 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_flag, P_CONTINUED0x00800000);
1048 }
1049
1050 /*
1051 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
1052 */
1053 if (prop & (SA_CONT0x20 | SA_STOP0x04) && type != SPROPAGATED)
1054 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
1055 if (q != p)
1056 ptsignal(q, signum, SPROPAGATED);
1057
1058 /*
1059 * Defer further processing for signals which are held,
1060 * except that stopped processes must be continued by SIGCONT.
1061 */
1062 if (action == SIG_HOLD(void (*)(int))3 && ((prop & SA_CONT0x20) == 0 ||
1063 p->p_stat != SSTOP4)) {
1064 atomic_setbits_intx86_atomic_setbits_u32(siglist, mask);
1065 return;
1066 }
1067
1068 SCHED_LOCK(s);
1069
1070 switch (p->p_stat) {
1071
1072 case SSLEEP3:
1073 /*
1074 * If process is sleeping uninterruptibly
1075 * we can't interrupt the sleep... the signal will
1076 * be noticed when the process returns through
1077 * trap() or syscall().
1078 */
1079 if ((p->p_flag & P_SINTR0x00000080) == 0)
1080 goto out;
1081 /*
1082 * Process is sleeping and traced... make it runnable
1083 * so it can discover the signal in cursig() and stop
1084 * for the parent.
1085 */
1086 if (pr->ps_flags & PS_TRACED0x00000200)
1087 goto run;
1088 /*
1089 * If SIGCONT is default (or ignored) and process is
1090 * asleep, we are finished; the process should not
1091 * be awakened.
1092 */
1093 if ((prop & SA_CONT0x20) && action == SIG_DFL(void (*)(int))0) {
1094 mask = 0;
1095 goto out;
1096 }
1097 /*
1098 * When a sleeping process receives a stop
1099 * signal, process immediately if possible.
1100 */
1101 if ((prop & SA_STOP0x04) && action == SIG_DFL(void (*)(int))0) {
1102 /*
1103 * If a child holding parent blocked,
1104 * stopping could cause deadlock.
1105 */
1106 if (pr->ps_flags & PS_PPWAIT0x00000040)
1107 goto out;
1108 mask = 0;
1109 pr->ps_xsig = signum;
1110 proc_stop(p, 0);
1111 goto out;
1112 }
1113 /*
1114 * All other (caught or default) signals
1115 * cause the process to run.
1116 */
1117 goto runfast;
1118 /* NOTREACHED */
1119
1120 case SSTOP4:
1121 /*
1122 * If traced process is already stopped,
1123 * then no further action is necessary.
1124 */
1125 if (pr->ps_flags & PS_TRACED0x00000200)
1126 goto out;
1127
1128 /*
1129 * Kill signal always sets processes running.
1130 */
1131 if (signum == SIGKILL9) {
1132 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_flag, P_SUSPSIG0x08000000);
1133 goto runfast;
1134 }
1135
1136 if (prop & SA_CONT0x20) {
1137 /*
1138 * If SIGCONT is default (or ignored), we continue the
1139 * process but don't leave the signal in p_siglist, as
1140 * it has no further action. If SIGCONT is held, we
1141 * continue the process and leave the signal in
1142 * p_siglist. If the process catches SIGCONT, let it
1143 * handle the signal itself. If it isn't waiting on
1144 * an event, then it goes back to run state.
1145 * Otherwise, process goes back to sleep state.
1146 */
1147 atomic_setbits_intx86_atomic_setbits_u32(&p->p_flag, P_CONTINUED0x00800000);
1148 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_flag, P_SUSPSIG0x08000000);
1149 wakeparent = 1;
1150 if (action == SIG_DFL(void (*)(int))0)
1151 atomic_clearbits_intx86_atomic_clearbits_u32(siglist, mask);
1152 if (action == SIG_CATCH(void (*)(int))2)
1153 goto runfast;
1154 if (p->p_wchan == NULL((void *)0))
1155 goto run;
1156 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_flag, P_WSLEEP0x00000020);
1157 p->p_stat = SSLEEP3;
1158 goto out;
1159 }
1160
1161 if (prop & SA_STOP0x04) {
1162 /*
1163 * Already stopped, don't need to stop again.
1164 * (If we did the shell could get confused.)
1165 */
1166 mask = 0;
1167 goto out;
1168 }
1169
1170 /*
1171 * If process is sleeping interruptibly, then simulate a
1172 * wakeup so that when it is continued, it will be made
1173 * runnable and can look at the signal. But don't make
1174 * the process runnable, leave it stopped.
1175 */
1176 if (p->p_flag & P_SINTR0x00000080)
1177 unsleep(p);
1178 goto out;
1179
1180 case SONPROC7:
1181 /* set siglist before issuing the ast */
1182 atomic_setbits_intx86_atomic_setbits_u32(siglist, mask);
1183 mask = 0;
1184 signotify(p);
1185 /* FALLTHROUGH */
1186 default:
1187 /*
1188 * SRUN, SIDL, SDEAD do nothing with the signal,
1189 * other than kicking ourselves if we are running.
1190 * It will either never be noticed, or noticed very soon.
1191 */
1192 goto out;
1193 }
1194 /* NOTREACHED */
1195
1196runfast:
1197 /*
1198 * Raise priority to at least PUSER.
1199 */
1200 if (p->p_usrpri > PUSER50)
1201 p->p_usrpri = PUSER50;
1202run:
1203 setrunnable(p);
1204out:
1205 /* finally adjust siglist */
1206 if (mask)
1207 atomic_setbits_intx86_atomic_setbits_u32(siglist, mask);
1208 SCHED_UNLOCK(s)do { __mp_unlock(&sched_lock); spllower(s); } while ( 0);
1209 if (wakeparent)
1210 wakeup(pr->ps_pptr);
1211}
1212
1213/* fill the signal context which should be used by postsig() and issignal() */
1214void
1215setsigctx(struct proc *p, int signum, struct sigctx *sctx)
1216{
1217 struct sigacts *ps = p->p_p->ps_sigacts;
1218 sigset_t mask;
1219
1220 mtx_enter(&p->p_p->ps_mtx);
1221 mask = sigmask(signum)(1U << ((signum)-1));
1222 sctx->sig_action = ps->ps_sigact[signum];
1223 sctx->sig_catchmask = ps->ps_catchmask[signum];
1224 sctx->sig_reset = (ps->ps_sigreset & mask) != 0;
1225 sctx->sig_info = (ps->ps_siginfo & mask) != 0;
1226 sctx->sig_intr = (ps->ps_sigintr & mask) != 0;
1227 sctx->sig_onstack = (ps->ps_sigonstack & mask) != 0;
1228 sctx->sig_ignore = (ps->ps_sigignore & mask) != 0;
1229 sctx->sig_catch = (ps->ps_sigcatch & mask) != 0;
1230 mtx_leave(&p->p_p->ps_mtx);
1231}
1232
1233/*
1234 * Determine signal that should be delivered to process p, the current
1235 * process, 0 if none.
1236 *
1237 * If the current process has received a signal (should be caught or cause
1238 * termination, should interrupt current syscall), return the signal number.
1239 * Stop signals with default action are processed immediately, then cleared;
1240 * they aren't returned. This is checked after each entry to the system for
1241 * a syscall or trap. The normal call sequence is
1242 *
1243 * while (signum = cursig(curproc, &ctx))
1244 * postsig(signum, &ctx);
1245 *
1246 * Assumes that if the P_SINTR flag is set, we're holding both the
1247 * kernel and scheduler locks.
1248 */
1249int
1250cursig(struct proc *p, struct sigctx *sctx)
1251{
1252 struct process *pr = p->p_p;
1253 int signum, mask, prop;
1254 sigset_t ps_siglist;
1255 int s;
1256
1257 KASSERT(p == curproc);
1258
1259 for (;;) {
1260 ps_siglist = READ_ONCE(pr->ps_siglist);
1261 membar_consumer();
1262 mask = SIGPENDING(p);
1263 if (pr->ps_flags & PS_PPWAIT0x00000040)
1264 mask &= ~STOPSIGMASK;
1265 if (mask == 0) /* no signal to send */
1266 return (0);
1267 signum = ffs((long)mask);
1268 mask = sigmask(signum)(1U << ((signum)-1));
1269
1270 /* take the signal! */
1271 if (atomic_cas_uint(&pr->ps_siglist, ps_siglist,
1272 ps_siglist & ~mask) != ps_siglist) {
1273 /* lost race taking the process signal, restart */
1274 continue;
1275 }
1276 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_siglist, mask);
1277 setsigctx(p, signum, sctx);
1278
1279 /*
1280 * We should see pending but ignored signals
1281 * only if PS_TRACED was on when they were posted.
1282 */
1283 if (sctx->sig_ignore && (pr->ps_flags & PS_TRACED0x00000200) == 0)
1284 continue;
1285
1286 /*
1287 * If traced, always stop, and stay stopped until released
1288 * by the debugger. If our parent process is waiting for
1289 * us, don't hang as we could deadlock.
1290 */
1291 if (((pr->ps_flags & (PS_TRACED0x00000200 | PS_PPWAIT0x00000040)) == PS_TRACED0x00000200) &&
1292 signum != SIGKILL9) {
1293 single_thread_set(p, SINGLE_SUSPEND0x01 | SINGLE_NOWAIT0x20);
1294 pr->ps_xsig = signum;
1295
1296 SCHED_LOCK(s);
1297 proc_stop(p, 1);
1298 SCHED_UNLOCK(s)do { __mp_unlock(&sched_lock); spllower(s); } while ( 0);
1299
1300 /*
1301 * re-take the signal before releasing
1302 * the other threads. Must check the continue
1303 * conditions below and only take the signal if
1304 * those are not true.
1305 */
1306 signum = pr->ps_xsig;
1307 mask = sigmask(signum)(1U << ((signum)-1));
1308 setsigctx(p, signum, sctx);
1309 if (!((pr->ps_flags & PS_TRACED0x00000200) == 0 ||
1310 signum == 0 ||
1311 (p->p_sigmask & mask) != 0)) {
1312 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_siglist, mask);
1313 atomic_clearbits_intx86_atomic_clearbits_u32(&pr->ps_siglist, mask);
1314 }
1315
1316 single_thread_clear(p, 0);
1317
1318 /*
1319 * If we are no longer being traced, or the parent
1320 * didn't give us a signal, look for more signals.
1321 */
1322 if ((pr->ps_flags & PS_TRACED0x00000200) == 0 ||
1323 signum == 0)
1324 continue;
1325
1326 /*
1327 * If the new signal is being masked, look for other
1328 * signals.
1329 */
1330 if ((p->p_sigmask & mask) != 0)
1331 continue;
1332
1333 }
1334
1335 prop = sigprop[signum];
1336
1337 /*
1338 * Decide whether the signal should be returned.
1339 * Return the signal's number, or fall through
1340 * to clear it from the pending mask.
1341 */
1342 switch ((long)sctx->sig_action) {
1343 case (long)SIG_DFL(void (*)(int))0:
1344 /*
1345 * Don't take default actions on system processes.
1346 */
1347 if (pr->ps_pid <= 1) {
1348#ifdef DIAGNOSTIC1
1349 /*
1350 * Are you sure you want to ignore SIGSEGV
1351 * in init? XXX
1352 */
1353 printf("Process (pid %d) got signal"
1354 " %d\n", pr->ps_pid, signum);
1355#endif
1356 break; /* == ignore */
1357 }
1358 /*
1359 * If there is a pending stop signal to process
1360 * with default action, stop here,
1361 * then clear the signal. However,
1362 * if process is member of an orphaned
1363 * process group, ignore tty stop signals.
1364 */
1365 if (prop & SA_STOP0x04) {
1366 if (pr->ps_flags & PS_TRACED0x00000200 ||
1367 (pr->ps_pgrp->pg_jobc == 0 &&
1368 prop & SA_TTYSTOP0x08))
1369 break; /* == ignore */
1370 pr->ps_xsig = signum;
1371 SCHED_LOCK(s);
1372 proc_stop(p, 1);
1373 SCHED_UNLOCK(s)do { __mp_unlock(&sched_lock); spllower(s); } while ( 0);
1374 break;
1375 } else if (prop & SA_IGNORE0x10) {
1376 /*
1377 * Except for SIGCONT, shouldn't get here.
1378 * Default action is to ignore; drop it.
1379 */
1380 break; /* == ignore */
1381 } else
1382 goto keep;
1383 /* NOTREACHED */
1384 case (long)SIG_IGN(void (*)(int))1:
1385 /*
1386 * Masking above should prevent us ever trying
1387 * to take action on an ignored signal other
1388 * than SIGCONT, unless process is traced.
1389 */
1390 if ((prop & SA_CONT0x20) == 0 &&
1391 (pr->ps_flags & PS_TRACED0x00000200) == 0)
1392 printf("%s\n", __func__);
1393 break; /* == ignore */
1394 default:
1395 /*
1396 * This signal has an action, let
1397 * postsig() process it.
1398 */
1399 goto keep;
1400 }
1401 }
1402 /* NOTREACHED */
1403
1404keep:
1405 atomic_setbits_intx86_atomic_setbits_u32(&p->p_siglist, mask); /*leave the signal for later */
1406 return (signum);
1407}
1408
1409/*
1410 * Put the argument process into the stopped state and notify the parent
1411 * via wakeup. Signals are handled elsewhere. The process must not be
1412 * on the run queue.
1413 */
1414void
1415proc_stop(struct proc *p, int sw)
1416{
1417 struct process *pr = p->p_p;
1418
1419#ifdef MULTIPROCESSOR1
1420 SCHED_ASSERT_LOCKED();
1421#endif
1422
1423 p->p_stat = SSTOP4;
1424 atomic_clearbits_intx86_atomic_clearbits_u32(&pr->ps_flags, PS_WAITED0x00000400);
1425 atomic_setbits_intx86_atomic_setbits_u32(&pr->ps_flags, PS_STOPPED0x00008000);
1426 atomic_setbits_intx86_atomic_setbits_u32(&p->p_flag, P_SUSPSIG0x08000000);
1427 /*
1428 * We need this soft interrupt to be handled fast.
1429 * Extra calls to softclock don't hurt.
1430 */
1431 softintr_schedule(proc_stop_si);
1432 if (sw)
1433 mi_switch();
1434}
1435
1436/*
1437 * Called from a soft interrupt to send signals to the parents of stopped
1438 * processes.
1439 * We can't do this in proc_stop because it's called with nasty locks held
1440 * and we would need recursive scheduler lock to deal with that.
1441 */
1442void
1443proc_stop_sweep(void *v)
1444{
1445 struct process *pr;
1446
1447 LIST_FOREACH(pr, &allprocess, ps_list) {
1448 if ((pr->ps_flags & PS_STOPPED0x00008000) == 0)
1449 continue;
1450 atomic_clearbits_intx86_atomic_clearbits_u32(&pr->ps_flags, PS_STOPPED0x00008000);
1451
1452 if ((pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDSTOP0x01) == 0)
1453 prsignal(pr->ps_pptr, SIGCHLD)ptsignal((pr->ps_pptr)->ps_mainproc, (20), SPROCESS);
1454 wakeup(pr->ps_pptr);
1455 }
1456}
1457
1458/*
1459 * Take the action for the specified signal
1460 * from the current set of pending signals.
1461 */
1462void
1463postsig(struct proc *p, int signum, struct sigctx *sctx)
1464{
1465 u_long trapno;
1466 int mask, returnmask;
1467 siginfo_t si;
1468 union sigval sigval;
1469 int code;
1470
1471 KASSERT(signum != 0);
1472
1473 mask = sigmask(signum)(1U << ((signum)-1));
1474 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_siglist, mask);
1475 sigval.sival_ptr = NULL((void *)0);
1476
1477 if (p->p_sisig != signum) {
1478 trapno = 0;
1479 code = SI_USER0;
1480 sigval.sival_ptr = NULL((void *)0);
1481 } else {
1482 trapno = p->p_sitrapno;
1483 code = p->p_sicode;
1484 sigval = p->p_sigval;
1485 }
1486 initsiginfo(&si, signum, trapno, code, sigval);
1487
1488#ifdef KTRACE1
1489 if (KTRPOINT(p, KTR_PSIG)) {
1490 ktrpsig(p, signum, sctx->sig_action, p->p_flag & P_SIGSUSPEND0x00000008 ?
1491 p->p_oldmask : p->p_sigmask, code, &si);
1492 }
1493#endif
1494 if (sctx->sig_action == SIG_DFL(void (*)(int))0) {
1495 /*
1496 * Default action, where the default is to kill
1497 * the process. (Other cases were ignored above.)
1498 */
1499 KERNEL_LOCK()_kernel_lock();
1500 sigexit(p, signum);
1501 /* NOTREACHED */
1502 } else {
1503 /*
1504 * If we get here, the signal must be caught.
1505 */
1506#ifdef DIAGNOSTIC1
1507 if (sctx->sig_action == SIG_IGN(void (*)(int))1 || (p->p_sigmask & mask))
1508 panic("postsig action");
1509#endif
1510 /*
1511 * Set the new mask value and also defer further
1512 * occurrences of this signal.
1513 *
1514 * Special case: user has done a sigpause. Here the
1515 * current mask is not of interest, but rather the
1516 * mask from before the sigpause is what we want
1517 * restored after the signal processing is completed.
1518 */
1519 if (p->p_flag & P_SIGSUSPEND0x00000008) {
1520 atomic_clearbits_intx86_atomic_clearbits_u32(&p->p_flag, P_SIGSUSPEND0x00000008);
1521 returnmask = p->p_oldmask;
1522 } else {
1523 returnmask = p->p_sigmask;
1524 }
1525 if (p->p_sisig == signum) {
1526 p->p_sisig = 0;
1527 p->p_sitrapno = 0;
1528 p->p_sicode = SI_USER0;
1529 p->p_sigval.sival_ptr = NULL((void *)0);
1530 }
1531
1532 if (sendsig(sctx->sig_action, signum, returnmask, &si,
1533 sctx->sig_info, sctx->sig_onstack)) {
1534 KERNEL_LOCK()_kernel_lock();
1535 sigexit(p, SIGILL4);
1536 /* NOTREACHED */
1537 }
1538 postsig_done(p, signum, sctx->sig_catchmask, sctx->sig_reset);
1539 }
1540}
1541
1542/*
1543 * Force the current process to exit with the specified signal, dumping core
1544 * if appropriate. We bypass the normal tests for masked and caught signals,
1545 * allowing unrecoverable failures to terminate the process without changing
1546 * signal state. Mark the accounting record with the signal termination.
1547 * If dumping core, save the signal number for the debugger. Calls exit and
1548 * does not return.
1549 */
1550void
1551sigexit(struct proc *p, int signum)
1552{
1553 /* Mark process as going away */
1554 atomic_setbits_int(&p->p_flag, P_WEXIT);
1555
1556 p->p_p->ps_acflag |= AXSIG;
1557 if (sigprop[signum] & SA_CORE) {
1558 p->p_sisig = signum;
1559
1560 /* if there are other threads, pause them */
1561 if (P_HASSIBLING(p))
1562 single_thread_set(p, SINGLE_UNWIND);
1563
1564 if (coredump(p) == 0)
1565 signum |= WCOREFLAG;
1566 }
1567 exit1(p, 0, signum, EXIT_NORMAL);
1568 /* NOTREACHED */
1569}
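
The WCOREFLAG folded into signum above is what the wait(2) status macros expose to the parent. A small demo using only standard wait-status macros:

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		raise(SIGABRT);		/* an SA_CORE signal */
		_exit(0);		/* not reached: default action kills */
	}
	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("killed by signal %d%s\n", WTERMSIG(status),
		    WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}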
1570
1571/*
1572 * Send uncatchable SIGABRT for coredump.
1573 */
1574void
1575sigabort(struct proc *p)
1576{
1577 struct sigaction sa;
1578
1579 memset(&sa, 0, sizeof sa);
1580 sa.sa_handler = SIG_DFL;
1581 setsigvec(p, SIGABRT, &sa);
1582 atomic_clearbits_int(&p->p_sigmask, sigmask(SIGABRT));
1583 psignal(p, SIGABRT);
1584}
1585
1586/*
1587 * Return 1 if `sig', a given signal, is ignored or masked for `p', a given
1588 * thread, and 0 otherwise.
1589 */
1590int
1591sigismasked(struct proc *p, int sig)
1592{
1593 struct process *pr = p->p_p;
1594 int rv;
1595
1596 mtx_enter(&pr->ps_mtx);
1597 rv = (pr->ps_sigacts->ps_sigignore & sigmask(sig)) ||
1598 (p->p_sigmask & sigmask(sig));
1599 mtx_leave(&pr->ps_mtx);
1600
1601 return !!rv;
1602}
1603
1604struct coredump_iostate {
1605 struct proc *io_proc;
1606 struct vnode *io_vp;
1607 struct ucred *io_cred;
1608 off_t io_offset;
1609};
1610
1611/*
1612 * Dump core, into a file named "progname.core", unless the process was
1613 * setuid/setgid.
1614 */
1615int
1616coredump(struct proc *p)
1617{
1618#ifdef SMALL_KERNEL
1619 return EPERM;
1620#else
1621 struct process *pr = p->p_p;
1622 struct vnode *vp;
1623 struct ucred *cred = p->p_ucred;
1624 struct vmspace *vm = p->p_vmspace;
1625 struct nameidata nd;
1626 struct vattr vattr;
1627 struct coredump_iostate io;
1628 int error, len, incrash = 0;
1629 char *name;
1630 const char *dir = "/var/crash";
1631
1632 atomic_setbits_int(&pr->ps_flags, PS_COREDUMP);
1633
1634#ifdef PMAP_CHECK_COPYIN
1635 /* disable copyin checks, so we can write out text sections if needed */
1636 p->p_vmspace->vm_map.check_copyin_count = 0;
1637#endif
1638
1639 /* Don't dump if will exceed file size limit. */
1640 if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
1641 return (EFBIG);
1642
1643 name = pool_get(&namei_pool, PR_WAITOK);
1644
1645 /*
1646 * If the process has inconsistent uids, nosuidcoredump
1647 * determines coredump placement policy.
1648 */
1649 if (((pr->ps_flags & PS_SUGID) && (error = suser(p))) ||
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
1650 ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
1651 if (nosuidcoredump == 3) {
1652 /*
1653 * If the program directory does not exist, dumps of
1654 * that core will silently fail.
1655 */
1656 len = snprintf(name, MAXPATHLEN, "%s/%s/%u.core",
1657 dir, pr->ps_comm, pr->ps_pid);
1658 incrash = KERNELPATH;
1659 } else if (nosuidcoredump == 2) {
1660 len = snprintf(name, MAXPATHLEN, "%s/%s.core",
1661 dir, pr->ps_comm);
1662 incrash = KERNELPATH;
1663 } else {
1664 pool_put(&namei_pool, name);
1665 return (EPERM);
1666 }
1667 } else
1668 len = snprintf(name, MAXPATHLEN, "%s.core", pr->ps_comm);
1669
1670 if (len >= MAXPATHLEN) {
1671 pool_put(&namei_pool, name);
1672 return (EACCES);
1673 }
1674
1675 /*
1676 * Control the UID used to write out. The normal case uses
1677 * the real UID. If the sugid case is going to write into the
1678 * controlled directory, we do so as root.
1679 */
1680 if (incrash == 0) {
1681 cred = crdup(cred);
1682 cred->cr_uid = cred->cr_ruid;
1683 cred->cr_gid = cred->cr_rgid;
1684 } else {
1685 if (p->p_fd->fd_rdir) {
1686 vrele(p->p_fd->fd_rdir);
1687 p->p_fd->fd_rdir = NULL;
1688 }
1689 p->p_ucred = crdup(p->p_ucred);
1690 crfree(cred);
1691 cred = p->p_ucred;
1692 crhold(cred);
1693 cred->cr_uid = 0;
1694 cred->cr_gid = 0;
1695 }
1696
1697 /* incrash should be 0 or KERNELPATH only */
1698 NDINIT(&nd, 0, BYPASSUNVEIL | incrash, UIO_SYSSPACE, name, p);
1699
1700 error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
1701 S_IRUSR | S_IWUSR);
1702
1703 if (error)
1704 goto out;
1705
1706 /*
1707 * Don't dump to non-regular files, files with links, or files
1708 * owned by someone else.
1709 */
1710 vp = nd.ni_vp;
1711 if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
1712 VOP_UNLOCK(vp);
1713 vn_close(vp, FWRITE, cred, p);
1714 goto out;
1715 }
1716 if (vp->v_type != VREG || vattr.va_nlink != 1 ||
1717 vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
1718 vattr.va_uid != cred->cr_uid) {
1719 error = EACCES;
1720 VOP_UNLOCK(vp);
1721 vn_close(vp, FWRITE, cred, p);
1722 goto out;
1723 }
1724 VATTR_NULL(&vattr);
1725 vattr.va_size = 0;
1726 VOP_SETATTR(vp, &vattr, cred, p);
1727 pr->ps_acflag |= ACORE;
1728
1729 io.io_proc = p;
1730 io.io_vp = vp;
1731 io.io_cred = cred;
1732 io.io_offset = 0;
1733 VOP_UNLOCK(vp);
1734 vref(vp);
1735 error = vn_close(vp, FWRITE, cred, p);
1736 if (error == 0)
1737 error = coredump_elf(p, &io);
1738 vrele(vp);
1739out:
1740 crfree(cred);
1741 pool_put(&namei_pool, name);
1742 return (error);
1743#endif
1744}
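
The name-selection policy above (normal processes dump "progname.core" in the current directory; set-id processes dump under /var/crash only when the nosuidcoredump knob, i.e. the kern.nosuidcoredump sysctl, is 2 or 3) condensed into a hedged userland-style sketch with hypothetical names:

#include <stdio.h>

/* returns the snprintf length, or -1 when no dump is permitted */
static int
core_name(char *buf, size_t len, const char *comm, unsigned int pid,
    int sugid, int nosuidcoredump)
{
	const char *dir = "/var/crash";

	if (!sugid)
		return snprintf(buf, len, "%s.core", comm);
	switch (nosuidcoredump) {
	case 3:		/* per-program directory, one file per pid */
		return snprintf(buf, len, "%s/%s/%u.core", dir, comm, pid);
	case 2:		/* one file per program name */
		return snprintf(buf, len, "%s/%s.core", dir, comm);
	default:	/* set-id processes dump nothing (EPERM) */
		return -1;
	}
}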
1745
1746#ifndef SMALL_KERNEL
1747int
1748coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
1749{
1750 struct coredump_iostate *io = cookie;
1751 off_t coffset = 0;
1752 size_t csize;
1753 int chunk, error;
1754
1755 csize = len;
1756 do {
1757 if (sigmask(SIGKILL) &
1758 (io->io_proc->p_siglist | io->io_proc->p_p->ps_siglist))
1759 return (EINTR);
1760
1761 /* Rest of the loop sleeps with lock held, so... */
1762 yield();
1763
1764 chunk = MIN(csize, MAXPHYS);
1765 error = vn_rdwr(UIO_WRITE, io->io_vp,
1766 (caddr_t)data + coffset, chunk,
1767 io->io_offset + coffset, segflg,
1768 IO_UNIT, io->io_cred, NULL, io->io_proc);
1769 if (error) {
1770 struct process *pr = io->io_proc->p_p;
1771
1772 if (error == ENOSPC)
1773 log(LOG_ERR,
1774 "coredump of %s(%d) failed, filesystem full\n",
1775 pr->ps_comm, pr->ps_pid);
1776 else
1777 log(LOG_ERR,
1778 "coredump of %s(%d), write failed: errno %d\n",
1779 pr->ps_comm, pr->ps_pid, error);
1780 return (error);
1781 }
1782
1783 coffset += chunk;
1784 csize -= chunk;
1785 } while (csize > 0);
1786
1787 io->io_offset += len;
1788 return (0);
1789}
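
The loop above never hands vn_rdwr() more than MAXPHYS bytes at a time and checks for a pending SIGKILL between chunks. A hedged userland analogue of the chunking pattern (hypothetical helper, demo only):

#include <unistd.h>

#define CHUNK	(64 * 1024)	/* mirrors MAXPHYS in the loop above */

ssize_t
write_all(int fd, const char *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		size_t n = len - off < CHUNK ? len - off : CHUNK;
		ssize_t r = write(fd, buf + off, n);

		if (r == -1)
			return -1;	/* caller inspects errno */
		off += r;		/* tolerate short writes */
	}
	return (ssize_t)off;
}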
1790
1791void
1792coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
1793{
1794 struct coredump_iostate *io = cookie;
1795
1796 uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
1797}
1798
1799#endif /* !SMALL_KERNEL */
1800
1801/*
1802 * Nonexistent system call-- signal process (may want to handle it).
1803 * Flag error in case process won't see signal immediately (blocked or ignored).
1804 */
1805int
1806sys_nosys(struct proc *p, void *v, register_t *retval)
1807{
1808 ptsignal(p, SIGSYS, STHREAD);
1809 return (ENOSYS);
1810}
1811
1812int
1813sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
1814{
1815 struct sys___thrsigdivert_args /* {
1816 syscallarg(sigset_t) sigmask;
1817 syscallarg(siginfo_t *) info;
1818 syscallarg(const struct timespec *) timeout;
1819 } */ *uap = v;
1820 struct sigctx ctx;
1821 sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
1822 siginfo_t si;
1823 uint64_t nsecs = INFSLP;
1824 int timeinvalid = 0;
1825 int error = 0;
1826
1827 memset(&si, 0, sizeof(si));
1828
1829 if (SCARG(uap, timeout) != NULL) {
1830 struct timespec ts;
1831 if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
1832 return (error);
1833#ifdef KTRACE
1834 if (KTRPOINT(p, KTR_STRUCT))
1835 ktrreltimespec(p, &ts);
1836#endif
1837 if (!timespecisvalid(&ts))
1838 timeinvalid = 1;
1839 else
1840 nsecs = TIMESPEC_TO_NSEC(&ts);
1841 }
1842
1843 dosigsuspend(p, p->p_sigmask &~ mask);
1844 for (;;) {
1845 si.si_signo = cursig(p, &ctx);
1846 if (si.si_signo != 0) {
1847 sigset_t smask = sigmask(si.si_signo);
1848 if (smask & mask) {
1849 atomic_clearbits_int(&p->p_siglist, smask);
1850 error = 0;
1851 break;
1852 }
1853 }
1854
1855 /* per-POSIX, delay this error until after the above */
1856 if (timeinvalid)
1857 error = EINVAL;
1858 /* per-POSIX, return immediately if timeout is zero-valued */
1859 if (nsecs == 0)
1860 error = EAGAIN;
1861
1862 if (error != 0)
1863 break;
1864
1865 error = tsleep_nsec(&nowake, PPAUSE|PCATCH, "sigwait", nsecs);
1866 }
1867
1868 if (error == 0) {
1869 *retval = si.si_signo;
1870 if (SCARG(uap, info) != NULL) {
1871 error = copyout(&si, SCARG(uap, info), sizeof(si));
1872#ifdef KTRACE
1873 if (error == 0 && KTRPOINT(p, KTR_STRUCT))
1874 ktrsiginfo(p, &si);
1875#endif
1876 }
1877 } else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
1878 /*
1879 * Restarting is wrong if there's a timeout, as it'll be
1880 * for the same interval again
1881 */
1882 error = EINTR;
1883 }
1884
1885 return (error);
1886}
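
As the argument list (a mask, an optional siginfo_t, an optional timeout) suggests, sigwait(3) and friends appear to be implemented on top of this syscall. A minimal userland sketch (demo code; link with -pthread):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, NULL);	/* block before waiting */

	if (sigwait(&set, &sig) == 0)	/* sleeps in the loop above */
		printf("received signal %d\n", sig);
	return 0;
}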
1887
1888void
1889initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
1890{
1891 memset(si, 0, sizeof(*si));
1892
1893 si->si_signo = sig;
1894 si->si_code = code;
1895 if (code == SI_USER) {
1896 si->si_value = val;
1897 } else {
1898 switch (sig) {
1899 case SIGSEGV:
1900 case SIGILL:
1901 case SIGBUS:
1902 case SIGFPE:
1903 si->si_addr = val.sival_ptr;
1904 si->si_trapno = trapno;
1905 break;
1906 case SIGXFSZ:
1907 break;
1908 }
1909 }
1910}
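
The si_addr/si_trapno fields filled in here for the fault signals are what an SA_SIGINFO handler sees. A small demo (the dprintf call is for illustration only; it is not async-signal-safe):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
segv_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* si_addr carries the faulting address recorded by initsiginfo() */
	dprintf(STDERR_FILENO, "SIGSEGV at %p\n", si->si_addr);
	_exit(1);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)1 = 0;		/* deliberate wild store */
	return 0;
}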
1911
1912int
1913filt_sigattach(struct knote *kn)
1914{
1915 struct process *pr = curproc->p_p;
1916 int s;
1917
1918 if (kn->kn_id >= NSIG)
1919 return EINVAL;
1920
1921 kn->kn_ptr.p_process = pr;
1922 kn->kn_flags |= EV_CLEAR; /* automatically set */
1923
1924 s = splhigh();
1925 klist_insert_locked(&pr->ps_klist, kn);
1926 splx(s);
1927
1928 return (0);
1929}
1930
1931void
1932filt_sigdetach(struct knote *kn)
1933{
1934 struct process *pr = kn->kn_ptr.p_process;
1935 int s;
1936
1937 s = splhigh();
1938 klist_remove_locked(&pr->ps_klist, kn);
1939 splx(s);
1940}
1941
1942/*
1943 * signal knotes are shared with proc knotes, so we apply a mask to
1944 * the hint in order to differentiate them from process hints. This
1945 * could be avoided by using a signal-specific knote list, but probably
1946 * isn't worth the trouble.
1947 */
1948int
1949filt_signal(struct knote *kn, long hint)
1950{
1951
1952 if (hint & NOTE_SIGNAL) {
1953 hint &= ~NOTE_SIGNAL;
1954
1955 if (kn->kn_id == hint)
1956 kn->kn_data++;
1957 }
1958 return (kn->kn_data != 0);
1959}
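
Since kn_data is incremented per matching delivery and EV_CLEAR is forced at attach time, an EVFILT_SIGNAL consumer reads a per-wakeup delivery count. A minimal userland sketch (demo only):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		return 1;
	signal(SIGUSR1, SIG_IGN);	/* kqueue still counts deliveries */
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return 1;

	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
			printf("SIGUSR1 delivered %lld time(s)\n",
			    (long long)kev.data);
	}
}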
1960
1961void
1962userret(struct proc *p)
1963{
1964 struct sigctx ctx;
1965 int signum;
1966
1967 if (p->p_flag & P_SUSPSINGLE)
1968 single_thread_check(p, 0);
1969
1970 /* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
1971 if (p->p_flag & P_PROFPEND) {
1972 atomic_clearbits_int(&p->p_flag, P_PROFPEND);
1973 KERNEL_LOCK();
1974 psignal(p, SIGPROF);
1975 KERNEL_UNLOCK();
1976 }
1977 if (p->p_flag & P_ALRMPEND) {
1978 atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
1979 KERNEL_LOCK();
1980 psignal(p, SIGVTALRM);
1981 KERNEL_UNLOCK();
1982 }
1983
1984 if (SIGPENDING(p) != 0) {
1985 while ((signum = cursig(p, &ctx)) != 0)
1986 postsig(p, signum, &ctx);
1987 }
1988
1989 /*
1990 * If P_SIGSUSPEND is still set here, then we still need to restore
1991 * the original sigmask before returning to userspace. Also, this
1992 * might unmask some pending signals, so we need to check a second
1993 * time for signals to post.
1994 */
1995 if (p->p_flag & P_SIGSUSPEND) {
1996 atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
1997 p->p_sigmask = p->p_oldmask;
1998
1999 while ((signum = cursig(p, &ctx)) != 0)
2000 postsig(p, signum, &ctx);
2001 }
2002
2003 WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
2004
2005 p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
2006}
2007
2008int
2009single_thread_check_locked(struct proc *p, int deep, int s)
2010{
2011 struct process *pr = p->p_p;
2012
2013 SCHED_ASSERT_LOCKED();
2014
2015 if (pr->ps_single == NULL || pr->ps_single == p)
2016 return (0);
2017
2018 do {
2019 /* if we're in deep, we need to unwind to the edge */
2020 if (deep) {
2021 if (pr->ps_flags & PS_SINGLEUNWIND)
2022 return (ERESTART);
2023 if (pr->ps_flags & PS_SINGLEEXIT)
2024 return (EINTR);
2025 }
2026
2027 if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
2028 wakeup(&pr->ps_singlecount);
2029
2030 if (pr->ps_flags & PS_SINGLEEXIT) {
2031 SCHED_UNLOCK(s);
2032 KERNEL_LOCK();
2033 exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
2034 /* NOTREACHED */
2035 }
2036
2037 /* not exiting and don't need to unwind, so suspend */
2038 p->p_stat = SSTOP;
2039 mi_switch();
2040 } while (pr->ps_single != NULL);
2041
2042 return (0);
2043}
2044
2045int
2046single_thread_check(struct proc *p, int deep)
2047{
2048 int s, error;
2049
2050 SCHED_LOCK(s);
2051 error = single_thread_check_locked(p, deep, s);
2052 SCHED_UNLOCK(s);
2053
2054 return error;
2055}
2056
2057/*
2058 * Stop other threads in the process. The mode controls how and
2059 * where the other threads should stop:
2060 * - SINGLE_SUSPEND: stop wherever they are, will later be released (via
2061 * single_thread_clear())
2062 * - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
2063 * (by setting to SINGLE_EXIT) or released as with SINGLE_SUSPEND
2064 * - SINGLE_EXIT: unwind to kernel boundary and exit
2065 */
2066int
2067single_thread_set(struct proc *p, int flags)
2068{
2069 struct process *pr = p->p_p;
2070 struct proc *q;
2071 int error, s, mode = flags & SINGLE_MASK0x0f;
2072
2073 KASSERT(curproc == p);
2074
2075 SCHED_LOCK(s);
2076 error = single_thread_check_locked(p, flags & SINGLE_DEEP, s);
2077 if (error) {
2078 SCHED_UNLOCK(s);
2079 return error;
2080 }
2081
2082 switch (mode) {
2083 case SINGLE_SUSPEND:
2084 break;
2085 case SINGLE_UNWIND:
2086 atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
2087 break;
2088 case SINGLE_EXIT:
2089 atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
2090 atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
2091 break;
2092#ifdef DIAGNOSTIC
2093 default:
2094 panic("single_thread_mode = %d", mode);
2095#endif
2096 }
2097 pr->ps_singlecount = 0;
2098 membar_producer();
2099 pr->ps_single = p;
2100 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
2101 if (q == p)
2102 continue;
2103 if (q->p_flag & P_WEXIT) {
2104 if (mode == SINGLE_EXIT) {
2105 if (q->p_stat == SSTOP) {
2106 setrunnable(q);
2107 atomic_inc_int(&pr->ps_singlecount);
2108 }
2109 }
2110 continue;
2111 }
2112 atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
2113 switch (q->p_stat) {
2114 case SIDL:
2115 case SRUN:
2116 atomic_inc_int(&pr->ps_singlecount);
2117 break;
2118 case SSLEEP:
2119 /* if it's not interruptible, then just have to wait */
2120 if (q->p_flag & P_SINTR) {
2121 /* merely need to suspend? just stop it */
2122 if (mode == SINGLE_SUSPEND) {
2123 q->p_stat = SSTOP;
2124 break;
2125 }
2126 /* need to unwind or exit, so wake it */
2127 setrunnable(q);
2128 }
2129 atomic_inc_int(&pr->ps_singlecount);
2130 break;
2131 case SSTOP:
2132 if (mode == SINGLE_EXIT) {
2133 setrunnable(q);
2134 atomic_inc_int(&pr->ps_singlecount);
2135 }
2136 break;
2137 case SDEAD:
2138 break;
2139 case SONPROC:
2140 atomic_inc_int(&pr->ps_singlecount);
2141 signotify(q);
2142 break;
2143 }
2144 }
2145 SCHED_UNLOCK(s);
2146
2147 if ((flags & SINGLE_NOWAIT) == 0)
2148 single_thread_wait(pr, 1);
2149
2150 return 0;
2151}
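
A hedged caller-side sketch of how these primitives pair up (pseudo-code, not an excerpt from this file); when SINGLE_NOWAIT is absent, single_thread_set() already ends with the single_thread_wait() defined just below:

	int error;

	if ((error = single_thread_set(p, SINGLE_SUSPEND | SINGLE_DEEP)) != 0)
		return (error);
	/* ... only the calling thread runs until the clear below ... */
	single_thread_clear(p, 0);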
2152
2153/*
2154 * Wait for the other threads to stop. If recheck is false, the function
2155 * returns non-zero when the caller needs to restart the check; otherwise
2156 * it returns 0. If recheck is true, the return value is always 0.
2157 */
2158int
2159single_thread_wait(struct process *pr, int recheck)
2160{
2161 int wait;
2162
2163 /* wait until they're all suspended */
2164 wait = pr->ps_singlecount > 0;
2165 while (wait) {
2166 sleep_setup(&pr->ps_singlecount, PWAIT, "suspend");
2167 wait = pr->ps_singlecount > 0;
2168 sleep_finish(0, wait);
2169 if (!recheck)
2170 break;
2171 }
2172
2173 return wait;
2174}
2175
2176void
2177single_thread_clear(struct proc *p, int flag)
2178{
2179 struct process *pr = p->p_p;
2180 struct proc *q;
2181 int s;
2182
2183 KASSERT(pr->ps_single == p);
2184 KASSERT(curproc == p);
2185
2186 SCHED_LOCK(s);
2187 pr->ps_single = NULL;
2188 atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
2189 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
2190 if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
2191 continue;
2192 atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);
2193
2194 /*
2195 * if the thread was only stopped for single threading
2196 * then clearing that either makes it runnable or puts
2197 * it back into some sleep queue
2198 */
2199 if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
2200 if (q->p_wchan == NULL)
2201 setrunnable(q);
2202 else {
2203 atomic_clearbits_int(&q->p_flag, P_WSLEEP);
2204 q->p_stat = SSLEEP;
2205 }
2206 }
2207 }
2208 SCHED_UNLOCK(s);
2209}
2210
2211void
2212sigio_del(struct sigiolst *rmlist)
2213{
2214 struct sigio *sigio;
2215
2216 while ((sigio = LIST_FIRST(rmlist)) != NULL) {
2217 LIST_REMOVE(sigio, sio_pgsigio);
2218 crfree(sigio->sio_ucred);
2219 free(sigio, M_SIGIO, sizeof(*sigio));
2220 }
2221}
2222
2223void
2224sigio_unlink(struct sigio_ref *sir, struct sigiolst *rmlist)
2225{
2226 struct sigio *sigio;
2227
2228 MUTEX_ASSERT_LOCKED(&sigio_lock);
2229
2230 sigio = sir->sir_sigio;
2231 if (sigio != NULL) {
2232 KASSERT(sigio->sio_myref == sir);
2233 sir->sir_sigio = NULL;
2234
2235 if (sigio->sio_pgid > 0)
2236 sigio->sio_proc = NULL;
2237 else
2238 sigio->sio_pgrp = NULL;
2239 LIST_REMOVE(sigio, sio_pgsigio);
2240
2241 LIST_INSERT_HEAD(rmlist, sigio, sio_pgsigio);
2242 }
2243}
2244
2245void
2246sigio_free(struct sigio_ref *sir)
2247{
2248 struct sigiolst rmlist;
2249
2250 if (sir->sir_sigio == NULL)
2251 return;
2252
2253 LIST_INIT(&rmlist);
2254
2255 mtx_enter(&sigio_lock);
2256 sigio_unlink(sir, &rmlist);
2257 mtx_leave(&sigio_lock);
2258
2259 sigio_del(&rmlist);
2260}
2261
2262void
2263sigio_freelist(struct sigiolst *sigiolst)
2264{
2265 struct sigiolst rmlist;
2266 struct sigio *sigio;
2267
2268 if (LIST_EMPTY(sigiolst))
2269 return;
2270
2271 LIST_INIT(&rmlist);
2272
2273 mtx_enter(&sigio_lock);
2274 while ((sigio = LIST_FIRST(sigiolst)) != NULL)
2275 sigio_unlink(sigio->sio_myref, &rmlist);
2276 mtx_leave(&sigio_lock);
2277
2278 sigio_del(&rmlist);
2279}
2280
2281int
2282sigio_setown(struct sigio_ref *sir, u_long cmd, caddr_t data)
2283{
2284 struct sigiolst rmlist;
2285 struct proc *p = curproc;
2286 struct pgrp *pgrp = NULL;
2287 struct process *pr = NULL;
2288 struct sigio *sigio;
2289 int error;
2290 pid_t pgid = *(int *)data;
2291
2292 if (pgid == 0) {
2293 sigio_free(sir);
2294 return (0);
2295 }
2296
2297 if (cmd == TIOCSPGRP) {
2298 if (pgid < 0)
2299 return (EINVAL);
2300 pgid = -pgid;
2301 }
2302
2303 sigio = malloc(sizeof(*sigio), M_SIGIO, M_WAITOK);
2304 sigio->sio_pgid = pgid;
2305 sigio->sio_ucred = crhold(p->p_ucred);
2306 sigio->sio_myref = sir;
2307
2308 LIST_INIT(&rmlist);
2309
2310 /*
2311 * The kernel lock, and not sleeping between prfind()/pgfind() and
2312 * linking of the sigio ensure that the process or process group does
2313 * not disappear unexpectedly.
2314 */
2315 KERNEL_LOCK();
2316 mtx_enter(&sigio_lock);
2317
2318 if (pgid > 0) {
2319 pr = prfind(pgid);
2320 if (pr == NULL) {
2321 error = ESRCH;
2322 goto fail;
2323 }
2324
2325 /*
2326 * Policy - Don't allow a process to FSETOWN a process
2327 * in another session.
2328 *
2329 * Remove this test to allow maximum flexibility or
2330 * restrict FSETOWN to the current process or process
2331 * group for maximum safety.
2332 */
2333 if (pr->ps_session != p->p_p->ps_session) {
2334 error = EPERM;
2335 goto fail;
2336 }
2337
2338 if ((pr->ps_flags & PS_EXITING) != 0) {
2339 error = ESRCH;
2340 goto fail;
2341 }
2342 } else /* if (pgid < 0) */ {
2343 pgrp = pgfind(-pgid);
2344 if (pgrp == NULL) {
2345 error = ESRCH;
2346 goto fail;
2347 }
2348
2349 /*
2350 * Policy - Don't allow a process to FSETOWN a process
2351 * in another session.
2352 *
2353 * Remove this test to allow maximum flexibility or
2354 * restrict FSETOWN to the current process or process
2355 * group for maximum safety.
2356 */
2357 if (pgrp->pg_session != p->p_p->ps_session) {
2358 error = EPERM;
2359 goto fail;
2360 }
2361 }
2362
2363 if (pgid > 0) {
2364 sigio->sio_proc = pr;
2365 LIST_INSERT_HEAD(&pr->ps_sigiolst, sigio, sio_pgsigio);
2366 } else {
2367 sigio->sio_pgrp = pgrp;
2368 LIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
2369 }
2370
2371 sigio_unlink(sir, &rmlist);
2372 sir->sir_sigio = sigio;
2373
2374 mtx_leave(&sigio_lock);
2375 KERNEL_UNLOCK();
2376
2377 sigio_del(&rmlist);
2378
2379 return (0);
2380
2381fail:
2382 mtx_leave(&sigio_lock);
2383 KERNEL_UNLOCK();
2384
2385 crfree(sigio->sio_ucred);
2386 free(sigio, M_SIGIO, sizeof(*sigio));
2387
2388 return (error);
2389}
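
Besides the TIOCSPGRP ioctl handled above, fcntl(2)'s F_SETOWN reaches this function to pick the SIGIO/SIGURG recipient for a descriptor. A minimal userland sketch (hypothetical helper; error handling trimmed):

#include <fcntl.h>
#include <unistd.h>

int
sigio_to_me(int fd)
{
	/* route SIGIO for this descriptor to our process... */
	if (fcntl(fd, F_SETOWN, getpid()) == -1)
		return -1;
	/* ...and turn on asynchronous I/O notification */
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}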
2390
2391void
2392sigio_getown(struct sigio_ref *sir, u_long cmd, caddr_t data)
2393{
2394 struct sigio *sigio;
2395 pid_t pgid = 0;
2396
2397 mtx_enter(&sigio_lock);
2398 sigio = sir->sir_sigio;
2399 if (sigio != NULL)
2400 pgid = sigio->sio_pgid;
2401 mtx_leave(&sigio_lock);
2402
2403 if (cmd == TIOCGPGRP)
2404 pgid = -pgid;
2405
2406 *(int *)data = pgid;
2407}
2408
2409void
2410sigio_copy(struct sigio_ref *dst, struct sigio_ref *src)
2411{
2412 struct sigiolst rmlist;
2413 struct sigio *newsigio, *sigio;
2414
2415 sigio_free(dst);
2416
2417 if (src->sir_sigio == NULL)
2418 return;
2419
2420 newsigio = malloc(sizeof(*newsigio), M_SIGIO, M_WAITOK);
2421 LIST_INIT(&rmlist);
2422
2423 mtx_enter(&sigio_lock);
2424
2425 sigio = src->sir_sigio;
2426 if (sigio == NULL) {
2427 mtx_leave(&sigio_lock);
2428 free(newsigio, M_SIGIO, sizeof(*newsigio));
2429 return;
2430 }
2431
2432 newsigio->sio_pgid = sigio->sio_pgid;
2433 newsigio->sio_ucred = crhold(sigio->sio_ucred);
2434 newsigio->sio_myref = dst;
2435 if (newsigio->sio_pgid > 0) {
2436 newsigio->sio_proc = sigio->sio_proc;
2437 LIST_INSERT_HEAD(&newsigio->sio_proc->ps_sigiolst, newsigio,
2438 sio_pgsigio);
2439 } else {
2440 newsigio->sio_pgrp = sigio->sio_pgrp;
2441 LIST_INSERT_HEAD(&newsigio->sio_pgrp->pg_sigiolst, newsigio,
2442 sio_pgsigio);
2443 }
2444
2445 sigio_unlink(dst, &rmlist);
2446 dst->sir_sigio = newsigio;
2447
2448 mtx_leave(&sigio_lock);
2449
2450 sigio_del(&rmlist);
2451}