Bug Summary

File: kern/exec_elf.c
Warning: line 840, column 7
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
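
The construct being flagged is a dead store: 'error' is assigned inside the condition at line 840, but on the failure path the function jumps to the out1 cleanup label, and the value elf_os_pt_note() ultimately returns is computed from 'names' alone, so the error code stored here is never read again. A condensed sketch of the shape the checker objects to, with identifiers taken from elf_os_pt_note() in the listing below:

    /* line 840: the value written to 'error' is never read afterwards */
    if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
        hph, phsize)) != 0)
            goto out1;
    /* ... */
out1:
    free(hph, M_TEMP, phsize);
    *namesp = names;
    /* the return value depends only on 'names', not on 'error' */
    return ((names & ELF_NOTE_NAME_OPENBSD) ? 0 : ENOEXEC);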

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name exec_elf.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/kern/exec_elf.c
1/* $OpenBSD: exec_elf.c,v 1.165 2021/12/09 00:26:10 guenther Exp $ */
2
3/*
4 * Copyright (c) 1996 Per Fogelstrom
5 * All rights reserved.
6 *
7 * Copyright (c) 1994 Christos Zoulas
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34/*
35 * Copyright (c) 2001 Wasabi Systems, Inc.
36 * All rights reserved.
37 *
38 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed for the NetBSD Project by
51 * Wasabi Systems, Inc.
52 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
53 * or promote products derived from this software without specific prior
54 * written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
58 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
60 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
61 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
62 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
63 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
64 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
65 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 */
68
69#include <sys/param.h>
70#include <sys/systm.h>
71#include <sys/kernel.h>
72#include <sys/proc.h>
73#include <sys/malloc.h>
74#include <sys/pool.h>
75#include <sys/mount.h>
76#include <sys/namei.h>
77#include <sys/vnode.h>
78#include <sys/core.h>
79#include <sys/syslog.h>
80#include <sys/exec.h>
81#include <sys/exec_elf.h>
82#include <sys/fcntl.h>
83#include <sys/ptrace.h>
84#include <sys/syscall.h>
85#include <sys/signalvar.h>
86#include <sys/stat.h>
87#include <sys/pledge.h>
88
89#include <sys/mman.h>
90
91#include <uvm/uvm_extern.h>
92
93#include <machine/reg.h>
94#include <machine/exec.h>
95
96 int elf_load_file(struct proc *, char *, struct exec_package *,
97     struct elf_args *);
98 int elf_check_header(Elf_Ehdr *);
99 int elf_read_from(struct proc *, struct vnode *, u_long, void *, int);
100 void elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
101     Elf_Phdr *, Elf_Addr *, Elf_Addr *, int *, int);
102 int elf_os_pt_note_name(Elf_Note *);
103 int elf_os_pt_note(struct proc *, struct exec_package *, Elf_Ehdr *, int *);
104
105 /* round up and down to page boundaries. */
106 #define ELF_ROUND(a, b)  (((a) + (b) - 1) & ~((b) - 1))
107 #define ELF_TRUNC(a, b)  ((a) & ~((b) - 1))
108
109 /*
110  * We limit the number of program headers to 32, this should
111  * be a reasonable limit for ELF, the most we have seen so far is 12
112  */
113 #define ELF_MAX_VALID_PHDR  32
114
115 #define ELF_NOTE_NAME_OPENBSD  0x01
116
117 struct elf_note_name {
118         char *name;
119         int id;
120 } elf_note_names[] = {
121         { "OpenBSD", ELF_NOTE_NAME_OPENBSD },
122 };
123
124 #define ELFROUNDSIZE  sizeof(Elf_Word)
125 #define elfround(x)   roundup((x), ELFROUNDSIZE)
126
127
128 /*
129  * Check header for validity; return 0 for ok, ENOEXEC if error
130  */
131 int
132 elf_check_header(Elf_Ehdr *ehdr)
133 {
134         /*
135          * We need to check magic, class size, endianness, and version before
136          * we look at the rest of the Elf_Ehdr structure. These few elements
137          * are represented in a machine independent fashion.
138          */
139         if (!IS_ELF(*ehdr) ||
140             ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
141             ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
142             ehdr->e_ident[EI_VERSION] != ELF_TARG_VER)
143                 return (ENOEXEC);
144
145         /* Now check the machine dependent header */
146         if (ehdr->e_machine != ELF_TARG_MACH ||
147             ehdr->e_version != ELF_TARG_VER)
148                 return (ENOEXEC);
149
150         /* Don't allow an insane amount of sections. */
151         if (ehdr->e_phnum > ELF_MAX_VALID_PHDR)
152                 return (ENOEXEC);
153
154         return (0);
155 }
156
157 /*
158  * Load a psection at the appropriate address
159  */
160 void
161 elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
162     Elf_Phdr *ph, Elf_Addr *addr, Elf_Addr *size, int *prot, int flags)
163 {
164         u_long msize, lsize, psize, rm, rf;
165         long diff, offset, bdiff;
166         Elf_Addr base;
167
168         /*
169          * If the user specified an address, then we load there.
170          */
171         if (*addr != ELF_NO_ADDR) {
172                 if (ph->p_align > 1) {
173                         *addr = ELF_TRUNC(*addr, ph->p_align);
174                         diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
175                         /* page align vaddr */
176                         base = *addr + trunc_page(ph->p_vaddr)
177                             - ELF_TRUNC(ph->p_vaddr, ph->p_align);
178                 } else {
179                         diff = 0;
180                         base = *addr + trunc_page(ph->p_vaddr) - ph->p_vaddr;
181                 }
182         } else {
183                 *addr = ph->p_vaddr;
184                 if (ph->p_align > 1)
185                         *addr = ELF_TRUNC(*addr, ph->p_align);
186                 base = trunc_page(ph->p_vaddr);
187                 diff = ph->p_vaddr - *addr;
188         }
189         bdiff = ph->p_vaddr - trunc_page(ph->p_vaddr);
190
191         /*
192          * Enforce W^X and map W|X segments without X permission
193          * initially. The dynamic linker will make these read-only
194          * and add back X permission after relocation processing.
195          * Static executables with W|X segments will probably crash.
196          */
197         *prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
198         *prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
199         if ((ph->p_flags & PF_W) == 0)
200                 *prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
201
202         msize = ph->p_memsz + diff;
203         offset = ph->p_offset - bdiff;
204         lsize = ph->p_filesz + bdiff;
205         psize = round_page(lsize);
206
207         /*
208          * Because the pagedvn pager can't handle zero fill of the last
209          * data page if it's not page aligned we map the last page readvn.
210          */
211         if (ph->p_flags & PF_W) {
212                 psize = trunc_page(lsize);
213                 if (psize > 0)
214                         NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
215                             offset, *prot, flags);
216                 if (psize != lsize) {
217                         NEW_VMCMD2(vcset, vmcmd_map_readvn, lsize - psize,
218                             base + psize, vp, offset + psize, *prot, flags);
219                 }
220         } else {
221                 NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp, offset,
222                     *prot, flags);
223         }
224
225         /*
226          * Check if we need to extend the size of the segment
227          */
228         rm = round_page(*addr + ph->p_memsz + diff);
229         rf = round_page(*addr + ph->p_filesz + diff);
230
231         if (rm != rf) {
232                 NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0,
233                     *prot, flags);
234         }
235         *size = msize;
236 }
237
238 /*
239  * Read from vnode into buffer at offset.
240  */
241 int
242 elf_read_from(struct proc *p, struct vnode *vp, u_long off, void *buf,
243     int size)
244 {
245         int error;
246         size_t resid;
247
248         if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
249             0, p->p_ucred, &resid, p)) != 0)
250                 return error;
251         /*
252          * See if we got all of it
253          */
254         if (resid != 0)
255                 return (ENOEXEC);
256         return (0);
257 }
258
259/*
260 * Load a file (interpreter/library) pointed to by path [stolen from
261 * coff_load_shlib()]. Made slightly generic so it might be used externally.
262 */
263int
264elf_load_file(struct proc *p, char *path, struct exec_package *epp,
265 struct elf_args *ap)
266{
267 int error, i;
268 struct nameidata nd;
269 Elf_EhdrElf64_Ehdr eh;
270 Elf_PhdrElf64_Phdr *ph = NULL((void *)0);
271 u_long phsize = 0;
272 Elf_AddrElf64_Addr addr;
273 struct vnode *vp;
274 Elf_PhdrElf64_Phdr *base_ph = NULL((void *)0);
275 struct interp_ld_sec {
276 Elf_AddrElf64_Addr vaddr;
277 u_long memsz;
278 } loadmap[ELF_MAX_VALID_PHDR32];
279 int nload, idx = 0;
280 Elf_AddrElf64_Addr pos;
281 int file_align;
282 int loop;
283 size_t randomizequota = ELF_RANDOMIZE_LIMIT1024*1024;
284
285 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p)ndinitat(&nd, 0, 0x0040 | 0x0004, UIO_SYSSPACE, -100, path
, p)
;
286 nd.ni_pledge = PLEDGE_RPATH0x0000000000000001ULL;
287 nd.ni_unveil = UNVEIL_READ0x01;
288 if ((error = namei(&nd)) != 0) {
289 return (error);
290 }
291 vp = nd.ni_vp;
292 if (vp->v_type != VREG) {
293 error = EACCES13;
294 goto bad;
295 }
296 if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
297 goto bad;
298 if (vp->v_mount->mnt_flag & MNT_NOEXEC0x00000004) {
299 error = EACCES13;
300 goto bad;
301 }
302 if ((error = VOP_ACCESS(vp, VREAD00400, p->p_ucred, p)) != 0)
303 goto bad1;
304 if ((error = elf_read_from(p, nd.ni_vp, 0, &eh, sizeof(eh))) != 0)
305 goto bad1;
306
307 if (elf_check_header(&eh) || eh.e_type != ET_DYN3) {
308 error = ENOEXEC8;
309 goto bad1;
310 }
311
312 ph = mallocarray(eh.e_phnum, sizeof(Elf_PhdrElf64_Phdr), M_TEMP127, M_WAITOK0x0001);
313 phsize = eh.e_phnum * sizeof(Elf_PhdrElf64_Phdr);
314
315 if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff, ph, phsize)) != 0)
316 goto bad1;
317
318 for (i = 0; i < eh.e_phnum; i++) {
319 if (ph[i].p_type == PT_LOAD1) {
320 if (ph[i].p_filesz > ph[i].p_memsz ||
321 ph[i].p_memsz == 0) {
322 error = EINVAL22;
323 goto bad1;
324 }
325 loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr)((ph[i].p_vaddr) & ~((1 << 12) - 1));
326 loadmap[idx].memsz = round_page (ph[i].p_vaddr +(((ph[i].p_vaddr + ph[i].p_memsz - loadmap[idx].vaddr) + ((1 <<
12) - 1)) & ~((1 << 12) - 1))
327 ph[i].p_memsz - loadmap[idx].vaddr)(((ph[i].p_vaddr + ph[i].p_memsz - loadmap[idx].vaddr) + ((1 <<
12) - 1)) & ~((1 << 12) - 1))
;
328 file_align = ph[i].p_align;
329 idx++;
330 }
331 }
332 nload = idx;
333
334 /*
335 * Load the interpreter where a non-fixed mmap(NULL, ...)
336 * would (i.e. something safely out of the way).
337 */
338 pos = uvm_map_hint(p->p_vmspace, PROT_EXEC0x04, VM_MIN_ADDRESS(1 << 12),
339 VM_MAXUSER_ADDRESS0x00007f7fffffc000);
340 pos = ELF_ROUND(pos, file_align)(((pos) + (file_align) - 1) & ~((file_align) - 1));
341
342 loop = 0;
343 for (i = 0; i < nload;/**/) {
344 vaddr_t addr;
345 struct uvm_object *uobj;
346 off_t uoff;
347 size_t size;
348
349#ifdef this_needs_fixing
350 if (i == 0) {
351 uobj = &vp->v_uvm.u_obj;
352 /* need to fix uoff */
353 } else {
354#endif
355 uobj = NULL((void *)0);
356 uoff = 0;
357#ifdef this_needs_fixing
358 }
359#endif
360
361 addr = trunc_page(pos + loadmap[i].vaddr)((pos + loadmap[i].vaddr) & ~((1 << 12) - 1));
362 size = round_page(addr + loadmap[i].memsz)(((addr + loadmap[i].memsz) + ((1 << 12) - 1)) & ~(
(1 << 12) - 1))
- addr;
363
364 /* CRAP - map_findspace does not avoid daddr+BRKSIZ */
365 if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
366 (addr < (vaddr_t)p->p_vmspace->vm_daddr + BRKSIZ((paddr_t)8*1024*1024*1024)))
367 addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +((((vaddr_t)p->p_vmspace->vm_daddr + ((paddr_t)8*1024*1024
*1024)) + ((1 << 12) - 1)) & ~((1 << 12) - 1)
)
368 BRKSIZ)((((vaddr_t)p->p_vmspace->vm_daddr + ((paddr_t)8*1024*1024
*1024)) + ((1 << 12) - 1)) & ~((1 << 12) - 1)
)
;
369
370 if (uvm_map_mquery(&p->p_vmspace->vm_map, &addr, size,
371 (i == 0 ? uoff : UVM_UNKNOWN_OFFSET((voff_t) -1)), 0) != 0) {
372 if (loop == 0) {
373 loop = 1;
374 i = 0;
375 pos = 0;
376 continue;
377 }
378 error = ENOMEM12;
379 goto bad1;
380 }
381 if (addr != pos + loadmap[i].vaddr) {
382 /* base changed. */
383 pos = addr - trunc_page(loadmap[i].vaddr)((loadmap[i].vaddr) & ~((1 << 12) - 1));
384 pos = ELF_ROUND(pos,file_align)(((pos) + (file_align) - 1) & ~((file_align) - 1));
385 i = 0;
386 continue;
387 }
388
389 i++;
390 }
391
392 /*
393 * Load all the necessary sections
394 */
395 for (i = 0; i < eh.e_phnum; i++) {
396 Elf_AddrElf64_Addr size = 0;
397 int prot = 0;
398 int flags;
399
400 switch (ph[i].p_type) {
401 case PT_LOAD1:
402 if (base_ph == NULL((void *)0)) {
403 flags = VMCMD_BASE0x0002;
404 addr = pos;
405 base_ph = &ph[i];
406 } else {
407 flags = VMCMD_RELATIVE0x0001;
408 addr = ph[i].p_vaddr - base_ph->p_vaddr;
409 }
410 elf_load_psection(&epp->ep_vmcmds, nd.ni_vp,
411 &ph[i], &addr, &size, &prot, flags | VMCMD_SYSCALL0x0008);
412 /* If entry is within this section it must be text */
413 if (eh.e_entry >= ph[i].p_vaddr &&
414 eh.e_entry < (ph[i].p_vaddr + size)) {
415 epp->ep_entry = addr + eh.e_entry -
416 ELF_TRUNC(ph[i].p_vaddr,ph[i].p_align)((ph[i].p_vaddr) & ~((ph[i].p_align) - 1));
417 if (flags == VMCMD_RELATIVE0x0001)
418 epp->ep_entry += pos;
419 ap->arg_interp = pos;
420 }
421 addr += size;
422 break;
423
424 case PT_DYNAMIC2:
425 case PT_PHDR6:
426 case PT_NOTE4:
427 break;
428
429 case PT_OPENBSD_RANDOMIZE0x65a3dbe6:
430 if (ph[i].p_memsz > randomizequota) {
431 error = ENOMEM12;
432 goto bad1;
433 }
434 randomizequota -= ph[i].p_memsz;
435 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,do { struct exec_vmcmd *__vcp; if ((&epp->ep_vmcmds)->
evs_used >= (&epp->ep_vmcmds)->evs_cnt) vmcmdset_extend
(&epp->ep_vmcmds); __vcp = &(&epp->ep_vmcmds
)->evs_cmds[(&epp->ep_vmcmds)->evs_used++]; __vcp
->ev_proc = (vmcmd_randomize); __vcp->ev_len = (ph[i].p_memsz
); __vcp->ev_addr = (ph[i].p_vaddr + pos); if ((__vcp->
ev_vp = (((struct vnode *)((void *)0)))) != ((struct vnode *)
((void *)0))) vref(((struct vnode *)((void *)0))); __vcp->
ev_offset = (0); __vcp->ev_prot = (0); __vcp->ev_flags =
(0); } while (0)
436 ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0)do { struct exec_vmcmd *__vcp; if ((&epp->ep_vmcmds)->
evs_used >= (&epp->ep_vmcmds)->evs_cnt) vmcmdset_extend
(&epp->ep_vmcmds); __vcp = &(&epp->ep_vmcmds
)->evs_cmds[(&epp->ep_vmcmds)->evs_used++]; __vcp
->ev_proc = (vmcmd_randomize); __vcp->ev_len = (ph[i].p_memsz
); __vcp->ev_addr = (ph[i].p_vaddr + pos); if ((__vcp->
ev_vp = (((struct vnode *)((void *)0)))) != ((struct vnode *)
((void *)0))) vref(((struct vnode *)((void *)0))); __vcp->
ev_offset = (0); __vcp->ev_prot = (0); __vcp->ev_flags =
(0); } while (0)
;
437 break;
438
439 default:
440 break;
441 }
442 }
443
444 vn_marktext(nd.ni_vp);
445
446bad1:
447 VOP_CLOSE(nd.ni_vp, FREAD0x0001, p->p_ucred, p);
448bad:
449 free(ph, M_TEMP127, phsize);
450
451 vput(nd.ni_vp);
452 return (error);
453}
454
455/*
456 * Prepare an Elf binary's exec package
457 *
458 * First, set of the various offsets/lengths in the exec package.
459 *
460 * Then, mark the text image busy (so it can be demand paged) or error out if
461 * this is not possible. Finally, set up vmcmds for the text, data, bss, and
462 * stack segments.
463 */
464int
465exec_elf_makecmds(struct proc *p, struct exec_package *epp)
466{
467 Elf_EhdrElf64_Ehdr *eh = epp->ep_hdr;
468 Elf_PhdrElf64_Phdr *ph, *pp, *base_ph = NULL((void *)0);
469 Elf_AddrElf64_Addr phdr = 0, exe_base = 0;
470 int error, i, has_phdr = 0, names = 0;
471 char *interp = NULL((void *)0);
472 u_long phsize;
473 size_t randomizequota = ELF_RANDOMIZE_LIMIT1024*1024;
474
475 if (epp->ep_hdrvalid < sizeof(Elf_EhdrElf64_Ehdr))
476 return (ENOEXEC8);
477
478 if (elf_check_header(eh) ||
479 (eh->e_type != ET_EXEC2 && eh->e_type != ET_DYN3))
480 return (ENOEXEC8);
481
482 /*
483 * check if vnode is in open for writing, because we want to demand-
484 * page out of it. if it is, don't do it, for various reasons.
485 */
486 if (epp->ep_vp->v_writecount != 0) {
487#ifdef DIAGNOSTIC1
488 if (epp->ep_vp->v_flag & VTEXT0x0002)
489 panic("exec: a VTEXT vnode has writecount != 0");
490#endif
491 return (ETXTBSY26);
492 }
493 /*
494 * Allocate space to hold all the program headers, and read them
495 * from the file
496 */
497 ph = mallocarray(eh->e_phnum, sizeof(Elf_PhdrElf64_Phdr), M_TEMP127, M_WAITOK0x0001);
498 phsize = eh->e_phnum * sizeof(Elf_PhdrElf64_Phdr);
499
500 if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff, ph,
501 phsize)) != 0)
502 goto bad;
503
504 epp->ep_tsize = ELF_NO_ADDR((__uint64_t) ~0);
505 epp->ep_dsize = ELF_NO_ADDR((__uint64_t) ~0);
506
507 for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
508 if (pp->p_type == PT_INTERP3 && !interp) {
509 if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN1024)
510 goto bad;
511 interp = pool_get(&namei_pool, PR_WAITOK0x0001);
512 if ((error = elf_read_from(p, epp->ep_vp,
513 pp->p_offset, interp, pp->p_filesz)) != 0) {
514 goto bad;
515 }
516 if (interp[pp->p_filesz - 1] != '\0')
517 goto bad;
518 } else if (pp->p_type == PT_LOAD1) {
519 if (pp->p_filesz > pp->p_memsz ||
520 pp->p_memsz == 0) {
521 error = EINVAL22;
522 goto bad;
523 }
524 if (base_ph == NULL((void *)0))
525 base_ph = pp;
526 } else if (pp->p_type == PT_PHDR6) {
527 has_phdr = 1;
528 }
529 }
530
531 if (eh->e_type == ET_DYN3) {
532 /* need phdr and load sections for PIE */
533 if (!has_phdr || base_ph == NULL((void *)0)) {
534 error = EINVAL22;
535 goto bad;
536 }
537 /* randomize exe_base for PIE */
538 exe_base = uvm_map_pie(base_ph->p_align);
539 }
540
541 /*
542 * Verify this is an OpenBSD executable. If it's marked that way
543 * via a PT_NOTE then also check for a PT_OPENBSD_WXNEEDED segment.
544 */
545 if ((error = elf_os_pt_note(p, epp, epp->ep_hdr, &names)) != 0)
546 goto bad;
547 if (eh->e_ident[EI_OSABI7] == ELFOSABI_OPENBSD12)
548 names |= ELF_NOTE_NAME_OPENBSD0x01;
549
550 /*
551 * Load all the necessary sections
552 */
553 for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
554 Elf_AddrElf64_Addr addr, size = 0;
555 int prot = 0;
556 int flags = 0;
557
558 switch (pp->p_type) {
559 case PT_LOAD1:
560 if (exe_base != 0) {
561 if (pp == base_ph) {
562 flags = VMCMD_BASE0x0002;
563 addr = exe_base;
564 } else {
565 flags = VMCMD_RELATIVE0x0001;
566 addr = pp->p_vaddr - base_ph->p_vaddr;
567 }
568 } else
569 addr = ELF_NO_ADDR((__uint64_t) ~0);
570
571 /* Permit system calls in specific main-programs */
572 if (interp == NULL((void *)0)) {
573 /* statics. Also block the ld.so syscall-grant */
574 flags |= VMCMD_SYSCALL0x0008;
575 p->p_vmspace->vm_map.flags |= VM_MAP_SYSCALL_ONCE0x80;
576 }
577
578 /*
579 * Calculates size of text and data segments
580 * by starting at first and going to end of last.
581 * 'rwx' sections are treated as data.
582 * this is correct for BSS_PLT, but may not be
583 * for DATA_PLT, is fine for TEXT_PLT.
584 */
585 elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
586 pp, &addr, &size, &prot, flags);
587
588 /*
589 * Update exe_base in case alignment was off.
590 * For PIE, addr is relative to exe_base so
591 * adjust it (non PIE exe_base is 0 so no change).
592 */
593 if (flags == VMCMD_BASE0x0002)
594 exe_base = addr;
595 else
596 addr += exe_base;
597
598 /*
599 * Decide whether it's text or data by looking
600 * at the protection of the section
601 */
602 if (prot & PROT_WRITE0x02) {
603 /* data section */
604 if (epp->ep_dsize == ELF_NO_ADDR((__uint64_t) ~0)) {
605 epp->ep_daddr = addr;
606 epp->ep_dsize = size;
607 } else {
608 if (addr < epp->ep_daddr) {
609 epp->ep_dsize =
610 epp->ep_dsize +
611 epp->ep_daddr -
612 addr;
613 epp->ep_daddr = addr;
614 } else
615 epp->ep_dsize = addr+size -
616 epp->ep_daddr;
617 }
618 } else if (prot & PROT_EXEC0x04) {
619 /* text section */
620 if (epp->ep_tsize == ELF_NO_ADDR((__uint64_t) ~0)) {
621 epp->ep_taddr = addr;
622 epp->ep_tsize = size;
623 } else {
624 if (addr < epp->ep_taddr) {
625 epp->ep_tsize =
626 epp->ep_tsize +
627 epp->ep_taddr -
628 addr;
629 epp->ep_taddr = addr;
630 } else
631 epp->ep_tsize = addr+size -
632 epp->ep_taddr;
633 }
634 }
635 break;
636
637 case PT_SHLIB5:
638 error = ENOEXEC8;
639 goto bad;
640
641 case PT_INTERP3:
642 /* Already did this one */
643 case PT_DYNAMIC2:
644 case PT_NOTE4:
645 break;
646
647 case PT_PHDR6:
648 /* Note address of program headers (in text segment) */
649 phdr = pp->p_vaddr;
650 break;
651
652 case PT_OPENBSD_RANDOMIZE0x65a3dbe6:
653 if (ph[i].p_memsz > randomizequota) {
654 error = ENOMEM12;
655 goto bad;
656 }
657 randomizequota -= ph[i].p_memsz;
658 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,do { struct exec_vmcmd *__vcp; if ((&epp->ep_vmcmds)->
evs_used >= (&epp->ep_vmcmds)->evs_cnt) vmcmdset_extend
(&epp->ep_vmcmds); __vcp = &(&epp->ep_vmcmds
)->evs_cmds[(&epp->ep_vmcmds)->evs_used++]; __vcp
->ev_proc = (vmcmd_randomize); __vcp->ev_len = (ph[i].p_memsz
); __vcp->ev_addr = (ph[i].p_vaddr + exe_base); if ((__vcp
->ev_vp = (((struct vnode *)((void *)0)))) != ((struct vnode
*)((void *)0))) vref(((struct vnode *)((void *)0))); __vcp->
ev_offset = (0); __vcp->ev_prot = (0); __vcp->ev_flags =
(0); } while (0)
659 ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0)do { struct exec_vmcmd *__vcp; if ((&epp->ep_vmcmds)->
evs_used >= (&epp->ep_vmcmds)->evs_cnt) vmcmdset_extend
(&epp->ep_vmcmds); __vcp = &(&epp->ep_vmcmds
)->evs_cmds[(&epp->ep_vmcmds)->evs_used++]; __vcp
->ev_proc = (vmcmd_randomize); __vcp->ev_len = (ph[i].p_memsz
); __vcp->ev_addr = (ph[i].p_vaddr + exe_base); if ((__vcp
->ev_vp = (((struct vnode *)((void *)0)))) != ((struct vnode
*)((void *)0))) vref(((struct vnode *)((void *)0))); __vcp->
ev_offset = (0); __vcp->ev_prot = (0); __vcp->ev_flags =
(0); } while (0)
;
660 break;
661
662 default:
663 /*
664 * Not fatal, we don't need to understand everything
665 * :-)
666 */
667 break;
668 }
669 }
670
671 phdr += exe_base;
672
673 /*
674 * Strangely some linux programs may have all load sections marked
675 * writeable, in this case, textsize is not -1, but rather 0;
676 */
677 if (epp->ep_tsize == ELF_NO_ADDR((__uint64_t) ~0))
678 epp->ep_tsize = 0;
679 /*
680 * Another possibility is that it has all load sections marked
681 * read-only. Fake a zero-sized data segment right after the
682 * text segment.
683 */
684 if (epp->ep_dsize == ELF_NO_ADDR((__uint64_t) ~0)) {
685 epp->ep_daddr = round_page(epp->ep_taddr + epp->ep_tsize)(((epp->ep_taddr + epp->ep_tsize) + ((1 << 12) - 1
)) & ~((1 << 12) - 1))
;
686 epp->ep_dsize = 0;
687 }
688
689 epp->ep_interp = interp;
690 epp->ep_entry = eh->e_entry + exe_base;
691
692 /*
693 * Check if we found a dynamically linked binary and arrange to load
694 * its interpreter when the exec file is released.
695 */
696 if (interp || eh->e_type == ET_DYN3) {
697 struct elf_args *ap;
698
699 ap = malloc(sizeof(*ap), M_TEMP127, M_WAITOK0x0001);
700
701 ap->arg_phaddr = phdr;
702 ap->arg_phentsize = eh->e_phentsize;
703 ap->arg_phnum = eh->e_phnum;
704 ap->arg_entry = eh->e_entry + exe_base;
705 ap->arg_interp = exe_base;
706
707 epp->ep_args = ap;
708 }
709
710 free(ph, M_TEMP127, phsize);
711 vn_marktext(epp->ep_vp);
712 return (exec_setup_stack(p, epp));
713
714bad:
715 if (interp)
716 pool_put(&namei_pool, interp);
717 free(ph, M_TEMP127, phsize);
718 kill_vmcmds(&epp->ep_vmcmds);
719 if (error == 0)
720 return (ENOEXEC8);
721 return (error);
722}
723
724/*
725 * Phase II of load. It is now safe to load the interpreter. Info collected
726 * when loading the program is available for setup of the interpreter.
727 */
728int
729exec_elf_fixup(struct proc *p, struct exec_package *epp)
730{
731 char *interp;
732 int error = 0;
733 struct elf_args *ap;
734 AuxInfoAux64Info ai[ELF_AUX_ENTRIES9], *a;
735
736 ap = epp->ep_args;
737 if (ap == NULL((void *)0)) {
738 return (0);
739 }
740
741 interp = epp->ep_interp;
742
743 if (interp &&
744 (error = elf_load_file(p, interp, epp, ap)) != 0) {
745 free(ap, M_TEMP127, sizeof *ap);
746 pool_put(&namei_pool, interp);
747 kill_vmcmds(&epp->ep_vmcmds);
748 return (error);
749 }
750 /*
751 * We have to do this ourselves...
752 */
753 error = exec_process_vmcmds(p, epp);
754
755 /*
756 * Push extra arguments on the stack needed by dynamically
757 * linked binaries
758 */
759 if (error == 0) {
760 memset(&ai, 0, sizeof ai)__builtin_memset((&ai), (0), (sizeof ai));
761 a = ai;
762
763 a->au_id = AUX_phdr;
764 a->au_v = ap->arg_phaddr;
765 a++;
766
767 a->au_id = AUX_phent;
768 a->au_v = ap->arg_phentsize;
769 a++;
770
771 a->au_id = AUX_phnum;
772 a->au_v = ap->arg_phnum;
773 a++;
774
775 a->au_id = AUX_pagesz;
776 a->au_v = PAGE_SIZE(1 << 12);
777 a++;
778
779 a->au_id = AUX_base;
780 a->au_v = ap->arg_interp;
781 a++;
782
783 a->au_id = AUX_flags;
784 a->au_v = 0;
785 a++;
786
787 a->au_id = AUX_entry;
788 a->au_v = ap->arg_entry;
789 a++;
790
791 a->au_id = AUX_openbsd_timekeep;
792 a->au_v = p->p_p->ps_timekeep;
793 a++;
794
795 a->au_id = AUX_null;
796 a->au_v = 0;
797 a++;
798
799 error = copyout(ai, epp->ep_auxinfo, sizeof ai);
800 }
801 free(ap, M_TEMP127, sizeof *ap);
802 if (interp)
803 pool_put(&namei_pool, interp);
804 return (error);
805}
806
807 int
808 elf_os_pt_note_name(Elf_Note *np)
809 {
810         int i, j;
811
812         for (i = 0; i < nitems(elf_note_names); i++) {
813                 size_t namlen = strlen(elf_note_names[i].name);
814                 if (np->namesz < namlen)
815                         continue;
816                 /* verify name padding (after the NUL) is NUL */
817                 for (j = namlen + 1; j < elfround(np->namesz); j++)
818                         if (((char *)(np + 1))[j] != '\0')
819                                 continue;
820                 /* verify desc padding is NUL */
821                 for (j = np->descsz; j < elfround(np->descsz); j++)
822                         if (((char *)(np + 1))[j] != '\0')
823                                 continue;
824                 if (strcmp((char *)(np + 1), elf_note_names[i].name) == 0)
825                         return elf_note_names[i].id;
826         }
827         return (0);
828 }
829
830 int
831 elf_os_pt_note(struct proc *p, struct exec_package *epp, Elf_Ehdr *eh, int *namesp)
832 {
833         Elf_Phdr *hph, *ph;
834         Elf_Note *np = NULL;
835         size_t phsize, offset, pfilesz = 0, total;
836         int error, names = 0;
837
838         hph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
839         phsize = eh->e_phnum * sizeof(Elf_Phdr);
840         if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
841             hph, phsize)) != 0)
842                 goto out1;
843
844         for (ph = hph; ph < &hph[eh->e_phnum]; ph++) {
845                 if (ph->p_type == PT_OPENBSD_WXNEEDED) {
846                         epp->ep_flags |= EXEC_WXNEEDED;
847                         continue;
848                 }
849
850                 if (ph->p_type != PT_NOTE || ph->p_filesz > 1024)
851                         continue;
852
853                 if (np && ph->p_filesz != pfilesz) {
854                         free(np, M_TEMP, pfilesz);
855                         np = NULL;
856                 }
857                 if (!np)
858                         np = malloc(ph->p_filesz, M_TEMP, M_WAITOK);
859                 pfilesz = ph->p_filesz;
860                 if ((error = elf_read_from(p, epp->ep_vp, ph->p_offset,
861                     np, ph->p_filesz)) != 0)
862                         goto out2;
863
864                 for (offset = 0; offset < ph->p_filesz; offset += total) {
865                         Elf_Note *np2 = (Elf_Note *)((char *)np + offset);
866
867                         if (offset + sizeof(Elf_Note) > ph->p_filesz)
868                                 break;
869                         total = sizeof(Elf_Note) + elfround(np2->namesz) +
870                             elfround(np2->descsz);
871                         if (offset + total > ph->p_filesz)
872                                 break;
873                         names |= elf_os_pt_note_name(np2);
874                 }
875         }
876
877 out2:
878         free(np, M_TEMP, pfilesz);
879 out1:
880         free(hph, M_TEMP, phsize);
881         *namesp = names;
882         return ((names & ELF_NOTE_NAME_OPENBSD) ? 0 : ENOEXEC);
883 }
884
885/*
886 * Start of routines related to dumping core
887 */
888
889#ifdef SMALL_KERNEL
890int
891coredump_elf(struct proc *p, void *cookie)
892{
893 return EPERM1;
894}
895#else /* !SMALL_KERNEL */
896
897struct writesegs_state {
898 off_t notestart;
899 off_t secstart;
900 off_t secoff;
901 struct proc *p;
902 void *iocookie;
903 Elf_PhdrElf64_Phdr *psections;
904 size_t psectionslen;
905 size_t notesize;
906 int npsections;
907};
908
909uvm_coredump_setup_cb coredump_setup_elf;
910uvm_coredump_walk_cb coredump_walk_elf;
911
912int coredump_notes_elf(struct proc *, void *, size_t *);
913int coredump_note_elf(struct proc *, void *, size_t *);
914int coredump_writenote_elf(struct proc *, void *, Elf_NoteElf64_Note *,
915 const char *, void *);
916
917int
918coredump_elf(struct proc *p, void *cookie)
919{
920#ifdef DIAGNOSTIC1
921 off_t offset;
922#endif
923 struct writesegs_state ws;
924 size_t notesize;
925 int error, i;
926
927 ws.p = p;
928 ws.iocookie = cookie;
929 ws.psections = NULL((void *)0);
930
931 /*
932 * Walk the map to get all the segment offsets and lengths,
933 * write out the ELF header.
934 */
935 error = uvm_coredump_walkmap(p, coredump_setup_elf,
936 coredump_walk_elf, &ws);
937 if (error)
938 goto out;
939
940 error = coredump_write(cookie, UIO_SYSSPACE, ws.psections,
941 ws.psectionslen);
942 if (error)
943 goto out;
944
945 /* Write out the notes. */
946 error = coredump_notes_elf(p, cookie, &notesize);
947 if (error)
948 goto out;
949
950#ifdef DIAGNOSTIC1
951 if (notesize != ws.notesize)
952 panic("coredump: notesize changed: %zu != %zu",
953 ws.notesize, notesize);
954 offset = ws.notestart + notesize;
955 if (offset != ws.secstart)
956 panic("coredump: offset %lld != secstart %lld",
957 (long long) offset, (long long) ws.secstart);
958#endif
959
960 /* Pass 3: finally, write the sections themselves. */
961 for (i = 0; i < ws.npsections - 1; i++) {
962 Elf_PhdrElf64_Phdr *pent = &ws.psections[i];
963 if (pent->p_filesz == 0)
964 continue;
965
966#ifdef DIAGNOSTIC1
967 if (offset != pent->p_offset)
968 panic("coredump: offset %lld != p_offset[%d] %lld",
969 (long long) offset, i,
970 (long long) pent->p_filesz);
971#endif
972
973 error = coredump_write(cookie, UIO_USERSPACE,
974 (void *)(vaddr_t)pent->p_vaddr, pent->p_filesz);
975 if (error)
976 goto out;
977
978 coredump_unmap(cookie, (vaddr_t)pent->p_vaddr,
979 (vaddr_t)pent->p_vaddr + pent->p_filesz);
980
981#ifdef DIAGNOSTIC1
982 offset += ws.psections[i].p_filesz;
983#endif
984 }
985
986out:
987 free(ws.psections, M_TEMP127, ws.psectionslen);
988 return (error);
989}
990
991
992/*
993 * Normally we lay out core files like this:
994 * [ELF Header] [Program headers] [Notes] [data for PT_LOAD segments]
995 *
996 * However, if there's >= 65535 segments then it overflows the field
997 * in the ELF header, so the standard specifies putting a magic
998 * number there and saving the real count in the .sh_info field of
999 * the first *section* header...which requires generating a section
1000 * header. To avoid confusing tools, we include an .shstrtab section
1001 * as well so all the indexes look valid. So in this case we lay
1002 * out the core file like this:
1003 * [ELF Header] [Section Headers] [.shstrtab] [Program headers] \
1004 * [Notes] [data for PT_LOAD segments]
1005 *
1006 * The 'shstrtab' structure below is data for the second of the two
1007 * section headers, plus the .shstrtab itself, in one const buffer.
1008 */
1009static const struct {
1010 Elf_ShdrElf64_Shdr shdr;
1011 char shstrtab[sizeof(ELF_SHSTRTAB".shstrtab") + 1];
1012} shstrtab = {
1013 .shdr = {
1014 .sh_name = 1, /* offset in .shstrtab below */
1015 .sh_type = SHT_STRTAB3,
1016 .sh_offset = sizeof(Elf_EhdrElf64_Ehdr) + 2*sizeof(Elf_ShdrElf64_Shdr),
1017 .sh_size = sizeof(ELF_SHSTRTAB".shstrtab") + 1,
1018 .sh_addralign = 1,
1019 },
1020 .shstrtab = "\0" ELF_SHSTRTAB".shstrtab",
1021};
1022
1023int
1024coredump_setup_elf(int segment_count, void *cookie)
1025{
1026 Elf_EhdrElf64_Ehdr ehdr;
1027 struct writesegs_state *ws = cookie;
1028 Elf_PhdrElf64_Phdr *note;
1029 int error;
1030
1031 /* Get the count of segments, plus one for the PT_NOTE */
1032 ws->npsections = segment_count + 1;
1033
1034 /* Get the size of the notes. */
1035 error = coredump_notes_elf(ws->p, NULL((void *)0), &ws->notesize);
1036 if (error)
1037 return error;
1038
1039 /* Setup the ELF header */
1040 memset(&ehdr, 0, sizeof(ehdr))__builtin_memset((&ehdr), (0), (sizeof(ehdr)));
1041 memcpy(ehdr.e_ident, ELFMAG, SELFMAG)__builtin_memcpy((ehdr.e_ident), ("\177ELF"), (4));
1042 ehdr.e_ident[EI_CLASS4] = ELF_TARG_CLASS2;
1043 ehdr.e_ident[EI_DATA5] = ELF_TARG_DATA1;
1044 ehdr.e_ident[EI_VERSION6] = EV_CURRENT1;
1045 /* XXX Should be the OSABI/ABI version of the executable. */
1046 ehdr.e_ident[EI_OSABI7] = ELFOSABI_SYSV0;
1047 ehdr.e_ident[EI_ABIVERSION8] = 0;
1048 ehdr.e_type = ET_CORE4;
1049 /* XXX This should be the e_machine of the executable. */
1050 ehdr.e_machine = ELF_TARG_MACH62;
1051 ehdr.e_version = EV_CURRENT1;
1052 ehdr.e_entry = 0;
1053 ehdr.e_flags = 0;
1054 ehdr.e_ehsize = sizeof(ehdr);
1055 ehdr.e_phentsize = sizeof(Elf_PhdrElf64_Phdr);
1056
1057 if (ws->npsections < PN_XNUM0xffff) {
1058 ehdr.e_phoff = sizeof(ehdr);
1059 ehdr.e_shoff = 0;
1060 ehdr.e_phnum = ws->npsections;
1061 ehdr.e_shentsize = 0;
1062 ehdr.e_shnum = 0;
1063 ehdr.e_shstrndx = 0;
1064 } else {
1065 /* too many segments, use extension setup */
1066 ehdr.e_shoff = sizeof(ehdr);
1067 ehdr.e_phnum = PN_XNUM0xffff;
1068 ehdr.e_shentsize = sizeof(Elf_ShdrElf64_Shdr);
1069 ehdr.e_shnum = 2;
1070 ehdr.e_shstrndx = 1;
1071 ehdr.e_phoff = shstrtab.shdr.sh_offset + shstrtab.shdr.sh_size;
1072 }
1073
1074 /* Write out the ELF header. */
1075 error = coredump_write(ws->iocookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr));
1076 if (error)
1077 return error;
1078
1079 /*
1080 * If an section header is needed to store extension info, write
1081 * it out after the ELF header and before the program header.
1082 */
1083 if (ehdr.e_shnum != 0) {
1084 Elf_ShdrElf64_Shdr shdr = { .sh_info = ws->npsections };
1085 error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shdr,
1086 sizeof shdr);
1087 if (error)
1088 return error;
1089 error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shstrtab,
1090 sizeof(shstrtab.shdr) + sizeof(shstrtab.shstrtab));
1091 if (error)
1092 return error;
1093 }
1094
1095 /*
1096 * Allocate the segment header array and setup to collect
1097 * the section sizes and offsets
1098 */
1099 ws->psections = mallocarray(ws->npsections, sizeof(Elf_PhdrElf64_Phdr),
1100 M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004|M_ZERO0x0008);
1101 if (ws->psections == NULL((void *)0))
1102 return ENOMEM12;
1103 ws->psectionslen = ws->npsections * sizeof(Elf_PhdrElf64_Phdr);
1104
1105 ws->notestart = ehdr.e_phoff + ws->psectionslen;
1106 ws->secstart = ws->notestart + ws->notesize;
1107 ws->secoff = ws->secstart;
1108
1109 /* Fill in the PT_NOTE segment header in the last slot */
1110 note = &ws->psections[ws->npsections - 1];
1111 note->p_type = PT_NOTE4;
1112 note->p_offset = ws->notestart;
1113 note->p_vaddr = 0;
1114 note->p_paddr = 0;
1115 note->p_filesz = ws->notesize;
1116 note->p_memsz = 0;
1117 note->p_flags = PF_R0x4;
1118 note->p_align = ELFROUNDSIZEsizeof(Elf64_Word);
1119
1120 return (0);
1121}
1122
1123int
1124coredump_walk_elf(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
1125 int nsegment, void *cookie)
1126{
1127 struct writesegs_state *ws = cookie;
1128 Elf_PhdrElf64_Phdr phdr;
1129 vsize_t size, realsize;
1130
1131 size = end - start;
1132 realsize = realend - start;
1133
1134 phdr.p_type = PT_LOAD1;
1135 phdr.p_offset = ws->secoff;
1136 phdr.p_vaddr = start;
1137 phdr.p_paddr = 0;
1138 phdr.p_filesz = realsize;
1139 phdr.p_memsz = size;
1140 phdr.p_flags = 0;
1141 if (prot & PROT_READ0x01)
1142 phdr.p_flags |= PF_R0x4;
1143 if (prot & PROT_WRITE0x02)
1144 phdr.p_flags |= PF_W0x2;
1145 if (prot & PROT_EXEC0x04)
1146 phdr.p_flags |= PF_X0x1;
1147 phdr.p_align = PAGE_SIZE(1 << 12);
1148
1149 ws->secoff += phdr.p_filesz;
1150 ws->psections[nsegment] = phdr;
1151
1152 return (0);
1153}
1154
1155int
1156coredump_notes_elf(struct proc *p, void *iocookie, size_t *sizep)
1157{
1158 struct ps_strings pss;
1159 struct iovec iov;
1160 struct uio uio;
1161 struct elfcore_procinfo cpi;
1162 Elf_NoteElf64_Note nhdr;
1163 struct process *pr = p->p_p;
1164 struct proc *q;
1165 size_t size, notesize;
1166 int error;
1167
1168 size = 0;
1169
1170 /* First, write an elfcore_procinfo. */
1171 notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD"))(((((sizeof("OpenBSD")))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
+
1172 elfround(sizeof(cpi))(((((sizeof(cpi)))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
;
1173 if (iocookie) {
1174 memset(&cpi, 0, sizeof(cpi))__builtin_memset((&cpi), (0), (sizeof(cpi)));
1175
1176 cpi.cpi_version = ELFCORE_PROCINFO_VERSION1;
1177 cpi.cpi_cpisize = sizeof(cpi);
1178 cpi.cpi_signo = p->p_sisig;
1179 cpi.cpi_sigcode = p->p_sicode;
1180
1181 cpi.cpi_sigpend = p->p_siglist | pr->ps_siglist;
1182 cpi.cpi_sigmask = p->p_sigmask;
1183 cpi.cpi_sigignore = pr->ps_sigacts->ps_sigignore;
1184 cpi.cpi_sigcatch = pr->ps_sigacts->ps_sigcatch;
1185
1186 cpi.cpi_pid = pr->ps_pid;
1187 cpi.cpi_ppid = pr->ps_ppid;
1188 cpi.cpi_pgrp = pr->ps_pgidps_pgrp->pg_id;
1189 if (pr->ps_sessionps_pgrp->pg_session->s_leader)
1190 cpi.cpi_sid = pr->ps_sessionps_pgrp->pg_session->s_leader->ps_pid;
1191 else
1192 cpi.cpi_sid = 0;
1193
1194 cpi.cpi_ruid = p->p_ucred->cr_ruid;
1195 cpi.cpi_euid = p->p_ucred->cr_uid;
1196 cpi.cpi_svuid = p->p_ucred->cr_svuid;
1197
1198 cpi.cpi_rgid = p->p_ucred->cr_rgid;
1199 cpi.cpi_egid = p->p_ucred->cr_gid;
1200 cpi.cpi_svgid = p->p_ucred->cr_svgid;
1201
1202 (void)strlcpy(cpi.cpi_name, pr->ps_comm, sizeof(cpi.cpi_name));
1203
1204 nhdr.namesz = sizeof("OpenBSD");
1205 nhdr.descsz = sizeof(cpi);
1206 nhdr.type = NT_OPENBSD_PROCINFO10;
1207
1208 error = coredump_writenote_elf(p, iocookie, &nhdr,
1209 "OpenBSD", &cpi);
1210 if (error)
1211 return (error);
1212 }
1213 size += notesize;
1214
1215 /* Second, write an NT_OPENBSD_AUXV note. */
1216 notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD"))(((((sizeof("OpenBSD")))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
+
1217 elfround(ELF_AUX_WORDS * sizeof(char *))((((((sizeof(Aux64Info) * 9 / sizeof(char *)) * sizeof(char *
)))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word)))*(sizeof(Elf64_Word
)))
;
1218 if (iocookie) {
1219 iov.iov_base = &pss;
1220 iov.iov_len = sizeof(pss);
1221 uio.uio_iov = &iov;
1222 uio.uio_iovcnt = 1;
1223 uio.uio_offset = (off_t)pr->ps_strings;
1224 uio.uio_resid = sizeof(pss);
1225 uio.uio_segflg = UIO_SYSSPACE;
1226 uio.uio_rw = UIO_READ;
1227 uio.uio_procp = NULL((void *)0);
1228
1229 error = uvm_io(&p->p_vmspace->vm_map, &uio, 0);
1230 if (error)
1231 return (error);
1232
1233 if (pss.ps_envstr == NULL((void *)0))
1234 return (EIO5);
1235
1236 nhdr.namesz = sizeof("OpenBSD");
1237 nhdr.descsz = ELF_AUX_WORDS(sizeof(Aux64Info) * 9 / sizeof(char *)) * sizeof(char *);
1238 nhdr.type = NT_OPENBSD_AUXV11;
1239
1240 error = coredump_write(iocookie, UIO_SYSSPACE,
1241 &nhdr, sizeof(nhdr));
1242 if (error)
1243 return (error);
1244
1245 error = coredump_write(iocookie, UIO_SYSSPACE,
1246 "OpenBSD", elfround(nhdr.namesz)(((((nhdr.namesz))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
);
1247 if (error)
1248 return (error);
1249
1250 error = coredump_write(iocookie, UIO_USERSPACE,
1251 pss.ps_envstr + pss.ps_nenvstr + 1, nhdr.descsz);
1252 if (error)
1253 return (error);
1254 }
1255 size += notesize;
1256
1257#ifdef PT_WCOOKIE
1258 notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD"))(((((sizeof("OpenBSD")))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
+
1259 elfround(sizeof(register_t))(((((sizeof(register_t)))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
;
1260 if (iocookie) {
1261 register_t wcookie;
1262
1263 nhdr.namesz = sizeof("OpenBSD");
1264 nhdr.descsz = sizeof(register_t);
1265 nhdr.type = NT_OPENBSD_WCOOKIE23;
1266
1267 wcookie = process_get_wcookie(p);
1268 error = coredump_writenote_elf(p, iocookie, &nhdr,
1269 "OpenBSD", &wcookie);
1270 if (error)
1271 return (error);
1272 }
1273 size += notesize;
1274#endif
1275
1276 /*
1277 * Now write the register info for the thread that caused the
1278 * coredump.
1279 */
1280 error = coredump_note_elf(p, iocookie, &notesize);
1281 if (error)
1282 return (error);
1283 size += notesize;
1284
1285 /*
1286 * Now, for each thread, write the register info and any other
1287 * per-thread notes. Since we're dumping core, all the other
1288 * threads in the process have been stopped and the list can't
1289 * change.
1290 */
1291 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)for((q) = ((&pr->ps_threads)->tqh_first); (q) != ((
void *)0); (q) = ((q)->p_thr_link.tqe_next))
{
1292 if (q == p) /* we've taken care of this thread */
1293 continue;
1294 error = coredump_note_elf(q, iocookie, &notesize);
1295 if (error)
1296 return (error);
1297 size += notesize;
1298 }
1299
1300 *sizep = size;
1301 return (0);
1302}
1303
1304int
1305coredump_note_elf(struct proc *p, void *iocookie, size_t *sizep)
1306{
1307 Elf_NoteElf64_Note nhdr;
1308 int size, notesize, error;
1309 int namesize;
1310 char name[64+ELFROUNDSIZEsizeof(Elf64_Word)];
1311 struct reg intreg;
1312#ifdef PT_GETFPREGS(32 + 3)
1313 struct fpreg freg;
1314#endif
1315
1316 size = 0;
1317
1318 snprintf(name, sizeof(name)-ELFROUNDSIZEsizeof(Elf64_Word), "%s@%d",
1319 "OpenBSD", p->p_tid + THREAD_PID_OFFSET100000);
1320 namesize = strlen(name) + 1;
1321 memset(name + namesize, 0, elfround(namesize) - namesize)__builtin_memset((name + namesize), (0), ((((((namesize))+((sizeof
(Elf64_Word))-1))/(sizeof(Elf64_Word)))*(sizeof(Elf64_Word)))
- namesize))
;
1322
1323 notesize = sizeof(nhdr) + elfround(namesize)(((((namesize))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word)
))*(sizeof(Elf64_Word)))
+ elfround(sizeof(intreg))(((((sizeof(intreg)))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
;
1324 if (iocookie) {
1325 error = process_read_regs(p, &intreg);
1326 if (error)
1327 return (error);
1328
1329 nhdr.namesz = namesize;
1330 nhdr.descsz = sizeof(intreg);
1331 nhdr.type = NT_OPENBSD_REGS20;
1332
1333 error = coredump_writenote_elf(p, iocookie, &nhdr,
1334 name, &intreg);
1335 if (error)
1336 return (error);
1337
1338 }
1339 size += notesize;
1340
1341#ifdef PT_GETFPREGS(32 + 3)
1342 notesize = sizeof(nhdr) + elfround(namesize)(((((namesize))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word)
))*(sizeof(Elf64_Word)))
+ elfround(sizeof(freg))(((((sizeof(freg)))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
;
1343 if (iocookie) {
1344 error = process_read_fpregs(p, &freg);
1345 if (error)
1346 return (error);
1347
1348 nhdr.namesz = namesize;
1349 nhdr.descsz = sizeof(freg);
1350 nhdr.type = NT_OPENBSD_FPREGS21;
1351
1352 error = coredump_writenote_elf(p, iocookie, &nhdr, name, &freg);
1353 if (error)
1354 return (error);
1355 }
1356 size += notesize;
1357#endif
1358
1359 *sizep = size;
1360 /* XXX Add hook for machdep per-LWP notes. */
1361 return (0);
1362}
1363
1364int
1365coredump_writenote_elf(struct proc *p, void *cookie, Elf_NoteElf64_Note *nhdr,
1366 const char *name, void *data)
1367{
1368 int error;
1369
1370 error = coredump_write(cookie, UIO_SYSSPACE, nhdr, sizeof(*nhdr));
1371 if (error)
1372 return error;
1373
1374 error = coredump_write(cookie, UIO_SYSSPACE, name,
1375 elfround(nhdr->namesz)(((((nhdr->namesz))+((sizeof(Elf64_Word))-1))/(sizeof(Elf64_Word
)))*(sizeof(Elf64_Word)))
);
1376 if (error)
1377 return error;
1378
1379 return coredump_write(cookie, UIO_SYSSPACE, data, nhdr->descsz);
1380}
1381#endif /* !SMALL_KERNEL */
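
Possible fix (sketch)

Because the out1 path never consults 'error', the most direct way to address this report is to test the return value of elf_read_from() without keeping the dead copy, as sketched below against lines 840-842 of elf_os_pt_note(). This is an illustrative sketch only, not necessarily the change adopted upstream; an alternative would be to have out1 return 'error' when it is nonzero, but that changes the error code seen by the caller and would require initializing 'error' to 0.

    /* lines 840-842: check the result without the unused store to 'error' */
    if (elf_read_from(p, epp->ep_vp, eh->e_phoff, hph, phsize) != 0)
            goto out1;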