File: src/libexec/ld.so/loader.c
Warning: line 770, column 3: Value stored to 'fails' is never read
1 | /* $OpenBSD: loader.c,v 1.218 2023/12/19 16:13:22 deraadt Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 1998 Per Fogelstrom, Opsycon AB |
5 | * |
6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions |
8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS |
16 | * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
18 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
19 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
20 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
21 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
22 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
23 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
24 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
25 | * SUCH DAMAGE. |
26 | * |
27 | */ |
28 | |
29 | #define _DYN_LOADER |
30 | |
31 | #include <sys/types.h> |
32 | #include <sys/mman.h> |
33 | #include <sys/syscall.h> |
34 | #include <sys/exec.h> |
35 | #ifdef __i386__ |
36 | # include <machine/vmparam.h> |
37 | #endif |
38 | #include <string.h> |
39 | #include <link.h> |
40 | #include <limits.h> /* NAME_MAX */ |
41 | #include <dlfcn.h> |
42 | #include <tib.h> |
43 | |
44 | #include "syscall.h" |
45 | #include "util.h" |
46 | #include "resolve.h" |
47 | #include "path.h" |
48 | #include "sod.h" |
49 | |
50 | /* |
51 | * Local decls. |
52 | */ |
53 | unsigned long _dl_boot(const char **, char **, const long, long *) __boot; |
54 | void _dl_debug_state(void); |
55 | void _dl_setup_env(const char *_argv0, char **_envp) __boot; |
56 | void _dl_dtors(void); |
57 | void _dl_dopreload(char *_paths) __boot; |
58 | void _dl_fixup_user_env(void) __boot; |
59 | void _dl_call_preinit(elf_object_t *) __boot; |
60 | void _dl_call_init_recurse(elf_object_t *object, int initfirst); |
61 | void _dl_clean_boot(void); |
62 | static inline void unprotect_if_textrel(elf_object_t *_object); |
63 | static inline void reprotect_if_textrel(elf_object_t *_object); |
64 | static void _dl_rreloc(elf_object_t *_object); |
65 | |
66 | int _dl_pagesz __relro = 4096; |
67 | int _dl_bindnow __relro = 0; |
68 | int _dl_debug __relro = 0; |
69 | int _dl_trust __relro = 0; |
70 | char **_dl_libpath __relro = NULL; |
71 | const char **_dl_argv __relro = NULL; |
72 | int _dl_argc __relro = 0; |
73 | |
74 | char *_dl_preload __boot_data = NULL; |
75 | char *_dl_tracefmt1 __boot_data = NULL; |
76 | char *_dl_tracefmt2 __boot_data = NULL; |
77 | char *_dl_traceprog __boot_data = NULL; |
78 | void *_dl_exec_hint __boot_data = NULL; |
79 | |
80 | char **environ = NULL; |
81 | char *__progname = NULL; |
82 | |
83 | int _dl_traceld; |
84 | struct r_debug *_dl_debug_map; |
85 | |
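/* |
 * Table of ld.so entry points: the init/preinit functions below are |
 * called with &_dl_cb_cb as their fourth argument, through which the |
 * program's runtime obtains these callbacks (dlopen, dlsym, TIB |
 * allocation, ...). |
 */ |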
86 | static dl_cb_cb _dl_cb_cb; |
87 | const struct dl_cb_0 callbacks_0 = { |
88 | .dl_allocate_tib = &_dl_allocate_tib, |
89 | .dl_free_tib = &_dl_free_tib, |
90 | #if DO_CLEAN_BOOT |
91 | .dl_clean_boot = &_dl_clean_boot, |
92 | #endif |
93 | .dlopen = &dlopen, |
94 | .dlclose = &dlclose, |
95 | .dlsym = &dlsym, |
96 | .dladdr = &dladdr, |
97 | .dlctl = &dlctl, |
98 | .dlerror = &dlerror, |
99 | .dl_iterate_phdr = &dl_iterate_phdr, |
100 | }; |
101 | |
102 | |
103 | /* |
104 | * Run dtors for a single object. |
105 | */ |
106 | void |
107 | _dl_run_dtors(elf_object_t *obj) |
108 | { |
109 | if (obj->dyn.fini_array) { |
110 | int num = obj->dyn.fini_arraysz / sizeof(Elf_Addr); |
111 | int i; |
112 | |
113 | DL_DEB(("doing finiarray obj %p @%p: [%s]\n", |
114 | obj, obj->dyn.fini_array, obj->load_name)); |
115 | for (i = num; i > 0; i--) |
116 | (*obj->dyn.fini_array[i-1])(); |
117 | } |
118 | |
119 | if (obj->dyn.fini) { |
120 | DL_DEB(("doing dtors obj %p @%p: [%s]\n", |
121 | obj, obj->dyn.fini, obj->load_name)); |
122 | (*obj->dyn.fini)(); |
123 | } |
124 | } |
125 | |
126 | /* |
127 | * Run dtors for all objects that are eligible. |
128 | */ |
129 | void |
130 | _dl_run_all_dtors(void) |
131 | { |
132 | elf_object_t *node; |
133 | int fini_complete; |
134 | int skip_initfirst; |
135 | int initfirst_skipped; |
136 | |
137 | fini_complete = 0; |
138 | skip_initfirst = 1; |
139 | initfirst_skipped = 0; |
140 | |
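/* |
 * Each pass: mark eligible objects (refcount zero, init done, fini |
 * not yet run) FINI_READY, then clear that mark on every object that |
 * is still a child (dependency) of another pending object, so |
 * dependents run their dtors before the libraries they depend on. |
 * Run whatever stays ready and repeat until a pass runs nothing; |
 * DF_1_INITFIRST objects are deferred to a final round. |
 */ |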
141 | while (fini_complete == 0) { |
142 | fini_complete = 1; |
143 | for (node = _dl_objects; |
144 | node != NULL; |
145 | node = node->next) { |
146 | if ((node->dyn.fini || node->dyn.fini_array) && |
147 | (OBJECT_REF_CNT(node) == 0) && |
148 | (node->status & STAT_INIT_DONE) && |
149 | ((node->status & STAT_FINI_DONE) == 0)) { |
150 | if (skip_initfirst && |
151 | (node->obj_flags & DF_1_INITFIRST)) |
152 | initfirst_skipped = 1; |
153 | else |
154 | node->status |= STAT_FINI_READY; |
155 | } |
156 | } |
157 | for (node = _dl_objects; |
158 | node != NULL; |
159 | node = node->next ) { |
160 | if ((node->dyn.fini || node->dyn.fini_array) && |
161 | (OBJECT_REF_CNT(node) == 0) && |
162 | (node->status & STAT_INIT_DONE) && |
163 | ((node->status & STAT_FINI_DONE) == 0) && |
164 | (!skip_initfirst || |
165 | (node->obj_flags & DF_1_INITFIRST) == 0)) { |
166 | struct object_vector vec = node->child_vec; |
167 | int i; |
168 | |
169 | for (i = 0; i < vec.len; i++) |
170 | vec.vec[i]->status &= ~STAT_FINI_READY; |
171 | } |
172 | } |
173 | |
174 | for (node = _dl_objects; |
175 | node != NULL; |
176 | node = node->next ) { |
177 | if (node->status & STAT_FINI_READY) { |
178 | fini_complete = 0; |
179 | node->status |= STAT_FINI_DONE; |
180 | node->status &= ~STAT_FINI_READY; |
181 | _dl_run_dtors(node); |
182 | } |
183 | } |
184 | |
185 | if (fini_complete && initfirst_skipped) |
186 | fini_complete = initfirst_skipped = skip_initfirst = 0; |
187 | } |
188 | } |
189 | |
190 | /* |
191 | * Routine to walk through all of the objects except the first |
192 | * (main executable). |
193 | * |
194 | * Big question, should dlopen()ed objects be unloaded before or after |
195 | * the destructor for the main application runs? |
196 | */ |
197 | void |
198 | _dl_dtors(void) |
199 | { |
200 | _dl_thread_kern_stop(); |
201 | |
202 | /* ORDER? */ |
203 | _dl_unload_dlopen(); |
204 | |
205 | DL_DEB(("doing dtors\n")); |
206 | |
207 | _dl_objects->opencount--; |
208 | _dl_notify_unload_shlib(_dl_objects); |
209 | |
210 | _dl_run_all_dtors(); |
211 | } |
212 | |
213 | #if DO_CLEAN_BOOT |
214 | void |
215 | _dl_clean_boot(void) |
216 | { |
217 | extern char boot_text_start[], boot_text_end[]; |
218 | #if 0 /* XXX breaks boehm-gc?!? */ |
219 | extern char boot_data_start[], boot_data_end[]; |
220 | #endif |
221 | |
222 | _dl_mmap(boot_text_start, boot_text_end - boot_text_start, |
223 | PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0); |
224 | _dl_mimmutable(boot_text_start, boot_text_end - boot_text_start); |
225 | #if 0 /* XXX breaks boehm-gc?!? */ |
226 | _dl_mmap(boot_data_start, boot_data_end - boot_data_start, |
227 | PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0); |
228 | _dl_mimmutable(boot_data_start, boot_data_end - boot_data_start); |
229 | #endif |
230 | } |
231 | #endif /* DO_CLEAN_BOOT */ |
232 | |
233 | void |
234 | _dl_dopreload(char *paths) |
235 | { |
236 | char *cp, *dp; |
237 | elf_object_t *shlib; |
238 | int count; |
239 | |
240 | dp = paths = _dl_strdup(paths); |
241 | if (dp == NULL) |
242 | _dl_oom(); |
243 | |
244 | /* preallocate child_vec for the LD_PRELOAD objects */ |
245 | count = 1; |
246 | while (*dp++ != '\0') |
247 | if (*dp == ':') |
248 | count++; |
249 | object_vec_grow(&_dl_objects->child_vec, count); |
250 | |
251 | dp = paths; |
252 | while ((cp = _dl_strsep(&dp, ":")) != NULL) { |
253 | shlib = _dl_load_shlib(cp, _dl_objects, OBJTYPE_LIB, |
254 | _dl_objects->obj_flags, 1); |
255 | if (shlib == NULL) |
256 | _dl_die("can't preload library '%s'", cp); |
257 | _dl_add_object(shlib); |
258 | _dl_link_child(shlib, _dl_objects); |
259 | } |
260 | _dl_free(paths); |
261 | return; |
262 | } |
263 | |
264 | /* |
265 | * grab interesting environment variables, zap bad env vars if |
266 | * issetugid, and set the exported environ and __progname variables |
267 | */ |
268 | void |
269 | _dl_setup_env(const char *argv0, char **envp) |
270 | { |
271 | static char progname_storage[NAME_MAX+1] = ""; |
272 | |
273 | /* |
274 | * Don't allow someone to change the search paths if he runs |
275 | * a suid program without credentials high enough. |
276 | */ |
277 | _dl_trust = !_dl_issetugid(); |
278 | if (!_dl_trust) { /* Zap paths if s[ug]id... */ |
279 | _dl_unsetenv("LD_DEBUG", envp); |
280 | _dl_unsetenv("LD_LIBRARY_PATH", envp); |
281 | _dl_unsetenv("LD_PRELOAD", envp); |
282 | _dl_unsetenv("LD_BIND_NOW", envp); |
283 | } else { |
284 | /* |
285 | * Get paths to various things we are going to use. |
286 | */ |
287 | _dl_debug = _dl_getenv("LD_DEBUG", envp) != NULL; |
288 | _dl_libpath = _dl_split_path(_dl_getenv("LD_LIBRARY_PATH", |
289 | envp)); |
290 | _dl_preload = _dl_getenv("LD_PRELOAD", envp); |
291 | _dl_bindnow = _dl_getenv("LD_BIND_NOW", envp) != NULL; |
292 | } |
293 | |
294 | /* these are usable even in setugid processes */ |
295 | _dl_traceld = _dl_getenv("LD_TRACE_LOADED_OBJECTS", envp) != NULL; |
296 | _dl_tracefmt1 = _dl_getenv("LD_TRACE_LOADED_OBJECTS_FMT1", envp); |
297 | _dl_tracefmt2 = _dl_getenv("LD_TRACE_LOADED_OBJECTS_FMT2", envp); |
298 | _dl_traceprog = _dl_getenv("LD_TRACE_LOADED_OBJECTS_PROGNAME", envp); |
299 | |
300 | environ = envp; |
301 | |
302 | _dl_trace_setup(envp); |
303 | |
304 | if (argv0 != NULL) { /* NULL ptr if argc = 0 */ |
305 | const char *p = _dl_strrchr(argv0, '/'); |
306 | |
307 | if (p == NULL) |
308 | p = argv0; |
309 | else |
310 | p++; |
311 | _dl_strlcpy(progname_storage, p, sizeof(progname_storage)); |
312 | } |
313 | __progname = progname_storage; |
314 | } |
315 | |
316 | int |
317 | _dl_load_dep_libs(elf_object_t *object, int flags, int booting) |
318 | { |
319 | elf_object_t *dynobj, *obj; |
320 | Elf_Dyn *dynp; |
321 | unsigned int loop; |
322 | int libcount; |
323 | int depflags, nodelete = 0; |
324 | |
325 | dynobj = object; |
326 | while (dynobj) { |
327 | DL_DEB(("examining: '%s'\n", dynobj->load_name)); |
328 | libcount = 0; |
329 | |
330 | /* propagate DF_1_NOW to deplibs (can be set by dynamic tags) */ |
331 | depflags = flags | (dynobj->obj_flags & DF_1_NOW); |
332 | if (booting || object->nodelete) |
333 | nodelete = 1; |
334 | |
335 | for (dynp = dynobj->load_dyn; dynp->d_tag; dynp++) { |
336 | if (dynp->d_tag == DT_NEEDED) { |
337 | libcount++; |
338 | } |
339 | } |
340 | |
341 | if ( libcount != 0) { |
342 | struct listent { |
343 | Elf_Dyn *dynp; |
344 | elf_object_t *depobj; |
345 | } *liblist; |
346 | int *randomlist; |
347 | |
348 | liblist = _dl_reallocarray(NULL, libcount, |
349 | sizeof(struct listent)); |
350 | randomlist = _dl_reallocarray(NULL, libcount, |
351 | sizeof(int)); |
352 | |
353 | if (liblist == NULL || randomlist == NULL) |
354 | _dl_oom(); |
355 | |
356 | for (dynp = dynobj->load_dyn, loop = 0; dynp->d_tag; |
357 | dynp++) |
358 | if (dynp->d_tag == DT_NEEDED) |
359 | liblist[loop++].dynp = dynp; |
360 | |
361 | /* Randomize these */ |
362 | for (loop = 0; loop < libcount; loop++) |
363 | randomlist[loop] = loop; |
364 | |
365 | for (loop = 1; loop < libcount; loop++) { |
366 | unsigned int rnd; |
367 | int cur; |
368 | rnd = _dl_arc4random(); |
369 | rnd = rnd % (loop+1); |
370 | cur = randomlist[rnd]; |
371 | randomlist[rnd] = randomlist[loop]; |
372 | randomlist[loop] = cur; |
373 | } |
374 | |
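/* |
 * liblist[] stays in DT_NEEDED order; randomlist[] now holds the |
 * shuffled (Fisher-Yates) permutation used below, so the dependencies |
 * are loaded, and therefore mapped, in a randomized order. |
 */ |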
375 | for (loop = 0; loop < libcount; loop++) { |
376 | elf_object_t *depobj; |
377 | const char *libname; |
378 | libname = dynobj->dyn.strtab; |
379 | libname += |
380 | liblist[randomlist[loop]].dynp->d_un.d_val; |
381 | DL_DEB(("loading: %s required by %s\n", libname, |
382 | dynobj->load_name)); |
383 | depobj = _dl_load_shlib(libname, dynobj, |
384 | OBJTYPE_LIB, depflags, nodelete); |
385 | if (depobj == 0) { |
386 | if (booting) { |
387 | _dl_die( |
388 | "can't load library '%s'", |
389 | libname); |
390 | } |
391 | DL_DEB(("dlopen: failed to open %s\n", |
392 | libname)); |
393 | _dl_free(liblist); |
394 | _dl_free(randomlist); |
395 | return (1); |
396 | } |
397 | liblist[randomlist[loop]].depobj = depobj; |
398 | } |
399 | |
400 | object_vec_grow(&dynobj->child_vec, libcount); |
401 | for (loop = 0; loop < libcount; loop++) { |
402 | _dl_add_object(liblist[loop].depobj); |
403 | _dl_link_child(liblist[loop].depobj, dynobj); |
404 | } |
405 | _dl_free(liblist); |
406 | _dl_free(randomlist); |
407 | } |
408 | dynobj = dynobj->next; |
409 | } |
410 | |
411 | _dl_cache_grpsym_list_setup(object); |
412 | |
413 | for (obj = _dl_objects; booting && obj != NULL; obj = obj->next) { |
414 | char *soname = (char *)obj->Dyn.info[DT_SONAME]; |
415 | struct sym_res sr; |
416 | |
417 | if (!soname || _dl_strncmp(soname, "libc.so.", 8)) |
418 | continue; |
419 | sr = _dl_find_symbol("execve", |
420 | SYM_SEARCH_SELF|SYM_PLT|SYM_WARNNOTFOUND, NULL, obj); |
421 | if (sr.sym) |
422 | _dl_pinsyscall(SYS_execve, |
423 | (void *)sr.obj->obj_base + sr.sym->st_value, |
424 | sr.sym->st_size); |
425 | _dl_memset(&sr, 0, sizeof sr); |
426 | break; |
427 | } |
428 | return(0); |
429 | } |
430 | |
431 | |
432 | /* do any RWX -> RX fixups for executable PLTs and apply GNU_RELRO */ |
433 | static inline void |
434 | _dl_self_relro(long loff) |
435 | { |
436 | Elf_Ehdr *ehdp; |
437 | Elf_Phdr *phdp; |
438 | int i; |
439 | |
440 | ehdp = (Elf_Ehdr *)loff; |
441 | phdp = (Elf_Phdr *)(loff + ehdp->e_phoff); |
442 | for (i = 0; i < ehdp->e_phnum; i++, phdp++) { |
443 | switch (phdp->p_type) { |
444 | #if defined(__alpha__) || defined(__hppa__) || defined(__powerpc__) || \ |
445 | defined(__sparc64__) |
446 | case PT_LOAD: |
447 | if ((phdp->p_flags & (PF_X | PF_W)) != (PF_X | PF_W)) |
448 | break; |
449 | _dl_mprotect((void *)(phdp->p_vaddr + loff), |
450 | phdp->p_memsz, PROT_READ); |
451 | break; |
452 | #endif |
453 | case PT_GNU_RELRO: |
454 | _dl_mprotect((void *)(phdp->p_vaddr + loff), |
455 | phdp->p_memsz, PROT_READ); |
456 | _dl_mimmutable((void *)(phdp->p_vaddr + loff), |
457 | phdp->p_memsz); |
458 | break; |
459 | } |
460 | } |
461 | } |
462 | |
463 | |
464 | #define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \ |
465 | (((X) & PF_W) ? PROT_WRITE : 0) | \ |
466 | (((X) & PF_X) ? PROT_EXEC : 0)) |
467 | |
468 | /* |
469 | * To avoid kbind(2) becoming a powerful gadget, it is called inline to a |
470 | * function. Therefore we cannot create a precise pinsyscall label. Instead |
471 | * create a duplicate entry to force the kernel's pinsyscall code to skip |
472 | * validation, rather than labelling it illegal. kbind(2) remains safe |
473 | * because it self-protects by checking its calling address. |
474 | */ |
475 | #define __STRINGIFY(x) #x |
476 | #define STRINGIFY(x) __STRINGIFY(x) |
477 | #ifdef __arm__ |
478 | __asm__(".pushsection .openbsd.syscalls,\"\",%progbits;" |
479 | ".p2align 2;" |
480 | ".long 0;" |
481 | ".long " STRINGIFY(SYS_kbind)"86" ";" |
482 | ".popsection"); |
483 | #else |
484 | __asm__(".pushsection .openbsd.syscalls,\"\",@progbits;" |
485 | ".p2align 2;" |
486 | ".long 0;" |
487 | ".long " STRINGIFY(SYS_kbind)"86" ";" |
488 | ".popsection"); |
489 | #endif |
490 | |
491 | /* |
492 | * This is the dynamic loader entrypoint. When entering here, depending |
493 | * on architecture type, the stack and registers are set up according |
494 | * to the architectures ABI specification. The first thing required |
495 | * to do is to dig out all information we need to accomplish our task. |
496 | */ |
497 | unsigned long |
498 | _dl_boot(const char **argv, char **envp, const long dyn_loff, long *dl_data) |
499 | { |
500 | struct elf_object *exe_obj; /* Pointer to executable object */ |
501 | struct elf_object *dyn_obj; /* Pointer to ld.so object */ |
502 | struct r_debug **map_link; /* Where to put pointer for gdb */ |
503 | struct r_debug *debug_map; |
504 | struct load_list *next_load, *load_list = NULL; |
505 | Elf_Dyn *dynp; |
506 | Elf_Phdr *phdp; |
507 | Elf_Ehdr *ehdr; |
508 | char *us = NULL; |
509 | unsigned int loop; |
510 | int failed; |
511 | struct dep_node *n; |
512 | Elf_Addr minva, maxva, exe_loff, exec_end, cur_exec_end; |
513 | Elf_Addr relro_addr = 0, relro_size = 0; |
514 | Elf_Phdr *ptls = NULL; |
515 | int align; |
516 | |
517 | if (dl_data[AUX_pagesz] != 0) |
518 | _dl_pagesz = dl_data[AUX_pagesz]; |
519 | _dl_malloc_init(); |
520 | |
521 | _dl_argv = argv; |
522 | while (_dl_argv[_dl_argc] != NULL) |
523 | _dl_argc++; |
524 | _dl_setup_env(argv[0], envp); |
525 | |
526 | /* |
527 | * Make read-only the GOT and PLT and variables initialized |
528 | * during the ld.so setup above. |
529 | */ |
530 | _dl_self_relro(dyn_loff); |
531 | |
532 | align = _dl_pagesz - 1; |
533 | |
534 | #define ROUND_PG(x) (((x) + align) & ~(align)) |
535 | #define TRUNC_PG(x) ((x) & ~(align)) |
536 | |
537 | if (_dl_bindnow) { |
538 | /* Lazy binding disabled, so disable kbind */ |
539 | _dl_kbind(NULL, 0, 0); |
540 | } |
541 | |
542 | DL_DEB(("ld.so loading: '%s'\n", __progname)); |
543 | |
544 | /* init this in runtime, not statically */ |
545 | TAILQ_INIT(&_dlopened_child_list); |
546 | |
547 | exe_obj = NULL; |
548 | _dl_loading_object = NULL; |
549 | |
550 | minva = ELF_NO_ADDR; |
551 | maxva = exe_loff = exec_end = 0; |
552 | |
553 | /* |
554 | * Examine the user application and set up object information. |
555 | */ |
556 | phdp = (Elf_Phdr *)dl_data[AUX_phdr]; |
557 | for (loop = 0; loop < dl_data[AUX_phnum]; loop++) { |
558 | switch (phdp->p_type) { |
559 | case PT_PHDR: |
560 | exe_loff = (Elf_Addr)dl_data[AUX_phdr] - phdp->p_vaddr; |
561 | us += exe_loff; |
562 | DL_DEB(("exe load offset: 0x%lx\n", exe_loff)); |
563 | break; |
564 | case PT_DYNAMIC: |
565 | minva = TRUNC_PG(minva); |
566 | maxva = ROUND_PG(maxva); |
567 | exe_obj = _dl_finalize_object(argv[0] ? argv[0] : "", |
568 | (Elf_Dyn *)(phdp->p_vaddr + exe_loff), |
569 | (Elf_Phdr *)dl_data[AUX_phdr], |
570 | dl_data[AUX_phnum], OBJTYPE_EXE, minva + exe_loff, |
571 | exe_loff); |
572 | _dl_add_object(exe_obj); |
573 | break; |
574 | case PT_INTERP: |
575 | us += phdp->p_vaddr; |
576 | break; |
577 | case PT_LOAD: |
578 | if (phdp->p_vaddr < minva) |
579 | minva = phdp->p_vaddr; |
580 | if (phdp->p_vaddr > maxva) |
581 | maxva = phdp->p_vaddr + phdp->p_memsz; |
582 | |
583 | next_load = _dl_calloc(1, sizeof(struct load_list)); |
584 | if (next_load == NULL) |
585 | _dl_oom(); |
586 | next_load->next = load_list; |
587 | load_list = next_load; |
588 | next_load->start = (char *)TRUNC_PG(phdp->p_vaddr) + exe_loff; |
589 | next_load->size = (phdp->p_vaddr & align) + phdp->p_filesz; |
590 | next_load->prot = PFLAGS(phdp->p_flags); |
591 | cur_exec_end = (Elf_Addr)next_load->start + next_load->size; |
592 | if ((next_load->prot & PROT_EXEC) != 0 && |
593 | cur_exec_end > exec_end) |
594 | exec_end = cur_exec_end; |
595 | break; |
596 | case PT_TLS: |
597 | if (phdp->p_filesz > phdp->p_memsz) |
598 | _dl_die("invalid tls data"); |
599 | ptls = phdp; |
600 | break; |
601 | case PT_GNU_RELRO: |
602 | relro_addr = phdp->p_vaddr + exe_loff; |
603 | relro_size = phdp->p_memsz; |
604 | break; |
605 | } |
606 | phdp++; |
607 | } |
608 | exe_obj->load_list = load_list; |
609 | exe_obj->obj_flags |= DF_1_GLOBAL; |
610 | exe_obj->nodelete = 1; |
611 | exe_obj->load_size = maxva - minva; |
612 | exe_obj->relro_addr = relro_addr; |
613 | exe_obj->relro_size = relro_size; |
614 | _dl_set_sod(exe_obj->load_name, &exe_obj->sod); |
615 | |
616 | #ifdef __i386__ |
617 | if (exec_end > I386_MAX_EXE_ADDR) |
618 | _dl_exec_hint = (void *)ROUND_PG(exec_end-I386_MAX_EXE_ADDR); |
619 | DL_DEB(("_dl_exec_hint: 0x%lx\n", _dl_exec_hint)); |
620 | #endif |
621 | |
622 | /* TLS bits in the base executable */ |
623 | if (ptls != NULL && ptls->p_memsz) |
624 | _dl_set_tls(exe_obj, ptls, exe_loff, NULL); |
625 | |
626 | n = _dl_malloc(sizeof *n); |
627 | if (n == NULL) |
628 | _dl_oom(); |
629 | n->data = exe_obj; |
630 | TAILQ_INSERT_TAIL(&_dlopened_child_list, n, next_sib); |
631 | exe_obj->opencount++; |
632 | |
633 | if (_dl_preload != NULL) |
634 | _dl_dopreload(_dl_preload); |
635 | |
636 | _dl_load_dep_libs(exe_obj, exe_obj->obj_flags, 1); |
637 | |
638 | /* |
639 | * Now add the dynamic loader itself last in the object list |
640 | * so we can use the _dl_ code when serving dl.... calls. |
641 | * Intentionally left off the exe child_vec. |
642 | */ |
643 | dynp = (Elf_Dyn *)((void *)_DYNAMIC); |
644 | ehdr = (Elf_Ehdr *)dl_data[AUX_base]; |
645 | dyn_obj = _dl_finalize_object(us, dynp, |
646 | (Elf_Phdr *)((char *)dl_data[AUX_base] + ehdr->e_phoff), |
647 | ehdr->e_phnum, OBJTYPE_LDR, dl_data[AUX_base], dyn_loff); |
648 | _dl_add_object(dyn_obj); |
649 | |
650 | dyn_obj->refcount++; |
651 | _dl_link_grpsym(dyn_obj); |
652 | |
653 | dyn_obj->status |= STAT_RELOC_DONE; |
654 | _dl_set_sod(dyn_obj->load_name, &dyn_obj->sod); |
655 | |
656 | /* calculate the offsets for static TLS allocations */ |
657 | _dl_allocate_tls_offsets(); |
658 | |
659 | /* |
660 | * Make something to help gdb when poking around in the code. |
661 | * Do this poking at the .dynamic section now, before relocation |
662 | * renders it read-only |
663 | */ |
664 | map_link = NULL; |
665 | #ifdef __mips__ |
666 | for (dynp = exe_obj->load_dyn; dynp->d_tag; dynp++) { |
667 | if (dynp->d_tag == DT_MIPS_RLD_MAP_REL) { |
668 | map_link = (struct r_debug **) |
669 | (dynp->d_un.d_ptr + (Elf_Addr)dynp); |
670 | break; |
671 | } else if (dynp->d_tag == DT_MIPS_RLD_MAP) { |
672 | map_link = (struct r_debug **) |
673 | (dynp->d_un.d_ptr + exe_loff); |
674 | break; |
675 | } |
676 | } |
677 | #endif |
678 | if (map_link == NULL) { |
679 | for (dynp = exe_obj->load_dyn; dynp->d_tag; dynp++) { |
680 | if (dynp->d_tag == DT_DEBUG) { |
681 | map_link = (struct r_debug **)&dynp->d_un.d_ptr; |
682 | break; |
683 | } |
684 | } |
685 | if (dynp->d_tag != DT_DEBUG) |
686 | DL_DEB(("failed to mark DTDEBUG\n")); |
687 | } |
688 | if (map_link) { |
689 | debug_map = _dl_malloc(sizeof(*debug_map)); |
690 | if (debug_map == NULL) |
691 | _dl_oom(); |
692 | debug_map->r_version = 1; |
693 | debug_map->r_map = (struct link_map *)_dl_objects; |
694 | debug_map->r_brk = (Elf_Addr)_dl_debug_state; |
695 | debug_map->r_state = RT_CONSISTENT; |
696 | debug_map->r_ldbase = dyn_loff; |
697 | _dl_debug_map = debug_map; |
698 | #ifdef __mips__ |
699 | relro_addr = exe_obj->relro_addr; |
700 | if (dynp->d_tag == DT_DEBUG && |
701 | ((Elf_Addr)map_link + sizeof(*map_link) <= relro_addr || |
702 | (Elf_Addr)map_link >= relro_addr + exe_obj->relro_size)) { |
703 | _dl_mprotect(map_link, sizeof(*map_link), |
704 | PROT_READ|PROT_WRITE); |
705 | *map_link = _dl_debug_map; |
706 | _dl_mprotect(map_link, sizeof(*map_link), |
707 | PROT_READ|PROT_EXEC); |
708 | } else |
709 | #endif |
710 | *map_link = _dl_debug_map; |
711 | } |
712 | |
713 | |
714 | /* |
715 | * Everything should be in place now for doing the relocation |
716 | * and binding. Call _dl_rtld to do the job. Fingers crossed. |
717 | */ |
718 | |
719 | failed = 0; |
720 | if (!_dl_traceld) |
721 | failed = _dl_rtld(_dl_objects); |
722 | |
723 | if (_dl_debug || _dl_traceld) { |
724 | if (_dl_traceld) |
725 | _dl_pledge("stdio rpath", NULL); |
726 | _dl_show_objects(NULL); |
727 | } |
728 | |
729 | DL_DEB(("dynamic loading done, %s.\n", |
730 | (failed == 0) ? "success":"failed")); |
731 | |
732 | if (failed != 0) |
733 | _dl_die("relocation failed"); |
734 | |
735 | if (_dl_traceld) |
736 | _dl_exit(0); |
737 | |
738 | _dl_loading_object = NULL; |
739 | |
740 | /* set up the TIB for the initial thread */ |
741 | _dl_allocate_first_tib(); |
742 | |
743 | _dl_fixup_user_env(); |
744 | |
745 | _dl_debug_state(); |
746 | |
747 | /* |
748 | * Do not run init code if run from ldd. |
749 | */ |
750 | if (_dl_objects->next != NULL) { |
751 | _dl_call_preinit(_dl_objects); |
752 | _dl_call_init(_dl_objects); |
753 | } |
754 | |
755 | DL_DEB(("entry point: 0x%lx\n", dl_data[AUX_entry])); |
756 | |
757 | /* |
758 | * Return the entry point. |
759 | */ |
760 | return(dl_data[AUX_entry]); |
761 | } |
762 | |
763 | int |
764 | _dl_rtld(elf_object_t *object) |
765 | { |
766 | struct load_list *llist; |
767 | int fails = 0; |
768 | |
769 | if (object->next) |
770 | fails += _dl_rtld(object->next); |
Value stored to 'fails' is never read | |
771 | |
772 | if (object->status & STAT_RELOC_DONE) |
773 | return 0; |
774 | |
775 | /* |
776 | * Do relocation information first, then GOT. |
777 | */ |
778 | unprotect_if_textrel(object); |
779 | _dl_rreloc(object); |
780 | fails =_dl_md_reloc(object, DT_REL, DT_RELSZ); |
781 | fails += _dl_md_reloc(object, DT_RELA, DT_RELASZ); |
782 | reprotect_if_textrel(object); |
783 | |
784 | /* |
785 | * We do lazy resolution by default, doing eager resolution if |
786 | * - the object requests it with -znow, OR |
787 | * - LD_BIND_NOW is set and this object isn't being ltraced |
788 | * |
789 | * Note that -znow disables ltrace for the object: on at least |
790 | * amd64 'ld' doesn't generate the trampoline for lazy relocation |
791 | * when -znow is used. |
792 | */ |
793 | fails += _dl_md_reloc_got(object, !(object->obj_flags & DF_1_NOW) && |
794 | !(_dl_bindnow && !object->traced)); |
795 | |
796 | /* |
797 | * Look for W&X segments and make them read-only. |
798 | */ |
799 | for (llist = object->load_list; llist != NULL; llist = llist->next) { |
800 | if ((llist->prot & PROT_WRITE) && (llist->prot & PROT_EXEC)) { |
801 | _dl_mprotect(llist->start, llist->size, |
802 | llist->prot & ~PROT_WRITE); |
803 | } |
804 | } |
805 | |
806 | /* |
807 | * TEXTREL binaries are loaded without immutable on un-writeable sections. |
808 | * After text relocations are finished, these regions can become |
809 | * immutable. OPENBSD_MUTABLE section always overlaps writeable LOADs, |
810 | * so don't be afraid. |
811 | */ |
812 | if (object->dyn.textrel) { |
813 | for (llist = object->load_list; llist != NULL; llist = llist->next) |
814 | if ((llist->prot & PROT_WRITE) == 0) |
815 | _dl_mimmutable(llist->start, llist->size); |
816 | } |
817 | |
818 | if (fails == 0) |
819 | object->status |= STAT_RELOC_DONE; |
820 | |
821 | return (fails); |
822 | } |
823 | |
824 | void |
825 | _dl_call_preinit(elf_object_t *object) |
826 | { |
827 | if (object->dyn.preinit_array) { |
828 | int num = object->dyn.preinit_arraysz / sizeof(Elf_Addr); |
829 | int i; |
830 | |
831 | DL_DEB(("doing preinitarray obj %p @%p: [%s]\n", |
832 | object, object->dyn.preinit_array, object->load_name)); |
833 | for (i = 0; i < num; i++) |
834 | (*object->dyn.preinit_array[i])(_dl_argc, _dl_argv, |
835 | environ, &_dl_cb_cb); |
836 | } |
837 | } |
838 | |
839 | void |
840 | _dl_call_init(elf_object_t *object) |
841 | { |
842 | _dl_call_init_recurse(object, 1); |
843 | _dl_call_init_recurse(object, 0); |
844 | } |
845 | |
846 | static void |
847 | _dl_relro(elf_object_t *object) |
848 | { |
849 | /* |
850 | * Handle GNU_RELRO |
851 | */ |
852 | if (object->relro_addr != 0 && object->relro_size != 0) { |
853 | Elf_Addr addr = object->relro_addr; |
854 | |
855 | DL_DEB(("protect RELRO [0x%lx,0x%lx) in %s\n", |
856 | addr, addr + object->relro_size, object->load_name)); |
857 | _dl_mprotect((void *)addr, object->relro_size, PROT_READ); |
858 | |
859 | /* if library will never be unloaded, RELRO can be immutable */ |
860 | if (object->nodelete) |
861 | _dl_mimmutable((void *)addr, object->relro_size); |
862 | } |
863 | } |
864 | |
865 | void |
866 | _dl_call_init_recurse(elf_object_t *object, int initfirst) |
867 | { |
868 | struct object_vector vec; |
869 | int visited_flag = initfirst ? STAT_VISIT_INITFIRST : STAT_VISIT_INIT; |
870 | int i; |
871 | |
872 | object->status |= visited_flag; |
873 | |
874 | for (vec = object->child_vec, i = 0; i < vec.len; i++) { |
875 | if (vec.vec[i]->status & visited_flag) |
876 | continue; |
877 | _dl_call_init_recurse(vec.vec[i], initfirst); |
878 | } |
879 | |
880 | if (object->status & STAT_INIT_DONE) |
881 | return; |
882 | |
883 | if (initfirst && (object->obj_flags & DF_1_INITFIRST) == 0) |
884 | return; |
885 | |
886 | if (!initfirst) { |
887 | _dl_relro(object); |
888 | _dl_apply_immutable(object); |
889 | } |
890 | |
891 | if (object->dyn.init) { |
892 | DL_DEB(("doing ctors obj %p @%p: [%s]\n", |
893 | object, object->dyn.init, object->load_name)); |
894 | (*object->dyn.init)(); |
895 | } |
896 | |
897 | if (object->dyn.init_array) { |
898 | int num = object->dyn.init_arraysz / sizeof(Elf_Addr); |
899 | int i; |
900 | |
901 | DL_DEB(("doing initarray obj %p @%p: [%s]\n", |
902 | object, object->dyn.init_array, object->load_name)); |
903 | for (i = 0; i < num; i++) |
904 | (*object->dyn.init_array[i])(_dl_argc, _dl_argv, |
905 | environ, &_dl_cb_cb); |
906 | } |
907 | |
908 | if (initfirst) { |
909 | _dl_relro(object); |
910 | _dl_apply_immutable(object); |
911 | } |
912 | |
913 | object->status |= STAT_INIT_DONE; |
914 | } |
915 | |
916 | char * |
917 | _dl_getenv(const char *var, char **env) |
918 | { |
919 | const char *ep; |
920 | |
921 | while ((ep = *env++)) { |
922 | const char *vp = var; |
923 | |
924 | while (*vp && *vp == *ep) { |
925 | vp++; |
926 | ep++; |
927 | } |
928 | if (*vp == '\0' && *ep++ == '=') |
929 | return((char *)ep); |
930 | } |
931 | return(NULL); |
932 | } |
933 | |
934 | void |
935 | _dl_unsetenv(const char *var, char **env) |
936 | { |
937 | char *ep; |
938 | |
939 | while ((ep = *env)) { |
940 | const char *vp = var; |
941 | |
942 | while (*vp && *vp == *ep) { |
943 | vp++; |
944 | ep++; |
945 | } |
946 | if (*vp == '\0' && *ep++ == '=') { |
947 | char **P; |
948 | |
949 | for (P = env;; ++P) |
950 | if (!(*P = *(P + 1))) |
951 | break; |
952 | } else |
953 | env++; |
954 | } |
955 | } |
956 | |
957 | static inline void |
958 | fixup_sym(struct elf_object *dummy_obj, const char *name, void *addr) |
959 | { |
960 | struct sym_res sr; |
961 | |
962 | sr = _dl_find_symbol(name, SYM_SEARCH_ALL|SYM_NOWARNNOTFOUND|SYM_PLT, |
963 | NULL, dummy_obj); |
964 | if (sr.sym != NULL) { |
965 | void *p = (void *)(sr.sym->st_value + sr.obj->obj_base); |
966 | if (p != addr) { |
967 | DL_DEB(("setting %s %p@%s[%p] from %p\n", name, |
968 | p, sr.obj->load_name, (void *)sr.obj, addr)); |
969 | *(void **)p = *(void **)addr; |
970 | } |
971 | } |
972 | } |
973 | |
974 | /* |
975 | * _dl_fixup_user_env() |
976 | * |
977 | * Set the user environment so that programs can use the environment |
978 | * while running constructors. Specifically, MALLOC_OPTIONS= for malloc() |
979 | */ |
980 | void |
981 | _dl_fixup_user_env(void) |
982 | { |
983 | struct elf_object dummy_obj; |
984 | |
985 | dummy_obj.dyn.symbolic = 0; |
986 | dummy_obj.load_name = "ld.so"; |
987 | fixup_sym(&dummy_obj, "environ", &environ); |
988 | fixup_sym(&dummy_obj, "__progname", &__progname); |
989 | } |
990 | |
991 | const void * |
992 | _dl_cb_cb(int version) |
993 | { |
994 | DL_DEB(("version %d callbacks requested\n", version)); |
995 | if (version == 0) |
996 | return &callbacks_0; |
997 | return NULL; |
998 | } |
999 | |
1000 | static inline void |
1001 | unprotect_if_textrel(elf_object_t *object) |
1002 | { |
1003 | struct load_list *ll; |
1004 | |
1005 | if (__predict_false(object->dyn.textrel == 1)) { |
1006 | for (ll = object->load_list; ll != NULL; ll = ll->next) { |
1007 | if ((ll->prot & PROT_WRITE) == 0) |
1008 | _dl_mprotect(ll->start, ll->size, |
1009 | PROT_READ | PROT_WRITE); |
1010 | } |
1011 | } |
1012 | } |
1013 | |
1014 | static inline void |
1015 | reprotect_if_textrel(elf_object_t *object) |
1016 | { |
1017 | struct load_list *ll; |
1018 | |
1019 | if (__predict_false(object->dyn.textrel == 1)) { |
1020 | for (ll = object->load_list; ll != NULL; ll = ll->next) { |
1021 | if ((ll->prot & PROT_WRITE) == 0) |
1022 | _dl_mprotect(ll->start, ll->size, ll->prot); |
1023 | } |
1024 | } |
1025 | } |
1026 | |
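/* |
 * Apply DT_RELR relative relocations: an even entry is the address of |
 * the next word to which the load offset is added; each odd entry that |
 * follows is a bitmap whose bits 1..63 select which of the next 63 |
 * words also get the load offset added. |
 */ |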
1027 | static void |
1028 | _dl_rreloc(elf_object_t *object) |
1029 | { |
1030 | const Elf_Relr *reloc, *rend; |
1031 | Elf_Addr loff = object->obj_base; |
1032 | |
1033 | reloc = object->dyn.relr; |
1034 | rend = (const Elf_Relr *)((char *)reloc + object->dyn.relrsz); |
1035 | |
1036 | while (reloc < rend) { |
1037 | Elf_Addr *where; |
1038 | |
1039 | where = (Elf_Addr *)(*reloc + loff); |
1040 | *where++ += loff; |
1041 | |
1042 | for (reloc++; reloc < rend && (*reloc & 1); reloc++) { |
1043 | Elf_Addr bits = *reloc >> 1; |
1044 | |
1045 | Elf_Addr *here = where; |
1046 | while (bits != 0) { |
1047 | if (bits & 1) { |
1048 | *here += loff; |
1049 | } |
1050 | bits >>= 1; |
1051 | here++; |
1052 | } |
1053 | where += (8 * sizeof *reloc) - 1; |
1054 | } |
1055 | } |
1056 | } |
1057 | |
1058 | void |
1059 | _dl_push_range(struct range_vector *v, vaddr_t s, vaddr_t e) |
1060 | { |
1061 | int i = v->count; |
1062 | |
1063 | if (i == nitems(v->slice)) { |
1064 | _dl_die("too many ranges"); |
1065 | } |
1066 | /* Skips the empty ranges (s == e). */ |
1067 | if (s < e) { |
1068 | v->slice[i].start = s; |
1069 | v->slice[i].end = e; |
1070 | v->count++; |
1071 | } else if (s > e) { |
1072 | _dl_die("invalid range"); |
1073 | } |
1074 | } |
1075 | |
1076 | void |
1077 | _dl_push_range_size(struct range_vector *v, vaddr_t s, vsize_t size) |
1078 | { |
1079 | _dl_push_range(v, s, s + size); |
1080 | } |
1081 | |
1082 | /* |
1083 | * Finds the truly immutable ranges by taking mutable ones out. Implements |
1084 | * interval difference of imut and mut. Interval splitting necessitates |
1085 | * intermediate storage and complex double buffering. |
1086 | */ |
1087 | void |
1088 | _dl_apply_immutable(elf_object_t *object) |
1089 | { |
1090 | struct range_vector acc[2]; /* flips out to avoid copying */ |
1091 | struct addr_range *m, *im; |
1092 | int i, j, imut, in, out; |
1093 | |
1094 | if (object->obj_type != OBJTYPE_LIB) |
1095 | return; |
1096 | |
1097 | for (imut = 0; imut < object->imut.count; imut++) { |
1098 | im = &object->imut.slice[imut]; |
1099 | out = 0; |
1100 | acc[out].count = 0; |
1101 | _dl_push_range(&acc[out], im->start, im->end); |
1102 | |
1103 | for (i = 0; i < object->mut.count; i++) { |
1104 | m = &object->mut.slice[i]; |
1105 | in = out; |
1106 | out = 1 - in; |
1107 | acc[out].count = 0; |
1108 | for (j = 0; j < acc[in].count; j++) { |
1109 | const vaddr_t ms = m->start, me = m->end; |
1110 | const vaddr_t is = acc[in].slice[j].start, |
1111 | ie = acc[in].slice[j].end; |
1112 | if (ie <= ms || me <= is) { |
1113 | /* is .. ie .. ms .. me -> is .. ie */ |
1114 | /* ms .. me .. is .. ie -> is .. ie */ |
1115 | _dl_push_range(&acc[out], is, ie); |
1116 | } else if (ms <= is && ie <= me) { |
1117 | /* PROVIDED: ms < ie && is < me */ |
1118 | /* ms .. is .. ie .. me -> [] */ |
1119 | ; |
1120 | } else if (ie <= me) { |
1121 | /* is .. ms .. ie .. me -> is .. ms */ |
1122 | _dl_push_range(&acc[out], is, ms); |
1123 | } else if (is < ms) { |
1124 | /* is .. ms .. me .. ie -> is .. ms */ |
1125 | _dl_push_range(&acc[out], is, ms); |
1126 | _dl_push_range(&acc[out], me, ie); |
1127 | } else { |
1128 | /* ms .. is .. me .. ie -> me .. ie */ |
1129 | _dl_push_range(&acc[out], me, ie); |
1130 | } |
1131 | } |
1132 | } |
1133 | |
1134 | /* and now, install immutability for objects */ |
1135 | for (i = 0; i < acc[out].count; i++) { |
1136 | const struct addr_range *ar = &acc[out].slice[i]; |
1137 | _dl_mimmutable((void *)ar->start, ar->end - ar->start); |
1138 | } |
1139 | |
1140 | } |
1141 | } |