Bug Summary

File: src/lib/libc/stdlib/malloc.c
Warning: line 1803, column 14
Assigned value is garbage or undefined
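
The defect class is easiest to see in isolation. The following is a minimal, hypothetical sketch of the pattern this checker reports (it is not code from malloc.c): an output parameter that is written on only some paths, so the caller's local may remain uninitialized, and a later assignment copies that indeterminate ("garbage") value. Compare the path notes on findpool() in the annotated source below, which end with "Returning without writing to '*saved_function'".

    /* demo.c -- hypothetical example, deliberately buggy, for illustration only */
    #include <stdio.h>

    /* Writes *saved only on the fallback path. */
    static int
    lookup(int key, const char **saved)
    {
            if (key != 0)
                    return 1;       /* returns without writing to '*saved' */
            *saved = "fallback";
            return 0;
    }

    int
    main(void)
    {
            const char *saved_name; /* stays uninitialized when key != 0 */
            const char *name;

            lookup(1, &saved_name);
            name = saved_name;      /* "Assigned value is garbage or undefined" */
            printf("%s\n", name);
            return 0;
    }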

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name malloc.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/lib/libc/obj -resource-dir /usr/local/lib/clang/13.0.0 -include namespace.h -I /usr/src/lib/libc/include -I /usr/src/lib/libc/hidden -D __LIBC__ -D APIWARN -D YP -I /usr/src/lib/libc/yp -I /usr/src/lib/libc -I /usr/src/lib/libc/gdtoa -I /usr/src/lib/libc/arch/amd64/gdtoa -D INFNAN_CHECK -D MULTIPLE_THREADS -D NO_FENV_H -D USE_LOCALE -I /usr/src/lib/libc -I /usr/src/lib/libc/citrus -D RESOLVSORT -D FLOATING_POINT -D PRINTF_WIDE_CHAR -D SCANF_WIDE_CHAR -D FUTEX -D PIC -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/lib/libc/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/lib/libc/stdlib/malloc.c
1/* $OpenBSD: malloc.c,v 1.272 2021/09/19 09:15:22 tb Exp $ */
2/*
3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * If we meet some day, and you think this stuff is worth it, you
23 * can buy me a beer in return. Poul-Henning Kamp
24 */
25
26/* #define MALLOC_STATS */
27
28#include <sys/types.h>
29#include <sys/queue.h>
30#include <sys/mman.h>
31#include <sys/sysctl.h>
32#include <uvm/uvmexp.h>
33#include <errno.h>
34#include <stdarg.h>
35#include <stdint.h>
36#include <stdio.h>
37#include <stdlib.h>
38#include <string.h>
39#include <unistd.h>
40
41#ifdef MALLOC_STATS
42#include <sys/tree.h>
43#include <fcntl.h>
44#endif
45
46#include "thread_private.h"
47#include <tib.h>
48
49#define MALLOC_PAGESHIFT _MAX_PAGE_SHIFT
50
51#define MALLOC_MINSHIFT 4
52#define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1)
53#define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT)
54#define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT)
55#define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1)
56#define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
57
58#define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT)
59#define MALLOC_MAXCACHE 256
60#define MALLOC_DELAYED_CHUNK_MASK 15
61#ifdef MALLOC_STATS
62#define MALLOC_INITIAL_REGIONS 512
63#else
64#define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info))
65#endif
66#define MALLOC_DEFAULT_CACHE 64
67#define MALLOC_CHUNK_LISTS 4
68#define CHUNK_CHECK_LENGTH 32
69
70/*
71 * We move allocations between half a page and a whole page towards the end,
72 * subject to alignment constraints. This is the extra headroom we allow.
73 * Set to zero to be the most strict.
74 */
75#define MALLOC_LEEWAY 0
76#define MALLOC_MOVE_COND(sz) ((sz) - mopts.malloc_guard < \
77 MALLOC_PAGESIZE - MALLOC_LEEWAY)
78#define MALLOC_MOVE(p, sz) (((char *)(p)) + \
79 ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \
80 ((sz) - mopts.malloc_guard)) & \
81 ~(MALLOC_MINSIZE - 1)))
82
83#define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
84
85/*
86 * What to use for Junk. This is the byte value we use to fill with
87 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
88 * and SOME_FREEJUNK right before free.
89 */
90#define SOME_JUNK 0xdb /* deadbeef */
91#define SOME_FREEJUNK 0xdf /* dead, free */
92#define SOME_FREEJUNK_ULL 0xdfdfdfdfdfdfdfdfULL
93
94#define MMAP(sz,f) mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
95 MAP_ANON | MAP_PRIVATE | (f), -1, 0)
96
97#define MMAPNONE(sz,f) mmap(NULL, (sz), PROT_NONE, \
98 MAP_ANON | MAP_PRIVATE | (f), -1, 0)
99
100#define MMAPA(a,sz,f) mmap((a), (sz), PROT_READ | PROT_WRITE, \
101 MAP_ANON | MAP_PRIVATE | (f), -1, 0)
102
103#define MQUERY(a,sz,f) mquery((a), (sz), PROT_READ | PROT_WRITE, \
104 MAP_ANON | MAP_PRIVATE | MAP_FIXED | (f), -1, 0)
105
106struct region_info {
107 void *p; /* page; low bits used to mark chunks */
108 uintptr_t size; /* size for pages, or chunk_info pointer */
109#ifdef MALLOC_STATS
110 void *f; /* where allocated from */
111#endif
112};
113
114LIST_HEAD(chunk_head, chunk_info);
115
116#define MAX_CACHEABLE_SIZE32 32
117struct cache {
118 void *pages[MALLOC_MAXCACHE256];
119 ushort length;
120 ushort max;
121};
122
123struct dir_info {
124 u_int32_t canary1;
125 int active; /* status of malloc */
126 struct region_info *r; /* region slots */
127 size_t regions_total; /* number of region slots */
128 size_t regions_free; /* number of free slots */
129 size_t rbytesused; /* random bytes used */
130 char *func; /* current function */
131 int malloc_junk; /* junk fill? */
132 int mmap_flag; /* extra flag for mmap */
133 int mutex;
134 /* lists of free chunk info structs */
135 struct chunk_head chunk_info_list[MALLOC_MAXSHIFT(12 - 1) + 1];
136 /* lists of chunks with free slots */
137 struct chunk_head chunk_dir[MALLOC_MAXSHIFT(12 - 1) + 1][MALLOC_CHUNK_LISTS4];
138 /* delayed free chunk slots */
139 void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK15 + 1];
140 u_char rbytes[32]; /* random bytes */
141 /* free pages cache */
142 struct cache cache[MAX_CACHEABLE_SIZE32];
143#ifdef MALLOC_STATS
144 size_t inserts;
145 size_t insert_collisions;
146 size_t finds;
147 size_t find_collisions;
148 size_t deletes;
149 size_t delete_moves;
150 size_t cheap_realloc_tries;
151 size_t cheap_reallocs;
152 size_t malloc_used; /* bytes allocated */
153 size_t malloc_guarded; /* bytes used for guards */
154 size_t pool_searches; /* searches for pool */
155 size_t other_pool; /* searches in other pool */
156#define STATS_ADD(x,y) ((x) += (y))
157#define STATS_SUB(x,y) ((x) -= (y))
158#define STATS_INC(x) ((x)++)
159#define STATS_ZERO(x) ((x) = 0)
160#define STATS_SETF(x,y) ((x)->f = (y))
161#else
162#define STATS_ADD(x,y) /* nothing */
163#define STATS_SUB(x,y) /* nothing */
164#define STATS_INC(x) /* nothing */
165#define STATS_ZERO(x) /* nothing */
166#define STATS_SETF(x,y) /* nothing */
167#endif /* MALLOC_STATS */
168 u_int32_t canary2;
169};
170#define DIR_INFO_RSZ ((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
171 ~MALLOC_PAGEMASK)
172
173static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
174
175/*
176 * This structure describes a page worth of chunks.
177 *
178 * How many bits per u_short in the bitmap
179 */
180#define MALLOC_BITS (NBBY * sizeof(u_short))
181struct chunk_info {
182 LIST_ENTRY(chunk_info) entries;
183 void *page; /* pointer to the page */
184 u_short canary;
185 u_short size; /* size of this page's chunks */
186 u_short shift; /* how far to shift for this size */
187 u_short free; /* how many free chunks */
188 u_short total; /* how many chunks */
189 u_short offset; /* requested size table offset */
190 u_short bits[1]; /* which chunks are free */
191};
192
193struct malloc_readonly {
194 /* Main bookkeeping information */
195 struct dir_info *malloc_pool[_MALLOC_MUTEXES32];
196 u_int malloc_mutexes; /* how much in actual use? */
197 int malloc_mt; /* multi-threaded mode? */
198 int malloc_freecheck; /* Extensive double free check */
199 int malloc_freeunmap; /* mprotect free pages PROT_NONE? */
200 int def_malloc_junk; /* junk fill? */
201 int malloc_realloc; /* always realloc? */
202 int malloc_xmalloc; /* xmalloc behaviour? */
203 u_int chunk_canaries; /* use canaries after chunks? */
204 int internal_funcs; /* use better recallocarray/freezero? */
205 u_int def_maxcache; /* free pages we cache */
206 size_t malloc_guard; /* use guard pages after allocations? */
207#ifdef MALLOC_STATS
208 int malloc_stats; /* dump statistics at end */
209#endif
210 u_int32_t malloc_canary; /* Matched against ones in malloc_pool */
211};
212
213/* This object is mapped PROT_READ after initialisation to prevent tampering */
214static union {
215 struct malloc_readonly mopts;
216 u_char _pad[MALLOC_PAGESIZE];
217} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
218#define mopts malloc_readonly.mopts
219
220char *malloc_options; /* compile-time options */
221
222static __dead void wrterror(struct dir_info *d, char *msg, ...)
223 __attribute__((__format__ (printf, 2, 3)));
224
225#ifdef MALLOC_STATS
226void malloc_dump(int, int, struct dir_info *);
227PROTO_NORMAL(malloc_dump);
228void malloc_gdump(int);
229PROTO_NORMAL(malloc_gdump);
230static void malloc_exit(void);
231#define CALLER __builtin_return_address(0)
232#else
233#define CALLER NULL
234#endif
235
236/* low bits of r->p determine size: 0 means >= page size and r->size holding
237 * real size, otherwise low bits are a shift count, or 1 for malloc(0)
238 */
239#define REALSIZE(sz, r) \
240 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \
241 (sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
242
243static inline void
244_MALLOC_LEAVE(struct dir_info *d)
245{
246 if (moptsmalloc_readonly.mopts.malloc_mt) {
247 d->active--;
248 _MALLOC_UNLOCK(d->mutex);
249 }
250}
251
252static inline void
253_MALLOC_ENTER(struct dir_info *d)
254{
255 if (moptsmalloc_readonly.mopts.malloc_mt) {
256 _MALLOC_LOCK(d->mutex);
257 d->active++;
258 }
259}
260
261static inline size_t
262hash(void *p)
263{
264 size_t sum;
265 uintptr_t u;
266
267 u = (uintptr_t)p >> MALLOC_PAGESHIFT12;
268 sum = u;
269 sum = (sum << 7) - sum + (u >> 16);
270#ifdef __LP64__1
271 sum = (sum << 7) - sum + (u >> 32);
272 sum = (sum << 7) - sum + (u >> 48);
273#endif
274 return sum;
275}
276
277static inline struct dir_info *
278getpool(void)
279{
280 if (!moptsmalloc_readonly.mopts.malloc_mt)
281 return moptsmalloc_readonly.mopts.malloc_pool[1];
282 else /* first one reserved for special pool */
283 return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
284 (mopts.malloc_mutexes - 1)];
285}
286
287static __dead__attribute__((__noreturn__)) void
288wrterror(struct dir_info *d, char *msg, ...)
289{
290 int saved_errno = errno(*__errno());
291 va_list ap;
292
293 dprintf(STDERR_FILENO2, "%s(%d) in %s(): ", __progname,
294 getpid(), (d != NULL((void*)0) && d->func) ? d->func : "unknown");
295 va_start(ap, msg)__builtin_va_start(ap, msg);
296 vdprintf(STDERR_FILENO2, msg, ap);
297 va_end(ap)__builtin_va_end(ap);
298 dprintf(STDERR_FILENO2, "\n");
299
300#ifdef MALLOC_STATS
301 if (moptsmalloc_readonly.mopts.malloc_stats)
302 malloc_gdump(STDERR_FILENO2);
303#endif /* MALLOC_STATS */
304
305 errno(*__errno()) = saved_errno;
306
307 abort();
308}
309
310static void
311rbytes_init(struct dir_info *d)
312{
313 arc4random_buf(d->rbytes, sizeof(d->rbytes));
314 /* add 1 to account for using d->rbytes[0] */
315 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
316}
317
318static inline u_char
319getrbyte(struct dir_info *d)
320{
321 u_char x;
322
323 if (d->rbytesused >= sizeof(d->rbytes))
324 rbytes_init(d);
325 x = d->rbytes[d->rbytesused++];
326 return x;
327}
328
329static void
330omalloc_parseopt(char opt)
331{
332 switch (opt) {
333 case '+':
334 moptsmalloc_readonly.mopts.malloc_mutexes <<= 1;
335 if (moptsmalloc_readonly.mopts.malloc_mutexes > _MALLOC_MUTEXES32)
336 moptsmalloc_readonly.mopts.malloc_mutexes = _MALLOC_MUTEXES32;
337 break;
338 case '-':
339 moptsmalloc_readonly.mopts.malloc_mutexes >>= 1;
340 if (moptsmalloc_readonly.mopts.malloc_mutexes < 2)
341 moptsmalloc_readonly.mopts.malloc_mutexes = 2;
342 break;
343 case '>':
344 moptsmalloc_readonly.mopts.def_maxcache <<= 1;
345 if (moptsmalloc_readonly.mopts.def_maxcache > MALLOC_MAXCACHE256)
346 moptsmalloc_readonly.mopts.def_maxcache = MALLOC_MAXCACHE256;
347 break;
348 case '<':
349 moptsmalloc_readonly.mopts.def_maxcache >>= 1;
350 break;
351 case 'c':
352 moptsmalloc_readonly.mopts.chunk_canaries = 0;
353 break;
354 case 'C':
355 moptsmalloc_readonly.mopts.chunk_canaries = 1;
356 break;
357#ifdef MALLOC_STATS
358 case 'd':
359 moptsmalloc_readonly.mopts.malloc_stats = 0;
360 break;
361 case 'D':
362 moptsmalloc_readonly.mopts.malloc_stats = 1;
363 break;
364#endif /* MALLOC_STATS */
365 case 'f':
366 moptsmalloc_readonly.mopts.malloc_freecheck = 0;
367 moptsmalloc_readonly.mopts.malloc_freeunmap = 0;
368 break;
369 case 'F':
370 moptsmalloc_readonly.mopts.malloc_freecheck = 1;
371 moptsmalloc_readonly.mopts.malloc_freeunmap = 1;
372 break;
373 case 'g':
374 moptsmalloc_readonly.mopts.malloc_guard = 0;
375 break;
376 case 'G':
377 moptsmalloc_readonly.mopts.malloc_guard = MALLOC_PAGESIZE(1UL << 12);
378 break;
379 case 'j':
380 if (moptsmalloc_readonly.mopts.def_malloc_junk > 0)
381 moptsmalloc_readonly.mopts.def_malloc_junk--;
382 break;
383 case 'J':
384 if (moptsmalloc_readonly.mopts.def_malloc_junk < 2)
385 moptsmalloc_readonly.mopts.def_malloc_junk++;
386 break;
387 case 'r':
388 moptsmalloc_readonly.mopts.malloc_realloc = 0;
389 break;
390 case 'R':
391 moptsmalloc_readonly.mopts.malloc_realloc = 1;
392 break;
393 case 'u':
394 moptsmalloc_readonly.mopts.malloc_freeunmap = 0;
395 break;
396 case 'U':
397 moptsmalloc_readonly.mopts.malloc_freeunmap = 1;
398 break;
399 case 'x':
400 moptsmalloc_readonly.mopts.malloc_xmalloc = 0;
401 break;
402 case 'X':
403 moptsmalloc_readonly.mopts.malloc_xmalloc = 1;
404 break;
405 default:
406 dprintf(STDERR_FILENO2, "malloc() warning: "
407 "unknown char in MALLOC_OPTIONS\n");
408 break;
409 }
410}
411
412static void
413omalloc_init(void)
414{
415 char *p, *q, b[16];
416 int i, j;
417 const int mib[2] = { CTL_VM2, VM_MALLOC_CONF12 };
418 size_t sb;
419
420 /*
421 * Default options
422 */
423 moptsmalloc_readonly.mopts.malloc_mutexes = 8;
424 moptsmalloc_readonly.mopts.def_malloc_junk = 1;
425 moptsmalloc_readonly.mopts.def_maxcache = MALLOC_DEFAULT_CACHE64;
426
427 for (i = 0; i < 3; i++) {
428 switch (i) {
429 case 0:
430 sb = sizeof(b);
431 j = sysctl(mib, 2, b, &sb, NULL((void*)0), 0);
432 if (j != 0)
433 continue;
434 p = b;
435 break;
436 case 1:
437 if (issetugid() == 0)
438 p = getenv("MALLOC_OPTIONS");
439 else
440 continue;
441 break;
442 case 2:
443 p = malloc_options;
444 break;
445 default:
446 p = NULL((void*)0);
447 }
448
449 for (; p != NULL((void*)0) && *p != '\0'; p++) {
450 switch (*p) {
451 case 'S':
452 for (q = "CFGJ"; *q != '\0'; q++)
453 omalloc_parseopt(*q);
454 moptsmalloc_readonly.mopts.def_maxcache = 0;
455 break;
456 case 's':
457 for (q = "cfgj"; *q != '\0'; q++)
458 omalloc_parseopt(*q);
459 moptsmalloc_readonly.mopts.def_maxcache = MALLOC_DEFAULT_CACHE64;
460 break;
461 default:
462 omalloc_parseopt(*p);
463 break;
464 }
465 }
466 }
467
468#ifdef MALLOC_STATS
469 if (moptsmalloc_readonly.mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
470 dprintf(STDERR_FILENO2, "malloc() warning: atexit(2) failed."
471 " Will not be able to dump stats on exit\n");
472 }
473#endif /* MALLOC_STATS */
474
475 while ((moptsmalloc_readonly.mopts.malloc_canary = arc4random()) == 0)
476 ;
477 if (moptsmalloc_readonly.mopts.chunk_canaries)
478 do {
479 moptsmalloc_readonly.mopts.chunk_canaries = arc4random();
480 } while ((u_char)moptsmalloc_readonly.mopts.chunk_canaries == 0 ||
481 (u_char)moptsmalloc_readonly.mopts.chunk_canaries == SOME_FREEJUNK0xdf);
482}
483
484static void
485omalloc_poolinit(struct dir_info **dp, int mmap_flag)
486{
487 char *p;
488 size_t d_avail, regioninfo_size;
489 struct dir_info *d;
490 int i, j;
491
492 /*
493 * Allocate dir_info with a guard page on either side. Also
494 * randomise offset inside the page at which the dir_info
495 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
496 */
497 if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
498 MAP_FAILED)
499 wrterror(NULL, "malloc init mmap failed");
500 mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
501 d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
502 d = (struct dir_info *)(p + MALLOC_PAGESIZE +
503 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
504
505 rbytes_init(d);
506 d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
507 regioninfo_size = d->regions_total * sizeof(struct region_info);
508 d->r = MMAP(regioninfo_size, mmap_flag);
509 if (d->r == MAP_FAILED) {
510 d->regions_total = 0;
511 wrterror(NULL, "malloc init mmap failed");
512 }
513 for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
514 LIST_INIT(&d->chunk_info_list[i]);
515 for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
516 LIST_INIT(&d->chunk_dir[i][j]);
517 }
518 STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
519 d->mmap_flag = mmap_flag;
520 d->malloc_junk = moptsmalloc_readonly.mopts.def_malloc_junk;
521 d->canary1 = moptsmalloc_readonly.mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
522 d->canary2 = ~d->canary1;
523
524 *dp = d;
525}
526
527static int
528omalloc_grow(struct dir_info *d)
529{
530 size_t newtotal;
531 size_t newsize;
532 size_t mask;
533 size_t i, oldpsz;
534 struct region_info *p;
535
536 if (d->regions_total > SIZE_MAX0xffffffffffffffffUL / sizeof(struct region_info) / 2)
537 return 1;
538
539 newtotal = d->regions_total * 2;
540 newsize = PAGEROUND(newtotal * sizeof(struct region_info))(((newtotal * sizeof(struct region_info)) + (((1UL << 12
) - 1))) & ~((1UL << 12) - 1))
;
541 mask = newtotal - 1;
542
543 /* Don't use cache here, we don't want user uaf touch this */
544 p = MMAP(newsize, d->mmap_flag)mmap(((void*)0), (newsize), 0x01 | 0x02, 0x1000 | 0x0002 | (d
->mmap_flag), -1, 0)
;
545 if (p == MAP_FAILED((void *)-1))
546 return 1;
547
548 STATS_ADD(d->malloc_used, newsize);
549 STATS_ZERO(d->inserts);
550 STATS_ZERO(d->insert_collisions);
551 for (i = 0; i < d->regions_total; i++) {
552 void *q = d->r[i].p;
553 if (q != NULL((void*)0)) {
554 size_t index = hash(q) & mask;
555 STATS_INC(d->inserts);
556 while (p[index].p != NULL((void*)0)) {
557 index = (index - 1) & mask;
558 STATS_INC(d->insert_collisions);
559 }
560 p[index] = d->r[i];
561 }
562 }
563
564 oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info))(((d->regions_total * sizeof(struct region_info)) + (((1UL
<< 12) - 1))) & ~((1UL << 12) - 1))
;
565 /* clear to avoid meta info ending up in the cache */
566 unmap(d, d->r, oldpsz, oldpsz);
567 d->regions_free += d->regions_total;
568 d->regions_total = newtotal;
569 d->r = p;
570 return 0;
571}
572
573/*
574 * The hashtable uses the assumption that p is never NULL. This holds since
575 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
576 */
577static int
578insert(struct dir_info *d, void *p, size_t sz, void *f)
579{
580 size_t index;
581 size_t mask;
582 void *q;
583
584 if (d->regions_free * 4 < d->regions_total) {
585 if (omalloc_grow(d))
586 return 1;
587 }
588 mask = d->regions_total - 1;
589 index = hash(p) & mask;
590 q = d->r[index].p;
591 STATS_INC(d->inserts);
592 while (q != NULL((void*)0)) {
593 index = (index - 1) & mask;
594 q = d->r[index].p;
595 STATS_INC(d->insert_collisions);
596 }
597 d->r[index].p = p;
598 d->r[index].size = sz;
599#ifdef MALLOC_STATS
600 d->r[index].f = f;
601#endif
602 d->regions_free--;
603 return 0;
604}
605
606static struct region_info *
607find(struct dir_info *d, void *p)
608{
609 size_t index;
610 size_t mask = d->regions_total - 1;
611 void *q, *r;
612
613 if (moptsmalloc_readonly.mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
614 d->canary1 != ~d->canary2)
615 wrterror(d, "internal struct corrupt");
616 p = MASK_POINTER(p)((void *)(((uintptr_t)(p)) & ~((1UL << 12) - 1)));
617 index = hash(p) & mask;
618 r = d->r[index].p;
619 q = MASK_POINTER(r)((void *)(((uintptr_t)(r)) & ~((1UL << 12) - 1)));
620 STATS_INC(d->finds);
621 while (q != p && r != NULL((void*)0)) {
622 index = (index - 1) & mask;
623 r = d->r[index].p;
624 q = MASK_POINTER(r)((void *)(((uintptr_t)(r)) & ~((1UL << 12) - 1)));
625 STATS_INC(d->find_collisions);
626 }
627 return (q == p && r != NULL((void*)0)) ? &d->r[index] : NULL((void*)0);
628}
629
630static void
631delete(struct dir_info *d, struct region_info *ri)
632{
633 /* algorithm R, Knuth Vol III section 6.4 */
634 size_t mask = d->regions_total - 1;
635 size_t i, j, r;
636
637 if (d->regions_total & (d->regions_total - 1))
638 wrterror(d, "regions_total not 2^x");
639 d->regions_free++;
640 STATS_INC(d->deletes);
641
642 i = ri - d->r;
643 for (;;) {
644 d->r[i].p = NULL((void*)0);
645 d->r[i].size = 0;
646 j = i;
647 for (;;) {
648 i = (i - 1) & mask;
649 if (d->r[i].p == NULL((void*)0))
650 return;
651 r = hash(d->r[i].p) & mask;
652 if ((i <= r && r < j) || (r < j && j < i) ||
653 (j < i && i <= r))
654 continue;
655 d->r[j] = d->r[i];
656 STATS_INC(d->delete_moves);
657 break;
658 }
659
660 }
661}
662
663static inline void
664junk_free(int junk, void *p, size_t sz)
665{
666 size_t i, step = 1;
667 uint64_t *lp = p;
668
669 if (junk == 0 || sz == 0)
670 return;
671 sz /= sizeof(uint64_t);
672 if (junk == 1) {
673 if (sz > MALLOC_PAGESIZE(1UL << 12) / sizeof(uint64_t))
674 sz = MALLOC_PAGESIZE(1UL << 12) / sizeof(uint64_t);
675 step = sz / 4;
676 if (step == 0)
677 step = 1;
678 }
679 for (i = 0; i < sz; i += step)
680 lp[i] = SOME_FREEJUNK_ULL0xdfdfdfdfdfdfdfdfULL;
681}
682
683static inline void
684validate_junk(struct dir_info *pool, void *p, size_t sz)
685{
686 size_t i, step = 1;
687 uint64_t *lp = p;
688
689 if (pool->malloc_junk == 0 || sz == 0)
690 return;
691 sz /= sizeof(uint64_t);
692 if (pool->malloc_junk == 1) {
693 if (sz > MALLOC_PAGESIZE(1UL << 12) / sizeof(uint64_t))
694 sz = MALLOC_PAGESIZE(1UL << 12) / sizeof(uint64_t);
695 step = sz / 4;
696 if (step == 0)
697 step = 1;
698 }
699 for (i = 0; i < sz; i += step) {
700 if (lp[i] != SOME_FREEJUNK_ULL0xdfdfdfdfdfdfdfdfULL)
701 wrterror(pool, "write after free %p", p);
702 }
703}
704
705
706/*
707 * Cache maintenance.
708 * Opposed to the regular region data structure, the sizes in the
709 * cache are in MALLOC_PAGESIZE units.
710 */
711static void
712unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
713{
714 size_t psz = sz >> MALLOC_PAGESHIFT12;
715 void *r;
716 u_short i;
717 struct cache *cache;
718
719 if (sz != PAGEROUND(sz) || psz == 0)
720 wrterror(d, "munmap round");
721
722 if (psz > MAX_CACHEABLE_SIZE32 || d->cache[psz - 1].max == 0) {
723 if (munmap(p, sz))
724 wrterror(d, "munmap %p", p);
725 STATS_SUB(d->malloc_used, sz);
726 return;
727 }
728 cache = &d->cache[psz - 1];
729 if (cache->length == cache->max) {
730 /* use a random slot */
731 i = getrbyte(d) % cache->max;
732 r = cache->pages[i];
733 if (!moptsmalloc_readonly.mopts.malloc_freeunmap)
734 validate_junk(d, r, sz);
735 if (munmap(r, sz))
736 wrterror(d, "munmap %p", r);
737 STATS_SUB(d->malloc_used, sz);
738 cache->length--;
739 } else
740 i = cache->length;
741
742 /* fill slot */
743 if (clear > 0)
744 explicit_bzero(p, clear);
745 if (moptsmalloc_readonly.mopts.malloc_freeunmap)
746 mprotect(p, sz, PROT_NONE0x00);
747 else
748 junk_free(d->malloc_junk, p, sz);
749 cache->pages[i] = p;
750 cache->length++;
751}
752
753static void *
754map(struct dir_info *d, size_t sz, int zero_fill)
755{
756 size_t i, psz = sz >> MALLOC_PAGESHIFT12;
757 void *p;
758 struct cache *cache;
759
760 if (moptsmalloc_readonly.mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
761 d->canary1 != ~d->canary2)
762 wrterror(d, "internal struct corrupt");
763 if (sz != PAGEROUND(sz) || psz == 0)
764 wrterror(d, "map round");
765
766
767 if (psz <= MAX_CACHEABLE_SIZE32 && d->cache[psz - 1].max > 0) {
768 cache = &d->cache[psz - 1];
769 if (cache->length > 0) {
770 if (cache->length == 1)
771 p = cache->pages[--cache->length];
772 else {
773 i = getrbyte(d) % cache->length;
774 p = cache->pages[i];
775 cache->pages[i] = cache->pages[--cache->length];
776 }
777 if (!moptsmalloc_readonly.mopts.malloc_freeunmap)
778 validate_junk(d, p, sz);
779 if (moptsmalloc_readonly.mopts.malloc_freeunmap)
780 mprotect(p, sz, PROT_READ0x01 | PROT_WRITE0x02);
781 if (zero_fill)
782 memset(p, 0, sz);
783 else if (moptsmalloc_readonly.mopts.malloc_freeunmap)
784 junk_free(d->malloc_junk, p, sz);
785 return p;
786 }
787 if (psz <= 1) {
788 _MALLOC_LEAVE(d);
789 p = MMAP(cache->max * sz, d->mmap_flag)mmap(((void*)0), (cache->max * sz), 0x01 | 0x02, 0x1000 | 0x0002
| (d->mmap_flag), -1, 0)
;
790 _MALLOC_ENTER(d);
791 if (p != MAP_FAILED((void *)-1)) {
792 STATS_ADD(d->malloc_used, cache->max * sz);
793 cache->length = cache->max - 1;
794 for (i = 0; i < cache->max - 1; i++) {
795 void *q = (char*)p + i * sz;
796 cache->pages[i] = q;
797 if (!moptsmalloc_readonly.mopts.malloc_freeunmap)
798 junk_free(d->malloc_junk, q, sz);
799 }
800 if (moptsmalloc_readonly.mopts.malloc_freeunmap)
801 mprotect(p, (cache->max - 1) * sz, PROT_NONE0x00);
802 p = (char*)p + (cache->max - 1) * sz;
803 /* zero fill not needed */
804 return p;
805 }
806 }
807
808 }
809 _MALLOC_LEAVE(d);
810 p = MMAP(sz, d->mmap_flag)mmap(((void*)0), (sz), 0x01 | 0x02, 0x1000 | 0x0002 | (d->
mmap_flag), -1, 0)
;
811 _MALLOC_ENTER(d);
812 if (p != MAP_FAILED((void *)-1))
813 STATS_ADD(d->malloc_used, sz);
814 /* zero fill not needed */
815 return p;
816}
817
818static void
819init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
820{
821 int i;
822
823 if (bits == 0) {
824 p->shift = MALLOC_MINSHIFT4;
825 p->total = p->free = MALLOC_PAGESIZE(1UL << 12) >> p->shift;
826 p->size = 0;
827 p->offset = 0xdead;
828 } else {
829 p->shift = bits;
830 p->total = p->free = MALLOC_PAGESIZE(1UL << 12) >> p->shift;
831 p->size = 1U << bits;
832 p->offset = howmany(p->total, MALLOC_BITS)(((p->total) + (((8 * sizeof(u_short))) - 1)) / ((8 * sizeof
(u_short))))
;
833 }
834 p->canary = (u_short)d->canary1;
835
836 /* set all valid bits in the bitmap */
837 i = p->total - 1;
838 memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS(8 * sizeof(u_short))));
839 p->bits[i / MALLOC_BITS(8 * sizeof(u_short))] = (2U << (i % MALLOC_BITS(8 * sizeof(u_short)))) - 1;
840}
841
842static struct chunk_info *
843alloc_chunk_info(struct dir_info *d, int bits)
844{
845 struct chunk_info *p;
846
847 if (LIST_EMPTY(&d->chunk_info_list[bits])(((&d->chunk_info_list[bits])->lh_first) == ((void*
)0))
) {
848 size_t size, count, i;
849 char *q;
850
851 if (bits == 0)
852 count = MALLOC_PAGESIZE(1UL << 12) / MALLOC_MINSIZE(1UL << 4);
853 else
854 count = MALLOC_PAGESIZE(1UL << 12) >> bits;
855
856 size = howmany(count, MALLOC_BITS)(((count) + (((8 * sizeof(u_short))) - 1)) / ((8 * sizeof(u_short
))))
;
857 size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
858 if (moptsmalloc_readonly.mopts.chunk_canaries)
859 size += count * sizeof(u_short);
860 size = _ALIGN(size)(((unsigned long)(size) + (sizeof(long) - 1)) &~(sizeof(long
) - 1))
;
861
862 /* Don't use cache here, we don't want user uaf touch this */
863 q = MMAP(MALLOC_PAGESIZE, d->mmap_flag)mmap(((void*)0), ((1UL << 12)), 0x01 | 0x02, 0x1000 | 0x0002
| (d->mmap_flag), -1, 0)
;
864 if (q == MAP_FAILED((void *)-1))
865 return NULL((void*)0);
866 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
867 count = MALLOC_PAGESIZE(1UL << 12) / size;
868
869 for (i = 0; i < count; i++, q += size) {
870 p = (struct chunk_info *)q;
871 LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries)do { if (((p)->entries.le_next = (&d->chunk_info_list
[bits])->lh_first) != ((void*)0)) (&d->chunk_info_list
[bits])->lh_first->entries.le_prev = &(p)->entries
.le_next; (&d->chunk_info_list[bits])->lh_first = (
p); (p)->entries.le_prev = &(&d->chunk_info_list
[bits])->lh_first; } while (0)
;
872 }
873 }
874 p = LIST_FIRST(&d->chunk_info_list[bits])((&d->chunk_info_list[bits])->lh_first);
875 LIST_REMOVE(p, entries)do { if ((p)->entries.le_next != ((void*)0)) (p)->entries
.le_next->entries.le_prev = (p)->entries.le_prev; *(p)->
entries.le_prev = (p)->entries.le_next; ; ; } while (0)
;
876 if (p->shift == 0)
877 init_chunk_info(d, p, bits);
878 return p;
879}
880
881/*
882 * Allocate a page of chunks
883 */
884static struct chunk_info *
885omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
886{
887 struct chunk_info *bp;
888 void *pp;
889
890 /* Allocate a new bucket */
891 pp = map(d, MALLOC_PAGESIZE(1UL << 12), 0);
892 if (pp == MAP_FAILED((void *)-1))
893 return NULL((void*)0);
894
895 /* memory protect the page allocated in the malloc(0) case */
896 if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE(1UL << 12), PROT_NONE0x00) == -1)
897 goto err;
898
899 bp = alloc_chunk_info(d, bits);
900 if (bp == NULL((void*)0))
901 goto err;
902 bp->page = pp;
903
904 if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp,
905 NULL((void*)0)))
906 goto err;
907 LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries)do { if (((bp)->entries.le_next = (&d->chunk_dir[bits
][listnum])->lh_first) != ((void*)0)) (&d->chunk_dir
[bits][listnum])->lh_first->entries.le_prev = &(bp)
->entries.le_next; (&d->chunk_dir[bits][listnum])->
lh_first = (bp); (bp)->entries.le_prev = &(&d->
chunk_dir[bits][listnum])->lh_first; } while (0)
;
908 return bp;
909
910err:
911 unmap(d, pp, MALLOC_PAGESIZE(1UL << 12), 0);
912 return NULL((void*)0);
913}
914
915static int
916find_chunksize(size_t size)
917{
918 int r;
919
920 /* malloc(0) is special */
921 if (size == 0)
922 return 0;
923
924 if (size < MALLOC_MINSIZE(1UL << 4))
925 size = MALLOC_MINSIZE(1UL << 4);
926 size--;
927
928 r = MALLOC_MINSHIFT4;
929 while (size >> r)
930 r++;
931 return r;
932}
933
934static void
935fill_canary(char *ptr, size_t sz, size_t allocated)
936{
937 size_t check_sz = allocated - sz;
938
939 if (check_sz > CHUNK_CHECK_LENGTH32)
940 check_sz = CHUNK_CHECK_LENGTH32;
941 memset(ptr + sz, moptsmalloc_readonly.mopts.chunk_canaries, check_sz);
942}
943
944/*
945 * Allocate a chunk
946 */
947static void *
948malloc_bytes(struct dir_info *d, size_t size, void *f)
949{
950 u_int i, r;
951 int j, listnum;
952 size_t k;
953 u_short *lp;
954 struct chunk_info *bp;
955 void *p;
956
957 if (moptsmalloc_readonly.mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
958 d->canary1 != ~d->canary2)
959 wrterror(d, "internal struct corrupt");
960
961 j = find_chunksize(size);
962
963 r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
964 listnum = r % MALLOC_CHUNK_LISTS4;
965 /* If it's empty, make a page more of that size chunks */
966 if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])((&d->chunk_dir[j][listnum])->lh_first)) == NULL((void*)0)) {
967 bp = omalloc_make_chunks(d, j, listnum);
968 if (bp == NULL((void*)0))
969 return NULL((void*)0);
970 }
971
972 if (bp->canary != (u_short)d->canary1)
973 wrterror(d, "chunk info corrupted");
974
975 i = (r / MALLOC_CHUNK_LISTS4) & (bp->total - 1);
976
977 /* start somewhere in a short */
978 lp = &bp->bits[i / MALLOC_BITS(8 * sizeof(u_short))];
979 if (*lp) {
980 j = i % MALLOC_BITS(8 * sizeof(u_short));
981 k = ffs(*lp >> j);
982 if (k != 0) {
983 k += j - 1;
984 goto found;
985 }
986 }
987 /* no bit halfway, go to next full short */
988 i /= MALLOC_BITS(8 * sizeof(u_short));
989 for (;;) {
990 if (++i >= bp->total / MALLOC_BITS(8 * sizeof(u_short)))
991 i = 0;
992 lp = &bp->bits[i];
993 if (*lp) {
994 k = ffs(*lp) - 1;
995 break;
996 }
997 }
998found:
999#ifdef MALLOC_STATS
1000 if (i == 0 && k == 0) {
1001 struct region_info *r = find(d, bp->page);
1002 r->f = f;
1003 }
1004#endif
1005
1006 *lp ^= 1 << k;
1007
1008 /* If there are no more free, remove from free-list */
1009 if (--bp->free == 0)
1010 LIST_REMOVE(bp, entries)do { if ((bp)->entries.le_next != ((void*)0)) (bp)->entries
.le_next->entries.le_prev = (bp)->entries.le_prev; *(bp
)->entries.le_prev = (bp)->entries.le_next; ; ; } while
(0)
;
1011
1012 /* Adjust to the real offset of that chunk */
1013 k += (lp - bp->bits) * MALLOC_BITS(8 * sizeof(u_short));
1014
1015 if (moptsmalloc_readonly.mopts.chunk_canaries && size > 0)
1016 bp->bits[bp->offset + k] = size;
1017
1018 k <<= bp->shift;
1019
1020 p = (char *)bp->page + k;
1021 if (bp->size > 0) {
1022 if (d->malloc_junk == 2)
1023 memset(p, SOME_JUNK0xdb, bp->size);
1024 else if (moptsmalloc_readonly.mopts.chunk_canaries)
1025 fill_canary(p, size, bp->size);
1026 }
1027 return p;
1028}
1029
1030static void
1031validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1032{
1033 size_t check_sz = allocated - sz;
1034 u_char *p, *q;
1035
1036 if (check_sz > CHUNK_CHECK_LENGTH32)
1037 check_sz = CHUNK_CHECK_LENGTH32;
1038 p = ptr + sz;
1039 q = p + check_sz;
1040
1041 while (p < q) {
1042 if (*p != (u_char)moptsmalloc_readonly.mopts.chunk_canaries && *p != SOME_JUNK0xdb) {
1043 wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
1044 ptr, p - ptr, sz,
1045 *p == SOME_FREEJUNK0xdf ? " (double free?)" : "");
1046 }
1047 p++;
1048 }
1049}
1050
1051static uint32_t
1052find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1053{
1054 uint32_t chunknum;
1055
1056 if (info->canary != (u_short)d->canary1)
1057 wrterror(d, "chunk info corrupted");
1058
1059 /* Find the chunk number on the page */
1060 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK((1UL << 12) - 1)) >> info->shift;
1061
1062 if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1063 wrterror(d, "modified chunk-pointer %p", ptr);
1064 if (info->bits[chunknum / MALLOC_BITS(8 * sizeof(u_short))] &
1065 (1U << (chunknum % MALLOC_BITS(8 * sizeof(u_short)))))
1066 wrterror(d, "chunk is already free %p", ptr);
1067 if (check && info->size > 0) {
1068 validate_canary(d, ptr, info->bits[info->offset + chunknum],
1069 info->size);
1070 }
1071 return chunknum;
1072}
1073
1074/*
1075 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1076 */
1077static void
1078free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1079{
1080 struct chunk_head *mp;
1081 struct chunk_info *info;
1082 uint32_t chunknum;
1083 int listnum;
1084
1085 info = (struct chunk_info *)r->size;
1086 chunknum = find_chunknum(d, info, ptr, 0);
1087
1088 info->bits[chunknum / MALLOC_BITS(8 * sizeof(u_short))] |= 1U << (chunknum % MALLOC_BITS(8 * sizeof(u_short)));
1089 info->free++;
1090
1091 if (info->free == 1) {
1092 /* Page became non-full */
1093 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS4;
1094 if (info->size != 0)
1095 mp = &d->chunk_dir[info->shift][listnum];
1096 else
1097 mp = &d->chunk_dir[0][listnum];
1098
1099 LIST_INSERT_HEAD(mp, info, entries)do { if (((info)->entries.le_next = (mp)->lh_first) != (
(void*)0)) (mp)->lh_first->entries.le_prev = &(info
)->entries.le_next; (mp)->lh_first = (info); (info)->
entries.le_prev = &(mp)->lh_first; } while (0)
;
1100 return;
1101 }
1102
1103 if (info->free != info->total)
1104 return;
1105
1106 LIST_REMOVE(info, entries)do { if ((info)->entries.le_next != ((void*)0)) (info)->
entries.le_next->entries.le_prev = (info)->entries.le_prev
; *(info)->entries.le_prev = (info)->entries.le_next; ;
; } while (0)
;
1107
1108 if (info->size == 0 && !moptsmalloc_readonly.mopts.malloc_freeunmap)
1109 mprotect(info->page, MALLOC_PAGESIZE(1UL << 12), PROT_READ0x01 | PROT_WRITE0x02);
1110 unmap(d, info->page, MALLOC_PAGESIZE(1UL << 12), 0);
1111
1112 delete(d, r);
1113 if (info->size != 0)
1114 mp = &d->chunk_info_list[info->shift];
1115 else
1116 mp = &d->chunk_info_list[0];
1117 LIST_INSERT_HEAD(mp, info, entries)do { if (((info)->entries.le_next = (mp)->lh_first) != (
(void*)0)) (mp)->lh_first->entries.le_prev = &(info
)->entries.le_next; (mp)->lh_first = (info); (info)->
entries.le_prev = &(mp)->lh_first; } while (0)
;
1118}
1119
1120
1121
1122static void *
1123omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1124{
1125 void *p;
1126 size_t psz;
1127
1128 if (sz > MALLOC_MAXCHUNK(1 << (12 - 1))) {
1129 if (sz >= SIZE_MAX0xffffffffffffffffUL - moptsmalloc_readonly.mopts.malloc_guard - MALLOC_PAGESIZE(1UL << 12)) {
1130 errno(*__errno()) = ENOMEM12;
1131 return NULL((void*)0);
1132 }
1133 sz += moptsmalloc_readonly.mopts.malloc_guard;
1134 psz = PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12)
- 1))
;
1135 p = map(pool, psz, zero_fill);
1136 if (p == MAP_FAILED((void *)-1)) {
1137 errno(*__errno()) = ENOMEM12;
1138 return NULL((void*)0);
1139 }
1140 if (insert(pool, p, sz, f)) {
1141 unmap(pool, p, psz, 0);
1142 errno(*__errno()) = ENOMEM12;
1143 return NULL((void*)0);
1144 }
1145 if (moptsmalloc_readonly.mopts.malloc_guard) {
1146 if (mprotect((char *)p + psz - moptsmalloc_readonly.mopts.malloc_guard,
1147 moptsmalloc_readonly.mopts.malloc_guard, PROT_NONE0x00))
1148 wrterror(pool, "mprotect");
1149 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1150 }
1151
1152 if (MALLOC_MOVE_COND(sz)((sz) - malloc_readonly.mopts.malloc_guard < (1UL <<
12) - 0)
) {
1153 /* fill whole allocation */
1154 if (pool->malloc_junk == 2)
1155 memset(p, SOME_JUNK0xdb, psz - moptsmalloc_readonly.mopts.malloc_guard);
1156 /* shift towards the end */
1157 p = MALLOC_MOVE(p, sz)(((char *)(p)) + (((1UL << 12) - 0 - ((sz) - malloc_readonly
.mopts.malloc_guard)) & ~((1UL << 4) - 1)))
;
1158 /* fill zeros if needed and overwritten above */
1159 if (zero_fill && pool->malloc_junk == 2)
1160 memset(p, 0, sz - moptsmalloc_readonly.mopts.malloc_guard);
1161 } else {
1162 if (pool->malloc_junk == 2) {
1163 if (zero_fill)
1164 memset((char *)p + sz - moptsmalloc_readonly.mopts.malloc_guard,
1165 SOME_JUNK0xdb, psz - sz);
1166 else
1167 memset(p, SOME_JUNK0xdb,
1168 psz - moptsmalloc_readonly.mopts.malloc_guard);
1169 } else if (moptsmalloc_readonly.mopts.chunk_canaries)
1170 fill_canary(p, sz - moptsmalloc_readonly.mopts.malloc_guard,
1171 psz - moptsmalloc_readonly.mopts.malloc_guard);
1172 }
1173
1174 } else {
1175 /* takes care of SOME_JUNK */
1176 p = malloc_bytes(pool, sz, f);
1177 if (zero_fill && p != NULL((void*)0) && sz > 0)
1178 memset(p, 0, sz);
1179 }
1180
1181 return p;
1182}
1183
1184/*
1185 * Common function for handling recursion. Only
1186 * print the error message once, to avoid making the problem
1187 * potentially worse.
1188 */
1189static void
1190malloc_recurse(struct dir_info *d)
1191{
1192 static int noprint;
1193
1194 if (noprint == 0) {
1195 noprint = 1;
1196 wrterror(d, "recursive call");
1197 }
1198 d->active--;
1199 _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex
); } while (0)
;
1200 errno(*__errno()) = EDEADLK11;
1201}
1202
1203void
1204_malloc_init(int from_rthreads)
1205{
1206 u_int i, j, nmutexes;
1207 struct dir_info *d;
1208
1209 _MALLOC_LOCK(1)do { if (__isthreaded) _thread_cb.tc_malloc_lock(1); } while (
0)
;
1210 if (!from_rthreads && moptsmalloc_readonly.mopts.malloc_pool[1]) {
1211 _MALLOC_UNLOCK(1)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(1); } while
(0)
;
1212 return;
1213 }
1214 if (!moptsmalloc_readonly.mopts.malloc_canary)
1215 omalloc_init();
1216
1217 nmutexes = from_rthreads ? moptsmalloc_readonly.mopts.malloc_mutexes : 2;
1218 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK((1UL << 12) - 1)) == 0)
1219 mprotect(&malloc_readonly, sizeof(malloc_readonly),
1220 PROT_READ0x01 | PROT_WRITE0x02);
1221 for (i = 0; i < nmutexes; i++) {
1222 if (moptsmalloc_readonly.mopts.malloc_pool[i])
1223 continue;
1224 if (i == 0) {
1225 omalloc_poolinit(&d, MAP_CONCEAL0x8000);
1226 d->malloc_junk = 2;
1227 for (j = 0; j < MAX_CACHEABLE_SIZE32; j++)
1228 d->cache[j].max = 0;
1229 } else {
1230 omalloc_poolinit(&d, 0);
1231 d->malloc_junk = moptsmalloc_readonly.mopts.def_malloc_junk;
1232 for (j = 0; j < MAX_CACHEABLE_SIZE32; j++)
1233 d->cache[j].max = moptsmalloc_readonly.mopts.def_maxcache >> (j / 8);
1234 }
1235 d->mutex = i;
1236 moptsmalloc_readonly.mopts.malloc_pool[i] = d;
1237 }
1238
1239 if (from_rthreads)
1240 moptsmalloc_readonly.mopts.malloc_mt = 1;
1241 else
1242 moptsmalloc_readonly.mopts.internal_funcs = 1;
1243
1244 /*
1245 * Options have been set and will never be reset.
1246 * Prevent further tampering with them.
1247 */
1248 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK((1UL << 12) - 1)) == 0)
1249 mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ0x01);
1250 _MALLOC_UNLOCK(1)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(1); } while
(0)
;
1251}
1252DEF_STRONG(_malloc_init);
1253
1254#define PROLOGUE(p, fn) \
1255 d = (p); \
1256 if (d == NULL) { \
1257 _malloc_init(0); \
1258 d = (p); \
1259 } \
1260 _MALLOC_LOCK(d->mutex); \
1261 d->func = fn; \
1262 if (d->active++) { \
1263 malloc_recurse(d); \
1264 return NULL; \
1265 } \
1266
1267#define EPILOGUE() \
1268 d->active--; \
1269 _MALLOC_UNLOCK(d->mutex); \
1270 if (r == NULL && mopts.malloc_xmalloc) \
1271 wrterror(d, "out of memory"); \
1272 if (r != NULL) \
1273 errno = saved_errno; \
1274
1275void *
1276malloc(size_t size)
1277{
1278 void *r;
1279 struct dir_info *d;
1280 int saved_errno = errno(*__errno());
1281
1282 PROLOGUE(getpool(), "malloc")
1283 r = omalloc(d, size, 0, CALLER);
1284 EPILOGUE()
1285 return r;
1286}
1287/*DEF_STRONG(malloc);*/
1288
1289void *
1290malloc_conceal(size_t size)
1291{
1292 void *r;
1293 struct dir_info *d;
1294 int saved_errno = errno(*__errno());
1295
1296 PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
1297 r = omalloc(d, size, 0, CALLER);
1298 EPILOGUE()
1299 return r;
1300}
1301DEF_WEAK(malloc_conceal);
1302
1303static struct region_info *
1304findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1305 char **saved_function)
1306{
1307 struct dir_info *pool = argpool;
1308 struct region_info *r = find(pool, p);
1309
1310 STATS_INC(pool->pool_searches);
1311 if (r == NULL) {
      [20.1] 'r' is not equal to NULL
      [21] Taking false branch
1312 u_int i, nmutexes;
1313
1314 nmutexes = moptsmalloc_readonly.mopts.malloc_mt ? moptsmalloc_readonly.mopts.malloc_mutexes : 2;
1315 STATS_INC(pool->other_pool);
1316 for (i = 1; i < nmutexes; i++) {
1317 u_int j = (argpool->mutex + i) & (nmutexes - 1);
1318
1319 pool->active--;
1320 _MALLOC_UNLOCK(pool->mutex);
1321 pool = mopts.malloc_pool[j];
1322 _MALLOC_LOCK(pool->mutex);
1323 pool->active++;
1324 r = find(pool, p);
1325 if (r != NULL((void*)0)) {
1326 *saved_function = pool->func;
1327 pool->func = argpool->func;
1328 break;
1329 }
1330 }
1331 if (r == NULL((void*)0))
1332 wrterror(argpool, "bogus pointer (double free?) %p", p);
1333 }
1334 *foundpool = pool;
1335 return r;
      [22] Returning without writing to '*saved_function'
1336}
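
Path note 22 above is the root of this report: findpool() writes *saved_function only inside its r == NULL branch, when the pointer turns out to live in a different pool. On the path traced here the first find() succeeds, the function returns early, and the caller's local saved_function is never written; the assignment flagged at line 1803 (outside this excerpt) then presumably copies that indeterminate value. In ofree() below the corresponding copy is guarded by *argpool != pool, a relationship the analyzer does not appear to correlate with the early return, which is why the path is reported. A hypothetical way to silence this class of finding (not necessarily the upstream fix) is to give the output parameter a defined value on every path; in terms of the sketch after the Bug Summary:

    /* Variant of the earlier hypothetical lookup(): *saved is defined on every path. */
    static int
    lookup(int key, const char **saved)
    {
            *saved = NULL;          /* caller always sees a defined value */
            if (key != 0)
                    return 1;
            *saved = "fallback";
            return 0;
    }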
1337
1338static void
1339ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
1340{
1341 struct region_info *r;
1342 struct dir_info *pool;
1343 char *saved_function;
1344 size_t sz;
1345
1346 r = findpool(p, *argpool, &pool, &saved_function);
1347
1348 REALSIZE(sz, r);
1349 if (pool->mmap_flag) {
1350 clear = 1;
1351 if (!check)
1352 argsz = sz;
1353 }
1354 if (check) {
1355 if (sz <= MALLOC_MAXCHUNK(1 << (12 - 1))) {
1356 if (moptsmalloc_readonly.mopts.chunk_canaries && sz > 0) {
1357 struct chunk_info *info =
1358 (struct chunk_info *)r->size;
1359 uint32_t chunknum =
1360 find_chunknum(pool, info, p, 0);
1361
1362 if (info->bits[info->offset + chunknum] < argsz)
1363 wrterror(pool, "recorded size %hu"
1364 " < %zu",
1365 info->bits[info->offset + chunknum],
1366 argsz);
1367 } else {
1368 if (sz < argsz)
1369 wrterror(pool, "chunk size %zu < %zu",
1370 sz, argsz);
1371 }
1372 } else if (sz - moptsmalloc_readonly.mopts.malloc_guard < argsz) {
1373 wrterror(pool, "recorded size %zu < %zu",
1374 sz - moptsmalloc_readonly.mopts.malloc_guard, argsz);
1375 }
1376 }
1377 if (sz > MALLOC_MAXCHUNK(1 << (12 - 1))) {
1378 if (!MALLOC_MOVE_COND(sz)((sz) - malloc_readonly.mopts.malloc_guard < (1UL <<
12) - 0)
) {
1379 if (r->p != p)
1380 wrterror(pool, "bogus pointer %p", p);
1381 if (moptsmalloc_readonly.mopts.chunk_canaries)
1382 validate_canary(pool, p,
1383 sz - moptsmalloc_readonly.mopts.malloc_guard,
1384 PAGEROUND(sz - mopts.malloc_guard)(((sz - malloc_readonly.mopts.malloc_guard) + (((1UL <<
12) - 1))) & ~((1UL << 12) - 1))
);
1385 } else {
1386 /* shifted towards the end */
1387 if (p != MALLOC_MOVE(r->p, sz)(((char *)(r->p)) + (((1UL << 12) - 0 - ((sz) - malloc_readonly
.mopts.malloc_guard)) & ~((1UL << 4) - 1)))
)
1388 wrterror(pool, "bogus moved pointer %p", p);
1389 p = r->p;
1390 }
1391 if (moptsmalloc_readonly.mopts.malloc_guard) {
1392 if (sz < moptsmalloc_readonly.mopts.malloc_guard)
1393 wrterror(pool, "guard size");
1394 if (!moptsmalloc_readonly.mopts.malloc_freeunmap) {
1395 if (mprotect((char *)p + PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12)
- 1))
-
1396 moptsmalloc_readonly.mopts.malloc_guard, moptsmalloc_readonly.mopts.malloc_guard,
1397 PROT_READ0x01 | PROT_WRITE0x02))
1398 wrterror(pool, "mprotect");
1399 }
1400 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1401 }
1402 unmap(pool, p, PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12)
- 1))
, clear ? argsz : 0);
1403 delete(pool, r);
1404 } else {
1405 /* Validate and optionally canary check */
1406 struct chunk_info *info = (struct chunk_info *)r->size;
1407 if (info->size != sz)
1408 wrterror(pool, "internal struct corrupt");
1409 find_chunknum(pool, info, p, moptsmalloc_readonly.mopts.chunk_canaries);
1410 if (!clear) {
1411 void *tmp;
1412 int i;
1413
1414 if (moptsmalloc_readonly.mopts.malloc_freecheck) {
1415 for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK15; i++)
1416 if (p == pool->delayed_chunks[i])
1417 wrterror(pool,
1418 "double free %p", p);
1419 }
1420 junk_free(pool->malloc_junk, p, sz);
1421 i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK15;
1422 tmp = p;
1423 p = pool->delayed_chunks[i];
1424 if (tmp == p)
1425 wrterror(pool, "double free %p", tmp);
1426 pool->delayed_chunks[i] = tmp;
1427 if (p != NULL((void*)0)) {
1428 r = find(pool, p);
1429 REALSIZE(sz, r)(sz) = (uintptr_t)(r)->p & ((1UL << 12) - 1), (sz
) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << (
(sz)-1))))
;
1430 if (r != NULL((void*)0))
1431 validate_junk(pool, p, sz);
1432 }
1433 } else if (argsz > 0) {
1434 r = find(pool, p);
1435 explicit_bzero(p, argsz);
1436 }
1437 if (p != NULL((void*)0)) {
1438 if (r == NULL((void*)0))
1439 wrterror(pool,
1440 "bogus pointer (double free?) %p", p);
1441 free_bytes(pool, r, p);
1442 }
1443 }
1444
1445 if (*argpool != pool) {
1446 pool->func = saved_function;
1447 *argpool = pool;
1448 }
1449}
1450
1451void
1452free(void *ptr)
1453{
1454 struct dir_info *d;
1455 int saved_errno = errno(*__errno());
1456
1457 /* This is legal. */
1458 if (ptr == NULL((void*)0))
1459 return;
1460
1461 d = getpool();
1462 if (d == NULL((void*)0))
1463 wrterror(d, "free() called before allocation");
1464 _MALLOC_LOCK(d->mutex);
1465 d->func = "free";
1466 if (d->active++) {
1467 malloc_recurse(d);
1468 return;
1469 }
1470 ofree(&d, ptr, 0, 0, 0);
1471 d->active--;
1472 _MALLOC_UNLOCK(d->mutex);
1473 errno(*__errno()) = saved_errno;
1474}
1475/*DEF_STRONG(free);*/
1476
1477static void
1478freezero_p(void *ptr, size_t sz)
1479{
1480 explicit_bzero(ptr, sz);
1481 free(ptr);
1482}
1483
1484void
1485freezero(void *ptr, size_t sz)
1486{
1487 struct dir_info *d;
1488 int saved_errno = errno(*__errno());
1489
1490 /* This is legal. */
1491 if (ptr == NULL((void*)0))
1492 return;
1493
1494 if (!moptsmalloc_readonly.mopts.internal_funcs) {
1495 freezero_p(ptr, sz);
1496 return;
1497 }
1498
1499 d = getpool();
1500 if (d == NULL((void*)0))
1501 wrterror(d, "freezero() called before allocation");
1502 _MALLOC_LOCK(d->mutex);
1503 d->func = "freezero";
1504 if (d->active++) {
1505 malloc_recurse(d);
1506 return;
1507 }
1508 ofree(&d, ptr, 1, 1, sz);
1509 d->active--;
1510 _MALLOC_UNLOCK(d->mutex);
1511 errno(*__errno()) = saved_errno;
1512}
1513DEF_WEAK(freezero);
1514
1515static void *
1516orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
1517{
1518 struct region_info *r;
1519 struct dir_info *pool;
1520 char *saved_function;
1521 struct chunk_info *info;
1522 size_t oldsz, goldsz, gnewsz;
1523 void *q, *ret;
1524 uint32_t chunknum;
1525 int forced;
1526
1527 if (p == NULL((void*)0))
1528 return omalloc(*argpool, newsz, 0, f);
1529
1530 if (newsz >= SIZE_MAX0xffffffffffffffffUL - moptsmalloc_readonly.mopts.malloc_guard - MALLOC_PAGESIZE(1UL << 12)) {
1531 errno(*__errno()) = ENOMEM12;
1532 return NULL((void*)0);
1533 }
1534
1535 r = findpool(p, *argpool, &pool, &saved_function);
1536
1537 REALSIZE(oldsz, r);
1538 if (moptsmalloc_readonly.mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK(1 << (12 - 1))) {
1539 info = (struct chunk_info *)r->size;
1540 chunknum = find_chunknum(pool, info, p, 0);
1541 }
1542
1543 goldsz = oldsz;
1544 if (oldsz > MALLOC_MAXCHUNK(1 << (12 - 1))) {
1545 if (oldsz < moptsmalloc_readonly.mopts.malloc_guard)
1546 wrterror(pool, "guard size");
1547 oldsz -= moptsmalloc_readonly.mopts.malloc_guard;
1548 }
1549
1550 gnewsz = newsz;
1551 if (gnewsz > MALLOC_MAXCHUNK(1 << (12 - 1)))
1552 gnewsz += moptsmalloc_readonly.mopts.malloc_guard;
1553
1554 forced = moptsmalloc_readonly.mopts.malloc_realloc || pool->mmap_flag;
1555 if (newsz > MALLOC_MAXCHUNK(1 << (12 - 1)) && oldsz > MALLOC_MAXCHUNK(1 << (12 - 1)) && !forced) {
1556 /* First case: from n pages sized allocation to m pages sized
1557 allocation, m > n */
1558 size_t roldsz = PAGEROUND(goldsz)(((goldsz) + (((1UL << 12) - 1))) & ~((1UL <<
12) - 1))
;
1559 size_t rnewsz = PAGEROUND(gnewsz)(((gnewsz) + (((1UL << 12) - 1))) & ~((1UL <<
12) - 1))
;
1560
1561 if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1562 roldsz - rnewsz < moptsmalloc_readonly.mopts.def_maxcache * MALLOC_PAGESIZE(1UL << 12) &&
1563 !moptsmalloc_readonly.mopts.malloc_guard) {
1564
1565 ret = p;
1566 goto done;
1567 }
1568
1569 if (rnewsz > roldsz) {
1570 /* try to extend existing region */
1571 if (!moptsmalloc_readonly.mopts.malloc_guard) {
1572 void *hint = (char *)r->p + roldsz;
1573 size_t needed = rnewsz - roldsz;
1574
1575 STATS_INC(pool->cheap_realloc_tries);
1576 q = MQUERY(hint, needed, pool->mmap_flag);
1577 if (q == hint)
1578 q = MMAPA(hint, needed, pool->mmap_flag);
1579 else
1580 q = MAP_FAILED;
1581 if (q == hint) {
1582 STATS_ADD(pool->malloc_used, needed);
1583 if (pool->malloc_junk == 2)
1584 memset(q, SOME_JUNK0xdb, needed);
1585 r->size = gnewsz;
1586 if (r->p != p) {
1587 /* old pointer is moved */
1588 memmove(r->p, p, oldsz);
1589 p = r->p;
1590 }
1591 if (mopts.chunk_canaries)
1592 fill_canary(p, newsz,
1593 PAGEROUND(newsz));
1594 STATS_SETF(r, f);
1595 STATS_INC(pool->cheap_reallocs);
1596 ret = p;
1597 goto done;
1598 } else if (q != MAP_FAILED((void *)-1)) {
1599 if (munmap(q, needed))
1600 wrterror(pool, "munmap %p", q);
1601 }
1602 }
1603 } else if (rnewsz < roldsz) {
1604 /* shrink number of pages */
1605 if (moptsmalloc_readonly.mopts.malloc_guard) {
1606 if (mprotect((char *)r->p + rnewsz -
1607 moptsmalloc_readonly.mopts.malloc_guard, moptsmalloc_readonly.mopts.malloc_guard,
1608 PROT_NONE0x00))
1609 wrterror(pool, "mprotect");
1610 }
1611 if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1612 wrterror(pool, "munmap %p", (char *)r->p + rnewsz);
1613 STATS_SUB(pool->malloc_used, roldsz - rnewsz);
1614 r->size = gnewsz;
1615 if (MALLOC_MOVE_COND(gnewsz)) {
1616 void *pp = MALLOC_MOVE(r->p, gnewsz);
1617 memmove(pp, p, newsz);
1618 p = pp;
1619 } else if (mopts.chunk_canaries)
1620 fill_canary(p, newsz, PAGEROUND(newsz));
1621 STATS_SETF(r, f);
1622 ret = p;
1623 goto done;
1624 } else {
1625 /* number of pages remains the same */
1626 void *pp = r->p;
1627
1628 r->size = gnewsz;
1629 if (MALLOC_MOVE_COND(gnewsz))
1630 pp = MALLOC_MOVE(r->p, gnewsz);
1631 if (p != pp) {
1632 memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1633 p = pp;
1634 }
1635 if (p == r->p) {
1636 if (newsz > oldsz && pool->malloc_junk == 2)
1637 memset((char *)p + newsz, SOME_JUNK0xdb,
1638 rnewsz - moptsmalloc_readonly.mopts.malloc_guard -
1639 newsz);
1640 if (mopts.chunk_canaries)
1641 fill_canary(p, newsz, PAGEROUND(newsz));
1642 }
1643 STATS_SETF(r, f);
1644 ret = p;
1645 goto done;
1646 }
1647 }
1648 if (oldsz <= MALLOC_MAXCHUNK(1 << (12 - 1)) && oldsz > 0 &&
1649 newsz <= MALLOC_MAXCHUNK(1 << (12 - 1)) && newsz > 0 &&
1650 1 << find_chunksize(newsz) == oldsz && !forced) {
1651 /* do not reallocate if the new size fits well in the existing chunk */
1652 if (pool->malloc_junk == 2)
1653 memset((char *)p + newsz, SOME_JUNK0xdb, oldsz - newsz);
1654 if (moptsmalloc_readonly.mopts.chunk_canaries) {
1655 info->bits[info->offset + chunknum] = newsz;
1656 fill_canary(p, newsz, info->size);
1657 }
1658 STATS_SETF(r, f);
1659 ret = p;
1660 } else if (newsz != oldsz || forced) {
1661 /* create new allocation */
1662 q = omalloc(pool, newsz, 0, f);
1663 if (q == NULL((void*)0)) {
1664 ret = NULL((void*)0);
1665 goto done;
1666 }
1667 if (newsz != 0 && oldsz != 0)
1668 memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1669 ofree(&pool, p, 0, 0, 0);
1670 ret = q;
1671 } else {
1672 /* oldsz == newsz */
1673 if (newsz != 0)
1674 wrterror(pool, "realloc internal inconsistency");
1675 STATS_SETF(r, f);
1676 ret = p;
1677 }
1678done:
1679 if (*argpool != pool) {
1680 pool->func = saved_function;
1681 *argpool = pool;
1682 }
1683 return ret;
1684}
1685
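In the rnewsz > roldsz branch of orealloc() above, the allocator first probes with mquery() and only then maps the missing pages directly behind the existing region, falling back to an ordinary allocate/copy/free cycle when that address range is unavailable. mquery() is OpenBSD-specific; the following is only a portable sketch of the same try-to-extend-in-place idea, and it ignores the guard pages, junk filling and statistics the real code maintains:

#include <stddef.h>
#include <sys/mman.h>

/* Sketch: try to grow an anonymous mapping in place by mapping the
 * pages immediately behind it at a hint address. */
static int
try_extend(void *region, size_t oldlen, size_t newlen)
{
        void *hint = (char *)region + oldlen;
        size_t needed = newlen - oldlen;
        void *q = mmap(hint, needed, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);

        if (q == MAP_FAILED)
                return -1;
        if (q != hint) {
                /* The kernel placed it elsewhere: undo and report failure. */
                munmap(q, needed);
                return -1;
        }
        return 0;               /* region is now newlen bytes long */
}
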
1686void *
1687realloc(void *ptr, size_t size)
1688{
1689 struct dir_info *d;
1690 void *r;
1691 int saved_errno = errno(*__errno());
1692
1693 PROLOGUE(getpool(), "realloc")
1694 r = orealloc(&d, ptr, size, CALLER);
1695 EPILOGUE()
1696 return r;
1697}
1698/*DEF_STRONG(realloc);*/
1699
1700/*
1701 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1702 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1703 */
1704#define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
1705
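The MUL_NO_OVERFLOW comment above captures the idiom used by calloc() and recallocarray() below: if both factors are smaller than 2^(4 * sizeof(size_t)), i.e. sqrt(SIZE_MAX + 1), the product cannot overflow, so the relatively expensive SIZE_MAX / nmemb division is only performed when at least one factor is large. A standalone sketch of the same check:

#include <stdint.h>
#include <stddef.h>

#define MUL_NO_OVERFLOW ((size_t)1 << (sizeof(size_t) * 4))

/* Store nmemb * size in *res and return 1, or return 0 on overflow. */
static int
mul_fits(size_t nmemb, size_t size, size_t *res)
{
        if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
            nmemb > 0 && SIZE_MAX / nmemb < size)
                return 0;
        *res = nmemb * size;
        return 1;
}
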
1706void *
1707calloc(size_t nmemb, size_t size)
1708{
1709 struct dir_info *d;
1710 void *r;
1711 int saved_errno = errno;
1712
1713 PROLOGUE(getpool(), "calloc")
1714 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1715 nmemb > 0 && SIZE_MAX / nmemb < size) {
1716 d->active--;
1717 _MALLOC_UNLOCK(d->mutex);
1718 if (mopts.malloc_xmalloc)
1719 wrterror(d, "out of memory");
1720 errno = ENOMEM;
1721 return NULL;
1722 }
1723
1724 size *= nmemb;
1725 r = omalloc(d, size, 1, CALLER);
1726 EPILOGUE()
1727 return r;
1728}
1729/*DEF_STRONG(calloc);*/
1730
1731void *
1732calloc_conceal(size_t nmemb, size_t size)
1733{
1734 struct dir_info *d;
1735 void *r;
1736 int saved_errno = errno;
1737
1738 PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
1739 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1740 nmemb > 0 && SIZE_MAX / nmemb < size) {
1741 d->active--;
1742 _MALLOC_UNLOCK(d->mutex);
1743 if (mopts.malloc_xmalloc)
1744 wrterror(d, "out of memory");
1745 errno = ENOMEM;
1746 return NULL;
1747 }
1748
1749 size *= nmemb;
1750 r = omalloc(d, size, 1, CALLER);
1751 EPILOGUE()
1752 return r;
1753}
1754DEF_WEAK(calloc_conceal);
1755
1756static void *
1757orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
1758 size_t newsize, void *f)
1759{
1760 struct region_info *r;
1761 struct dir_info *pool;
1762 char *saved_function;
    [16] 'saved_function' declared without an initial value
1763 void *newptr;
1764 size_t sz;
1765
1766 if (p == NULL)
    [16.1] 'p' is not equal to NULL; [17] Taking false branch
1767 return omalloc(*argpool, newsize, 1, f);
1768
1769 if (oldsize == newsize)
    [18] Assuming 'oldsize' is not equal to 'newsize'; [19] Taking false branch
1770 return p;
1771
1772 r = findpool(p, *argpool, &pool, &saved_function);
    [20] Calling 'findpool'; [23] Returning from 'findpool'
1773
1774 REALSIZE(sz, r);
    [24] Assuming 'sz' is equal to 0; [25] '?' condition is true
1775 if (sz <= MALLOC_MAXCHUNK) {
    [26] Assuming the condition is false; [27] Taking false branch
1776 if (mopts.chunk_canaries && sz > 0) {
1777 struct chunk_info *info = (struct chunk_info *)r->size;
1778 uint32_t chunknum = find_chunknum(pool, info, p, 0);
1779
1780 if (info->bits[info->offset + chunknum] != oldsize)
1781 wrterror(pool, "recorded old size %hu != %zu",
1782 info->bits[info->offset + chunknum],
1783 oldsize);
1784 }
1785 } else if (oldsize < (sz - mopts.malloc_guard) / 2)
    [28] Assuming the condition is false; [29] Taking false branch
1786 wrterror(pool, "recorded old size %zu != %zu",
1787 sz - mopts.malloc_guard, oldsize);
1788
1789 newptr = omalloc(pool, newsize, 0, f);
1790 if (newptr == NULL)
    [30] Assuming 'newptr' is not equal to NULL; [31] Taking false branch
1791 goto done;
1792
1793 if (newsize > oldsize) {
    [32] Assuming 'newsize' is <= 'oldsize'; [33] Taking false branch
1794 memcpy(newptr, p, oldsize);
1795 memset((char *)newptr + oldsize, 0, newsize - oldsize);
1796 } else
1797 memcpy(newptr, p, newsize);
1798
1799 ofree(&pool, p, 1, 0, oldsize);
1800
1801done:
1802 if (*argpool != pool) {
    [34] Assuming the condition is true; [35] Taking true branch
1803 pool->func = saved_function;
    [36] Assigned value is garbage or undefined
1804 *argpool = pool;
1805 }
1806
1807 return newptr;
1808}
1809
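The path the checker reports (events 16 through 36 above) requires findpool() to return with *argpool != pool while never having written *saved_function; whether that combination is actually reachable depends on findpool(), which is outside this excerpt, so the report may well be a false positive. The following is a reduced, hypothetical sketch of the code shape being flagged, with made-up names; it is not the findpool() logic itself:

#include <stddef.h>

struct pool { const char *func; };

/* Hypothetical helper: the out-parameter saved is written only on the
 * branch that switches pools. */
static void
find(struct pool **poolp, const char **savedp, struct pool *arg,
    struct pool *other)
{
        if (other != NULL) {
                *savedp = arg->func;    /* written only here */
                *poolp = other;
        } else
                *poolp = arg;
}

void
caller(struct pool *argpool, struct pool *other)
{
        struct pool *pool;
        const char *saved_function;     /* no initial value, like event 16 */

        find(&pool, &saved_function, argpool, other);
        if (argpool != pool)
                pool->func = saved_function;    /* mirrors the line flagged as event 36 */
}

In this sketch the write and the later read are correlated (the read only happens when the pools differ, which implies the write happened), so a checker has to track that correlation across the call to avoid warning; initializing saved_function, or having the helper write it on every path, would silence the report either way.
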
1810static void *
1811recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1812{
1813 size_t oldsize, newsize;
1814 void *newptr;
1815
1816 if (ptr == NULL((void*)0))
1817 return calloc(newnmemb, size);
1818
1819 if ((newnmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) &&
1820 newnmemb > 0 && SIZE_MAX0xffffffffffffffffUL / newnmemb < size) {
1821 errno(*__errno()) = ENOMEM12;
1822 return NULL((void*)0);
1823 }
1824 newsize = newnmemb * size;
1825
1826 if ((oldnmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) &&
1827 oldnmemb > 0 && SIZE_MAX0xffffffffffffffffUL / oldnmemb < size) {
1828 errno(*__errno()) = EINVAL22;
1829 return NULL((void*)0);
1830 }
1831 oldsize = oldnmemb * size;
1832
1833 /*
1834 * Don't bother too much if we're shrinking just a bit,
1835 * we do not shrink for series of small steps, oh well.
1836 */
1837 if (newsize <= oldsize) {
1838 size_t d = oldsize - newsize;
1839
1840 if (d < oldsize / 2 && d < MALLOC_PAGESIZE(1UL << 12)) {
1841 memset((char *)ptr + newsize, 0, d);
1842 return ptr;
1843 }
1844 }
1845
1846 newptr = malloc(newsize);
1847 if (newptr == NULL((void*)0))
1848 return NULL((void*)0);
1849
1850 if (newsize > oldsize) {
1851 memcpy(newptr, ptr, oldsize);
1852 memset((char *)newptr + oldsize, 0, newsize - oldsize);
1853 } else
1854 memcpy(newptr, ptr, newsize);
1855
1856 explicit_bzero(ptr, oldsize);
1857 free(ptr);
1858
1859 return newptr;
1860}
1861
1862void *
1863recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1864{
1865 struct dir_info *d;
1866 size_t oldsize = 0, newsize;
1867 void *r;
1868 int saved_errno = errno(*__errno());
1869
1870 if (!mopts.internal_funcs)
    [1] Assuming field 'internal_funcs' is not equal to 0; [2] Taking false branch
1871 return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1872
1873 PROLOGUE(getpool(), "recallocarray")
    [3] Assuming 'd' is not equal to null; [4] Taking false branch
    [5] Assuming '__isthreaded' is 0; [6] Taking false branch
    [7] Loop condition is false. Exiting loop
    [8] Assuming the condition is false; [9] Taking false branch
1874
1875 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
    [10] Assuming the condition is false; [11] Assuming the condition is false
1876 newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1877 d->active--;
1878 _MALLOC_UNLOCK(d->mutex);
1879 if (mopts.malloc_xmalloc)
1880 wrterror(d, "out of memory");
1881 errno = ENOMEM;
1882 return NULL;
1883 }
1884 newsize = newnmemb * size;
1885
1886 if (ptr != NULL) {
    [12] Assuming 'ptr' is not equal to NULL; [13] Taking true branch
1887 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
    [14] Assuming the condition is false
1888 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1889 d->active--;
1890 _MALLOC_UNLOCK(d->mutex);
1891 errno = EINVAL;
1892 return NULL;
1893 }
1894 oldsize = oldnmemb * size;
1895 }
1896
1897 r = orecallocarray(&d, ptr, oldsize, newsize, CALLER);
    [15] Calling 'orecallocarray'
1898 EPILOGUE()
1899 return r;
1900}
1901DEF_WEAK(recallocarray);
1902
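recallocarray() grows or shrinks an array of fixed-size elements, zero-fills any newly exposed bytes, and clears the old allocation before releasing it; unlike realloc(), the caller must supply the previous element count. A minimal usage sketch (recallocarray() is an OpenBSD extension, also shipped by libbsd; the record type is hypothetical):

#include <stdlib.h>

struct rec { int id; char name[32]; };

/* Hypothetical helper: resize a table of records, zero-initialized. */
static struct rec *
resize_table(struct rec *tab, size_t oldn, size_t newn)
{
        struct rec *ntab = recallocarray(tab, oldn, newn, sizeof(*tab));

        if (ntab == NULL)
                return NULL;    /* tab is untouched and still valid */
        return ntab;            /* elements [oldn, newn) read as zero */
}
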
1903static void *
1904mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1905{
1906 char *p, *q;
1907
1908 if (alignment < MALLOC_PAGESIZE(1UL << 12) || ((alignment - 1) & alignment) != 0)
1909 wrterror(d, "mapalign bad alignment");
1910 if (sz != PAGEROUND(sz))
1911 wrterror(d, "mapalign round");
1912
1913 /* Allocate sz + alignment bytes of memory, which must include a
1914 * subrange of size bytes that is properly aligned. Unmap the
1915 * other bytes, and then return that subrange.
1916 */
1917
1918 /* We need sz + alignment to fit into a size_t. */
1919 if (alignment > SIZE_MAX0xffffffffffffffffUL - sz)
1920 return MAP_FAILED((void *)-1);
1921
1922 p = map(d, sz + alignment, zero_fill);
1923 if (p == MAP_FAILED((void *)-1))
1924 return MAP_FAILED((void *)-1);
1925 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1926 if (q != p) {
1927 if (munmap(p, q - p))
1928 wrterror(d, "munmap %p", p);
1929 }
1930 if (munmap(q + sz, alignment - (q - p)))
1931 wrterror(d, "munmap %p", q + sz);
1932 STATS_SUB(d->malloc_used, alignment);
1933
1934 return q;
1935}
1936
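mapalign() over-allocates by 'alignment' bytes, rounds the resulting pointer up to the requested boundary, and unmaps the unused slack on both sides. The rounding step is the standard power-of-two idiom; a small sketch, assuming the alignment is a power of two:

#include <stdint.h>

/* Round addr up to the next multiple of align (align must be a power of two). */
static inline uintptr_t
align_up(uintptr_t addr, uintptr_t align)
{
        return (addr + align - 1) & ~(align - 1);
}

/* Example: align_up(0x1234, 0x1000) == 0x2000 and align_up(0x2000, 0x1000) == 0x2000,
 * matching the q = ((uintptr_t)p + alignment - 1) & ~(alignment - 1) step above. */
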
1937static void *
1938omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1939 void *f)
1940{
1941 size_t psz;
1942 void *p;
1943
1944 /* If between half a page and a page, avoid MALLOC_MOVE. */
1945 if (sz > MALLOC_MAXCHUNK(1 << (12 - 1)) && sz < MALLOC_PAGESIZE(1UL << 12))
1946 sz = MALLOC_PAGESIZE(1UL << 12);
1947 if (alignment <= MALLOC_PAGESIZE(1UL << 12)) {
1948 /*
1949 * max(size, alignment) is enough to assure the requested
1950 * alignment, since the allocator always allocates
1951 * power-of-two blocks.
1952 */
1953 if (sz < alignment)
1954 sz = alignment;
1955 return omalloc(pool, sz, zero_fill, f);
1956 }
1957
1958 if (sz >= SIZE_MAX0xffffffffffffffffUL - moptsmalloc_readonly.mopts.malloc_guard - MALLOC_PAGESIZE(1UL << 12)) {
1959 errno(*__errno()) = ENOMEM12;
1960 return NULL((void*)0);
1961 }
1962
1963 if (sz < MALLOC_PAGESIZE(1UL << 12))
1964 sz = MALLOC_PAGESIZE(1UL << 12);
1965 sz += moptsmalloc_readonly.mopts.malloc_guard;
1966 psz = PAGEROUND(sz);
1967
1968 p = mapalign(pool, alignment, psz, zero_fill);
1969 if (p == MAP_FAILED((void *)-1)) {
1970 errno(*__errno()) = ENOMEM12;
1971 return NULL((void*)0);
1972 }
1973
1974 if (insert(pool, p, sz, f)) {
1975 unmap(pool, p, psz, 0);
1976 errno(*__errno()) = ENOMEM12;
1977 return NULL((void*)0);
1978 }
1979
1980 if (moptsmalloc_readonly.mopts.malloc_guard) {
1981 if (mprotect((char *)p + psz - moptsmalloc_readonly.mopts.malloc_guard,
1982 moptsmalloc_readonly.mopts.malloc_guard, PROT_NONE0x00))
1983 wrterror(pool, "mprotect");
1984 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1985 }
1986
1987 if (pool->malloc_junk == 2) {
1988 if (zero_fill)
1989 memset((char *)p + sz - moptsmalloc_readonly.mopts.malloc_guard,
1990 SOME_JUNK0xdb, psz - sz);
1991 else
1992 memset(p, SOME_JUNK0xdb, psz - moptsmalloc_readonly.mopts.malloc_guard);
1993 } else if (moptsmalloc_readonly.mopts.chunk_canaries)
1994 fill_canary(p, sz - moptsmalloc_readonly.mopts.malloc_guard,
1995 psz - moptsmalloc_readonly.mopts.malloc_guard);
1996
1997 return p;
1998}
1999
2000int
2001posix_memalign(void **memptr, size_t alignment, size_t size)
2002{
2003 struct dir_info *d;
2004 int res, saved_errno = errno(*__errno());
2005 void *r;
2006
2007 /* Make sure that alignment is a large enough power of 2. */
2008 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2009 return EINVAL22;
2010
2011 d = getpool();
2012 if (d == NULL((void*)0)) {
2013 _malloc_init(0);
2014 d = getpool();
2015 }
2016 _MALLOC_LOCK(d->mutex);
2017 d->func = "posix_memalign";
2018 if (d->active++) {
2019 malloc_recurse(d);
2020 goto err;
2021 }
2022 r = omemalign(d, alignment, size, 0, CALLER((void*)0));
2023 d->active--;
2024 _MALLOC_UNLOCK(d->mutex);
2025 if (r == NULL((void*)0)) {
2026 if (moptsmalloc_readonly.mopts.malloc_xmalloc)
2027 wrterror(d, "out of memory");
2028 goto err;
2029 }
2030 errno(*__errno()) = saved_errno;
2031 *memptr = r;
2032 return 0;
2033
2034err:
2035 res = errno(*__errno());
2036 errno(*__errno()) = saved_errno;
2037 return res;
2038}
2039/*DEF_STRONG(posix_memalign);*/
2040
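posix_memalign() reports failure through its return value (EINVAL or ENOMEM) rather than through errno, which is why the implementation above saves and restores the caller's errno. A usage sketch:

#include <stdlib.h>
#include <stdio.h>

int
main(void)
{
        void *buf;
        int error = posix_memalign(&buf, 64, 4096);    /* 64-byte aligned, 4 KiB */

        if (error != 0) {
                fprintf(stderr, "posix_memalign failed: %d\n", error);
                return 1;
        }
        free(buf);
        return 0;
}
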
2041void *
2042aligned_alloc(size_t alignment, size_t size)
2043{
2044 struct dir_info *d;
2045 int saved_errno = errno(*__errno());
2046 void *r;
2047
2048 /* Make sure that alignment is a positive power of 2. */
2049 if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2050 errno(*__errno()) = EINVAL22;
2051 return NULL((void*)0);
2052 };
2053 /* Per spec, size should be a multiple of alignment */
2054 if ((size & (alignment - 1)) != 0) {
2055 errno(*__errno()) = EINVAL22;
2056 return NULL((void*)0);
2057 }
2058
2059 PROLOGUE(getpool(), "aligned_alloc")
2060 r = omemalign(d, alignment, size, 0, CALLER);
2061 EPILOGUE()
2062 return r;
2063}
2064/*DEF_STRONG(aligned_alloc);*/
2065
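As the checks above enforce, this aligned_alloc() rejects alignments that are zero or not a power of two, and sizes that are not a multiple of the alignment, returning NULL with errno set to EINVAL. For example:

#include <stdlib.h>

int
main(void)
{
        void *a = aligned_alloc(256, 1024);     /* OK: 1024 is a multiple of 256 */
        void *b = aligned_alloc(256, 1000);     /* rejected here: not a multiple of 256 */

        free(a);
        free(b);        /* free(NULL) is a no-op */
        return 0;
}
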
2066#ifdef MALLOC_STATS
2067
2068struct malloc_leak {
2069 void *f;
2070 size_t total_size;
2071 int count;
2072};
2073
2074struct leaknode {
2075 RBT_ENTRY(leaknode) entry;
2076 struct malloc_leak d;
2077};
2078
2079static inline int
2080leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2081{
2082 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2083}
2084
2085static RBT_HEAD(leaktree, leaknode) leakhead;
2086RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2087RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2088
2089static void
2090putleakinfo(void *f, size_t sz, int cnt)
2091{
2092 struct leaknode key, *p;
2093 static struct leaknode *page;
2094 static int used;
2095
2096 if (cnt == 0 || page == MAP_FAILED((void *)-1))
2097 return;
2098
2099 key.d.f = f;
2100 p = RBT_FIND(leaktree, &leakhead, &key);
2101 if (p == NULL((void*)0)) {
2102 if (page == NULL((void*)0) ||
2103 used >= MALLOC_PAGESIZE(1UL << 12) / sizeof(struct leaknode)) {
2104 page = MMAP(MALLOC_PAGESIZE, 0);
2105 if (page == MAP_FAILED((void *)-1))
2106 return;
2107 used = 0;
2108 }
2109 p = &page[used++];
2110 p->d.f = f;
2111 p->d.total_size = sz * cnt;
2112 p->d.count = cnt;
2113 RBT_INSERT(leaktree, &leakhead, p);
2114 } else {
2115 p->d.total_size += sz * cnt;
2116 p->d.count += cnt;
2117 }
2118}
2119
2120static struct malloc_leak *malloc_leaks;
2121
2122static void
2123dump_leaks(int fd)
2124{
2125 struct leaknode *p;
2126 int i = 0;
2127
2128 dprintf(fd, "Leak report\n");
2129 dprintf(fd, " f sum # avg\n");
2130 /* XXX only one page of summary */
2131 if (malloc_leaks == NULL((void*)0))
2132 malloc_leaks = MMAP(MALLOC_PAGESIZE, 0);
2133 if (malloc_leaks != MAP_FAILED((void *)-1))
2134 memset(malloc_leaks, 0, MALLOC_PAGESIZE(1UL << 12));
2135 RBT_FOREACH(p, leaktree, &leakhead) {
2136 dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f,
2137 p->d.total_size, p->d.count, p->d.total_size / p->d.count);
2138 if (malloc_leaks == MAP_FAILED((void *)-1) ||
2139 i >= MALLOC_PAGESIZE(1UL << 12) / sizeof(struct malloc_leak))
2140 continue;
2141 malloc_leaks[i].f = p->d.f;
2142 malloc_leaks[i].total_size = p->d.total_size;
2143 malloc_leaks[i].count = p->d.count;
2144 i++;
2145 }
2146}
2147
2148static void
2149dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
2150{
2151 while (p != NULL((void*)0)) {
2152 dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
2153 p->page, ((p->bits[0] & 1) ? NULL((void*)0) : f),
2154 p->size, p->free, p->total);
2155 if (!fromfreelist) {
2156 if (p->bits[0] & 1)
2157 putleakinfo(NULL((void*)0), p->size, p->total - p->free);
2158 else {
2159 putleakinfo(f, p->size, 1);
2160 putleakinfo(NULL((void*)0), p->size,
2161 p->total - p->free - 1);
2162 }
2163 break;
2164 }
2165 p = LIST_NEXT(p, entries)((p)->entries.le_next);
2166 if (p != NULL((void*)0))
2167 dprintf(fd, " ");
2168 }
2169}
2170
2171static void
2172dump_free_chunk_info(int fd, struct dir_info *d)
2173{
2174 int i, j, count;
2175 struct chunk_info *p;
2176
2177 dprintf(fd, "Free chunk structs:\n");
2178 for (i = 0; i <= MALLOC_MAXSHIFT(12 - 1); i++) {
2179 count = 0;
2180 LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2181 count++;
2182 for (j = 0; j < MALLOC_CHUNK_LISTS4; j++) {
2183 p = LIST_FIRST(&d->chunk_dir[i][j])((&d->chunk_dir[i][j])->lh_first);
2184 if (p == NULL((void*)0) && count == 0)
2185 continue;
2186 dprintf(fd, "%2d) %3d ", i, count);
2187 if (p != NULL((void*)0))
2188 dump_chunk(fd, p, NULL((void*)0), 1);
2189 else
2190 dprintf(fd, "\n");
2191 }
2192 }
2193
2194}
2195
2196static void
2197dump_free_page_info(int fd, struct dir_info *d)
2198{
2199 struct cache *cache;
2200 size_t i, total = 0;
2201
2202 dprintf(fd, "Cached:\n");
2203 for (i = 0; i < MAX_CACHEABLE_SIZE32; i++) {
2204 cache = &d->cache[i];
2205 if (cache->length != 0)
2206 dprintf(fd, "%zu(%u): %u = %zu\n", i + 1, cache->max, cache->length, cache->length * (i + 1));
2207 total += cache->length * (i + 1);
2208 }
2209 dprintf(fd, "Free pages cached: %zu\n", total);
2210}
2211
2212static void
2213malloc_dump1(int fd, int poolno, struct dir_info *d)
2214{
2215 size_t i, realsize;
2216
2217 dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2218 if (d == NULL((void*)0))
2219 return;
2220 dprintf(fd, "J=%d Fl=%x\n", d->malloc_junk, d->mmap_flag);
2221 dprintf(fd, "Region slots free %zu/%zu\n",
2222 d->regions_free, d->regions_total);
2223 dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
2224 dprintf(fd, "Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2225 dprintf(fd, "Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2226 dprintf(fd, "Cheap reallocs %zu/%zu\n",
2227 d->cheap_reallocs, d->cheap_realloc_tries);
2228 dprintf(fd, "Other pool searches %zu/%zu\n",
2229 d->other_pool, d->pool_searches);
2230 dprintf(fd, "In use %zu\n", d->malloc_used);
2231 dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
2232 dump_free_chunk_info(fd, d);
2233 dump_free_page_info(fd, d);
2234 dprintf(fd,
2235 "slot) hash d type page f size [free/n]\n");
2236 for (i = 0; i < d->regions_total; i++) {
2237 if (d->r[i].p != NULL((void*)0)) {
2238 size_t h = hash(d->r[i].p) &
2239 (d->regions_total - 1);
2240 dprintf(fd, "%4zx) #%4zx %zd ",
2241 i, h, h - i);
2242 REALSIZE(realsize, &d->r[i]);
2243 if (realsize > MALLOC_MAXCHUNK(1 << (12 - 1))) {
2244 putleakinfo(d->r[i].f, realsize, 1);
2245 dprintf(fd,
2246 "pages %18p %18p %zu\n", d->r[i].p,
2247 d->r[i].f, realsize);
2248 } else
2249 dump_chunk(fd,
2250 (struct chunk_info *)d->r[i].size,
2251 d->r[i].f, 0);
2252 }
2253 }
2254 dump_leaks(fd);
2255 dprintf(fd, "\n");
2256}
2257
2258void
2259malloc_dump(int fd, int poolno, struct dir_info *pool)
2260{
2261 int i;
2262 void *p;
2263 struct region_info *r;
2264 int saved_errno = errno(*__errno());
2265
2266 if (pool == NULL((void*)0))
2267 return;
2268 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK15 + 1; i++) {
2269 p = pool->delayed_chunks[i];
2270 if (p == NULL((void*)0))
2271 continue;
2272 r = find(pool, p);
2273 if (r == NULL((void*)0))
2274 wrterror(pool, "bogus pointer in malloc_dump %p", p);
2275 free_bytes(pool, r, p);
2276 pool->delayed_chunks[i] = NULL((void*)0);
2277 }
2278 /* XXX leak when run multiple times */
2279 RBT_INIT(leaktree, &leakhead);
2280 malloc_dump1(fd, poolno, pool);
2281 errno(*__errno()) = saved_errno;
2282}
2283DEF_WEAK(malloc_dump);
2284
2285void
2286malloc_gdump(int fd)
2287{
2288 int i;
2289 int saved_errno = errno(*__errno());
2290
2291 for (i = 0; i < moptsmalloc_readonly.mopts.malloc_mutexes; i++)
2292 malloc_dump(fd, i, moptsmalloc_readonly.mopts.malloc_pool[i]);
2293
2294 errno(*__errno()) = saved_errno;
2295}
2296DEF_WEAK(malloc_gdump);
2297
2298static void
2299malloc_exit(void)
2300{
2301 int save_errno = errno(*__errno()), fd, i;
2302
2303 fd = open("malloc.out", O_RDWR|O_APPEND);
2304 if (fd != -1) {
2305 dprintf(fd, "******** Start dump %s *******\n", __progname);
2306 dprintf(fd,
2307 "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
2308 moptsmalloc_readonly.mopts.malloc_mt, moptsmalloc_readonly.mopts.malloc_mutexes,
2309 moptsmalloc_readonly.mopts.internal_funcs, moptsmalloc_readonly.mopts.malloc_freecheck,
2310 moptsmalloc_readonly.mopts.malloc_freeunmap, moptsmalloc_readonly.mopts.def_malloc_junk,
2311 moptsmalloc_readonly.mopts.malloc_realloc, moptsmalloc_readonly.mopts.malloc_xmalloc,
2312 moptsmalloc_readonly.mopts.chunk_canaries, moptsmalloc_readonly.mopts.def_maxcache,
2313 moptsmalloc_readonly.mopts.malloc_guard);
2314
2315 for (i = 0; i < moptsmalloc_readonly.mopts.malloc_mutexes; i++)
2316 malloc_dump(fd, i, moptsmalloc_readonly.mopts.malloc_pool[i]);
2317 dprintf(fd, "******** End dump %s *******\n", __progname);
2318 close(fd);
2319 } else
2320 dprintf(STDERR_FILENO2,
2321 "malloc() warning: Couldn't dump stats\n");
2322 errno(*__errno()) = save_errno;
2323}
2324
2325#endif /* MALLOC_STATS */