| File: | src/lib/libc/stdlib/malloc.c |
| Warning: | line 1933, column 4: Array subscript is undefined |
| 1 | /* $OpenBSD: malloc.c,v 1.295 2023/12/19 06:59:28 otto Exp $ */ | |||
| 2 | /* | |||
| 3 | * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net> | |||
| 4 | * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org> | |||
| 5 | * Copyright (c) 2008 Damien Miller <djm@openbsd.org> | |||
| 6 | * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org> | |||
| 7 | * | |||
| 8 | * Permission to use, copy, modify, and distribute this software for any | |||
| 9 | * purpose with or without fee is hereby granted, provided that the above | |||
| 10 | * copyright notice and this permission notice appear in all copies. | |||
| 11 | * | |||
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |||
| 13 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |||
| 14 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |||
| 15 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |||
| 16 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |||
| 17 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |||
| 18 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |||
| 19 | */ | |||
| 20 | ||||
| 21 | /* | |||
| 22 | * If we meet some day, and you think this stuff is worth it, you | |||
| 23 | * can buy me a beer in return. Poul-Henning Kamp | |||
| 24 | */ | |||
| 25 | ||||
| 26 | #ifndef MALLOC_SMALL | |||
| 27 | #define MALLOC_STATS | |||
| 28 | #endif | |||
| 29 | ||||
| 30 | #include <sys/types.h> | |||
| 31 | #include <sys/queue.h> | |||
| 32 | #include <sys/mman.h> | |||
| 33 | #include <sys/sysctl.h> | |||
| 34 | #include <uvm/uvmexp.h> | |||
| 35 | #include <errno.h> | |||
| 36 | #include <stdarg.h> | |||
| 37 | #include <stdint.h> | |||
| 38 | #include <stdio.h> | |||
| 39 | #include <stdlib.h> | |||
| 40 | #include <string.h> | |||
| 41 | #include <unistd.h> | |||
| 42 | ||||
| 43 | #ifdef MALLOC_STATS | |||
| 44 | #include <sys/tree.h> | |||
| 45 | #include <sys/ktrace.h> | |||
| 46 | #include <dlfcn.h> | |||
| 47 | #endif | |||
| 48 | ||||
| 49 | #include "thread_private.h" | |||
| 50 | #include <tib.h> | |||
| 51 | ||||
| 52 | #define MALLOC_PAGESHIFT _MAX_PAGE_SHIFT | |||
| 53 | ||||
| 54 | #define MALLOC_MINSHIFT 4 | |||
| 55 | #define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1) | |||
| 56 | #define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT) | |||
| 57 | #define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT) | |||
| 58 | #define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1) | |||
| 59 | #define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK)) | |||
| 60 | ||||
| 61 | #define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT) | |||
| 62 | #define MALLOC_MAXCACHE 256 | |||
| 63 | #define MALLOC_DELAYED_CHUNK_MASK 15 | |||
| 64 | #ifdef MALLOC_STATS | |||
| 65 | #define MALLOC_INITIAL_REGIONS 512 | |||
| 66 | #else | |||
| 67 | #define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info)) | |||
| 68 | #endif | |||
| 69 | #define MALLOC_DEFAULT_CACHE 64 | |||
| 70 | #define MALLOC_CHUNK_LISTS 4 | |||
| 71 | #define CHUNK_CHECK_LENGTH 32 | |||
| 72 | ||||
| 73 | #define B2SIZE(b) ((b) * MALLOC_MINSIZE) | |||
| 74 | #define B2ALLOC(b) ((b) == 0 ? MALLOC_MINSIZE : \ | |||
| 75 | (b) * MALLOC_MINSIZE) | |||
| 76 | #define BUCKETS (MALLOC_MAXCHUNK / MALLOC_MINSIZE) | |||
| 77 | ||||
| 78 | /* | |||
| 79 | * We move allocations between half a page and a whole page towards the end, | |||
| 80 | * subject to alignment constraints. This is the extra headroom we allow. | |||
| 81 | * Set to zero to be the most strict. | |||
| 82 | */ | |||
| 83 | #define MALLOC_LEEWAY 0 | |||
| 84 | #define MALLOC_MOVE_COND(sz) ((sz) - mopts.malloc_guard < \ | |||
| 85 | MALLOC_PAGESIZE - MALLOC_LEEWAY) | |||
| 86 | #define MALLOC_MOVE(p, sz) (((char *)(p)) + \ | |||
| 87 | ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \ | |||
| 88 | ((sz) - mopts.malloc_guard)) & \ | |||
| 89 | ~(MALLOC_MINSIZE - 1))) | |||
| 90 | ||||
| 91 | #define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK) | |||
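The leeway/move macros above are easier to follow with concrete numbers. A minimal standalone sketch (not part of malloc.c), assuming 4 KB pages, a 16-byte MALLOC_MINSIZE and no guard page:

    /* Mirrors what MALLOC_MOVE computes under those assumptions. */
    #include <assert.h>
    #include <stddef.h>

    #define PAGESZ 4096UL
    #define MINSZ  16UL

    static char *
    move_to_end(char *page, size_t sz)
    {
            /* push the allocation to the end of its page, 16-byte aligned */
            return page + ((PAGESZ - 0 /* leeway */ - sz) & ~(MINSZ - 1));
    }

    int
    main(void)
    {
            static char page[PAGESZ];

            /* a 100-byte request lands at offset (4096 - 100) & ~15 == 3984 */
            assert(move_to_end(page, 100) == page + 3984);
            return 0;
    }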
| 92 | ||||
| 93 | /* | |||
| 94 | * What to use for Junk. This is the byte value we use to fill with | |||
| 95 | * when the 'J' option is enabled. Use SOME_JUNK right after alloc, | |||
| 96 | * and SOME_FREEJUNK right before free. | |||
| 97 | */ | |||
| 98 | #define SOME_JUNK 0xdb /* deadbeef */ | |||
| 99 | #define SOME_FREEJUNK 0xdf /* dead, free */ | |||
| 100 | #define SOME_FREEJUNK_ULL 0xdfdfdfdfdfdfdfdfULL | |||
| 101 | ||||
| 102 | #define MMAP(sz,f) mmap(NULL, (sz), PROT_READ | PROT_WRITE, \ | |||
| 103 | MAP_ANON | MAP_PRIVATE | (f), -1, 0) | |||
| 104 | ||||
| 105 | #define MMAPNONE(sz,f) mmap(NULL, (sz), PROT_NONE, \ | |||
| 106 | MAP_ANON | MAP_PRIVATE | (f), -1, 0) | |||
| 107 | ||||
| 108 | #define MMAPA(a,sz,f) mmap((a), (sz), PROT_READ | PROT_WRITE, \ | |||
| 109 | MAP_ANON | MAP_PRIVATE | (f), -1, 0) | |||
| 110 | ||||
| 111 | struct region_info { | |||
| 112 | void *p; /* page; low bits used to mark chunks */ | |||
| 113 | uintptr_t size; /* size for pages, or chunk_info pointer */ | |||
| 114 | #ifdef MALLOC_STATS | |||
| 115 | void **f; /* where allocated from */ | |||
| 116 | #endif | |||
| 117 | }; | |||
| 118 | ||||
| 119 | LIST_HEAD(chunk_head, chunk_info); | |||
| 120 | ||||
| 121 | /* | |||
| 122 | * Two caches, one for "small" regions, one for "big". | |||
| 123 | * Small cache is an array per size, big cache is one array with different | |||
| 124 | * sized regions | |||
| 125 | */ | |||
| 126 | #define MAX_SMALLCACHEABLE_SIZE 32 | |||
| 127 | #define MAX_BIGCACHEABLE_SIZE 512 | |||
| 128 | /* If the total # of pages is larger than this, evict before inserting */ | |||
| 129 | #define BIGCACHE_FILL(sz) (MAX_BIGCACHEABLE_SIZE * (sz) / 4) | |||
| 130 | ||||
| 131 | struct smallcache { | |||
| 132 | void **pages; | |||
| 133 | ushort length; | |||
| 134 | ushort max; | |||
| 135 | }; | |||
| 136 | ||||
| 137 | struct bigcache { | |||
| 138 | void *page; | |||
| 139 | size_t psize; | |||
| 140 | }; | |||
| 141 | ||||
| 142 | #ifdef MALLOC_STATS | |||
| 143 | #define NUM_FRAMES 4 | |||
| 144 | struct btnode { | |||
| 145 | RBT_ENTRY(btnode) entry; | |||
| 146 | void *caller[NUM_FRAMES]; | |||
| 147 | }; | |||
| 148 | RBT_HEAD(btshead, btnode); | |||
| 149 | RBT_PROTOTYPE(btshead, btnode, entry, btcmp); | |||
| 150 | #endif /* MALLOC_STATS */ | |||
| 151 | ||||
| 152 | struct dir_info { | |||
| 153 | u_int32_t canary1; | |||
| 154 | int active; /* status of malloc */ | |||
| 155 | struct region_info *r; /* region slots */ | |||
| 156 | size_t regions_total; /* number of region slots */ | |||
| 157 | size_t regions_free; /* number of free slots */ | |||
| 158 | size_t rbytesused; /* random bytes used */ | |||
| 159 | const char *func; /* current function */ | |||
| 160 | int malloc_junk; /* junk fill? */ | |||
| 161 | int mmap_flag; /* extra flag for mmap */ | |||
| 162 | int mutex; | |||
| 163 | int malloc_mt; /* multi-threaded mode? */ | |||
| 164 | /* lists of free chunk info structs */ | |||
| 165 | struct chunk_head chunk_info_list[BUCKETS + 1]; | |||
| 166 | /* lists of chunks with free slots */ | |||
| 167 | struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS]; | |||
| 168 | /* delayed free chunk slots */ | |||
| 169 | void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1]; | |||
| 170 | u_char rbytes[32]; /* random bytes */ | |||
| 171 | /* free pages cache */ | |||
| 172 | struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE]; | |||
| 173 | size_t bigcache_used; | |||
| 174 | size_t bigcache_size; | |||
| 175 | struct bigcache *bigcache; | |||
| 176 | void *chunk_pages; | |||
| 177 | size_t chunk_pages_used; | |||
| 178 | #ifdef MALLOC_STATS | |||
| 179 | void *caller; | |||
| 180 | size_t inserts; | |||
| 181 | size_t insert_collisions; | |||
| 182 | size_t deletes; | |||
| 183 | size_t delete_moves; | |||
| 184 | size_t cheap_realloc_tries; | |||
| 185 | size_t cheap_reallocs; | |||
| 186 | size_t malloc_used; /* bytes allocated */ | |||
| 187 | size_t malloc_guarded; /* bytes used for guards */ | |||
| 188 | struct btshead btraces; /* backtraces seen */ | |||
| 189 | struct btnode *btnodes; /* store of backtrace nodes */ | |||
| 190 | size_t btnodesused; | |||
| 191 | #define STATS_ADD(x,y) ((x) += (y)) | |||
| 192 | #define STATS_SUB(x,y) ((x) -= (y)) | |||
| 193 | #define STATS_INC(x) ((x)++) | |||
| 194 | #define STATS_ZERO(x) ((x) = 0) | |||
| 195 | #define STATS_SETF(x,y) ((x)->f = (y)) | |||
| 196 | #define STATS_SETFN(x,k,y) ((x)->f[k] = (y)) | |||
| 197 | #define SET_CALLER(x,y) if (DO_STATS) ((x)->caller = (y)) | |||
| 198 | #else | |||
| 199 | #define STATS_ADD(x,y) /* nothing */ | |||
| 200 | #define STATS_SUB(x,y) /* nothing */ | |||
| 201 | #define STATS_INC(x) /* nothing */ | |||
| 202 | #define STATS_ZERO(x) /* nothing */ | |||
| 203 | #define STATS_SETF(x,y) /* nothing */ | |||
| 204 | #define STATS_SETFN(x,k,y) /* nothing */ | |||
| 205 | #define SET_CALLER(x,y) /* nothing */ | |||
| 206 | #endif /* MALLOC_STATS */ | |||
| 207 | u_int32_t canary2; | |||
| 208 | }; | |||
| 209 | ||||
| 210 | static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear); | |||
| 211 | ||||
| 212 | /* | |||
| 213 | * This structure describes a page worth of chunks. | |||
| 214 | * | |||
| 215 | * How many bits per u_short in the bitmap | |||
| 216 | */ | |||
| 217 | #define MALLOC_BITS (NBBY * sizeof(u_short)) | |||
| 218 | struct chunk_info { | |||
| 219 | LIST_ENTRY(chunk_info) entries; | |||
| 220 | void *page; /* pointer to the page */ | |||
| 221 | /* number of shorts should add up to 8, check alloc_chunk_info() */ | |||
| 222 | u_short canary; | |||
| 223 | u_short bucket; | |||
| 224 | u_short free; /* how many free chunks */ | |||
| 225 | u_short total; /* how many chunks */ | |||
| 226 | u_short offset; /* requested size table offset */ | |||
| 227 | #define CHUNK_INFO_TAIL 3 | |||
| 228 | u_short bits[CHUNK_INFO_TAIL]; /* which chunks are free */ | |||
| 229 | }; | |||
| 230 | ||||
| 231 | #define CHUNK_FREE(i, n) ((i)->bits[(n) / MALLOC_BITS] & \ | |||
| 232 | (1U << ((n) % MALLOC_BITS))) | |||
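As a worked example of the bitmap addressing used by CHUNK_FREE above (illustrative only, assuming a 16-bit u_short so MALLOC_BITS is 16): chunk 21 is tracked by bit 21 % 16 == 5 of bits[21 / 16] == bits[1].

    #include <assert.h>

    int
    main(void)
    {
            unsigned short bits[4] = { 0 };
            unsigned n = 21;

            bits[n / 16] |= 1U << (n % 16);                 /* mark chunk 21 free */
            assert(bits[1] == (1U << 5));
            assert((bits[n / 16] & (1U << (n % 16))) != 0); /* CHUNK_FREE-style test */
            return 0;
    }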
| 233 | ||||
| 234 | struct malloc_readonly { | |||
| 235 | /* Main bookkeeping information */ | |||
| 236 | struct dir_info *malloc_pool[_MALLOC_MUTEXES]; | |||
| 237 | u_int malloc_mutexes; /* how much in actual use? */ | |||
| 238 | int malloc_freecheck; /* Extensive double free check */ | |||
| 239 | int malloc_freeunmap; /* mprotect free pages PROT_NONE? */ | |||
| 240 | int def_malloc_junk; /* junk fill? */ | |||
| 241 | int malloc_realloc; /* always realloc? */ | |||
| 242 | int malloc_xmalloc; /* xmalloc behaviour? */ | |||
| 243 | u_int chunk_canaries; /* use canaries after chunks? */ | |||
| 244 | int internal_funcs; /* use better recallocarray/freezero? */ | |||
| 245 | u_int def_maxcache; /* free pages we cache */ | |||
| 246 | u_int junk_loc; /* variation in location of junk */ | |||
| 247 | size_t malloc_guard; /* use guard pages after allocations? */ | |||
| 248 | #ifdef MALLOC_STATS | |||
| 249 | int malloc_stats; /* save callers, dump leak report */ | |||
| 250 | int malloc_verbose; /* dump verbose statistics at end */ | |||
| 251 | #define DO_STATS mopts.malloc_stats | |||
| 252 | #else | |||
| 253 | #define DO_STATS 0 | |||
| 254 | #endif | |||
| 255 | u_int32_t malloc_canary; /* Matched against ones in pool */ | |||
| 256 | }; | |||
| 257 | ||||
| 258 | ||||
| 259 | /* This object is mapped PROT_READ after initialisation to prevent tampering */ | |||
| 260 | static union { | |||
| 261 | struct malloc_readonly mopts; | |||
| 262 | u_char _pad[MALLOC_PAGESIZE]; | |||
| 263 | } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE))) | |||
| 264 | __attribute__((section(".openbsd.mutable"))); | |||
| 265 | #define mopts malloc_readonly.mopts | |||
| 266 | ||||
| 267 | char *malloc_options; /* compile-time options */ | |||
| 268 | ||||
| 269 | static __dead void wrterror(struct dir_info *d, char *msg, ...) | |||
| 270 | __attribute__((__format__ (printf, 2, 3))); | |||
| 271 | ||||
| 272 | #ifdef MALLOC_STATS | |||
| 273 | void malloc_dump(void); | |||
| 274 | PROTO_NORMAL(malloc_dump); | |||
| 275 | static void malloc_exit(void); | |||
| 276 | static void print_chunk_details(struct dir_info *, void *, size_t, size_t); | |||
| 277 | static void* store_caller(struct dir_info *, struct btnode *); | |||
| 278 | ||||
| 279 | /* below are the arches for which deeper caller info has been tested */ | |||
| 280 | #if defined(__aarch64__) || \ | |||
| 281 | defined(__amd64__) || \ | |||
| 282 | defined(__arm__) || \ | |||
| 283 | defined(__i386__) || \ | |||
| 284 | defined(__powerpc__) | |||
| 285 | __attribute__((always_inline)) | |||
| 286 | static inline void* | |||
| 287 | caller(struct dir_info *d) | |||
| 288 | { | |||
| 289 | struct btnode p; | |||
| 290 | int level = DO_STATS; | |||
| 291 | ||||
| 292 | if (level == 0) | |||
| 293 | return NULL; | |||
| 294 | ||||
| 295 | memset(&p.caller, 0, sizeof(p.caller)); | |||
| 296 | if (level >= 1) | |||
| 297 | p.caller[0] = __builtin_extract_return_addr( | |||
| 298 | __builtin_return_address(0)); | |||
| 299 | if (p.caller[0] != NULL && level >= 2) | |||
| 300 | p.caller[1] = __builtin_extract_return_addr( | |||
| 301 | __builtin_return_address(1)); | |||
| 302 | if (p.caller[1] != NULL && level >= 3) | |||
| 303 | p.caller[2] = __builtin_extract_return_addr( | |||
| 304 | __builtin_return_address(2)); | |||
| 305 | if (p.caller[2] != NULL && level >= 4) | |||
| 306 | p.caller[3] = __builtin_extract_return_addr( | |||
| 307 | __builtin_return_address(3)); | |||
| 308 | return store_caller(d, &p); | |||
| 309 | } | |||
| 310 | #else | |||
| 311 | __attribute__((always_inline)) | |||
| 312 | static inline void* caller(struct dir_info *d) | |||
| 313 | { | |||
| 314 | struct btnode p; | |||
| 315 | ||||
| 316 | if (DO_STATS == 0) | |||
| 317 | return NULL; | |||
| 318 | memset(&p.caller, 0, sizeof(p.caller)); | |||
| 319 | p.caller[0] = __builtin_extract_return_addr(__builtin_return_address(0)); | |||
| 320 | return store_caller(d, &p); | |||
| 321 | } | |||
| 322 | #endif | |||
| 323 | #endif /* MALLOC_STATS */ | |||
| 324 | ||||
| 325 | /* low bits of r->p determine size: 0 means >= page size and r->size holding | |||
| 326 | * real size, otherwise low bits is the bucket + 1 | |||
| 327 | */ | |||
| 328 | #define REALSIZE(sz, r) \ | |||
| 329 | (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \ | |||
| 330 | (sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1)) | |||
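A small sketch of the REALSIZE encoding (not from malloc.c; assumes 4 KB pages and 16-byte buckets): the low 12 bits of region_info.p are zero for page-sized regions, whose byte size sits in r->size, and hold bucket + 1 for chunk pages.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int
    main(void)
    {
            uintptr_t page = 0x200000;              /* hypothetical page-aligned address */
            uintptr_t tagged = page | (3 + 1);      /* chunk page for bucket 3 */
            size_t low = tagged & 0xfff;

            assert(low != 0);                       /* chunk page, not a large region */
            assert((low - 1) * 16 == 48);           /* bucket 3 holds 48-byte chunks */
            return 0;
    }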
| 331 | ||||
| 332 | static inline size_t | |||
| 333 | hash(void *p) | |||
| 334 | { | |||
| 335 | size_t sum; | |||
| 336 | uintptr_t u; | |||
| 337 | ||||
| 338 | u = (uintptr_t)p >> MALLOC_PAGESHIFT; | |||
| 339 | sum = u; | |||
| 340 | sum = (sum << 7) - sum + (u >> 16); | |||
| 341 | #ifdef __LP64__ | |||
| 342 | sum = (sum << 7) - sum + (u >> 32); | |||
| 343 | sum = (sum << 7) - sum + (u >> 48); | |||
| 344 | #endif | |||
| 345 | return sum; | |||
| 346 | } | |||
| 347 | ||||
| 348 | static inline struct dir_info * | |||
| 349 | getpool(void) | |||
| 350 | { | |||
| 351 | if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt) | |||
| 352 | return mopts.malloc_pool[1]; | |||
| 353 | else /* first one reserved for special pool */ | |||
| 354 | return mopts.malloc_pool[1 + TIB_GET()->tib_tid % | |||
| 355 | (mopts.malloc_mutexes - 1)]; | |||
| 356 | } | |||
| 357 | ||||
| 358 | static __dead void | |||
| 359 | wrterror(struct dir_info *d, char *msg, ...) | |||
| 360 | { | |||
| 361 | int saved_errno = errno; | |||
| 362 | va_list ap; | |||
| 363 | ||||
| 364 | dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname, | |||
| 365 | getpid(), (d != NULL && d->func) ? d->func : "unknown"); | |||
| 366 | va_start(ap, msg); | |||
| 367 | vdprintf(STDERR_FILENO, msg, ap); | |||
| 368 | va_end(ap); | |||
| 369 | dprintf(STDERR_FILENO, "\n"); | |||
| 370 | ||||
| 371 | #ifdef MALLOC_STATS | |||
| 372 | if (DO_STATS && mopts.malloc_verbose) | |||
| 373 | malloc_dump(); | |||
| 374 | #endif | |||
| 375 | ||||
| 376 | errno = saved_errno; | |||
| 377 | ||||
| 378 | abort(); | |||
| 379 | } | |||
| 380 | ||||
| 381 | static void | |||
| 382 | rbytes_init(struct dir_info *d) | |||
| 383 | { | |||
| 384 | arc4random_buf(d->rbytes, sizeof(d->rbytes)); | |||
| 385 | /* add 1 to account for using d->rbytes[0] */ | |||
| 386 | d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2); | |||
| 387 | } | |||
| 388 | ||||
| 389 | static inline u_char | |||
| 390 | getrbyte(struct dir_info *d) | |||
| 391 | { | |||
| 392 | u_char x; | |||
| 393 | ||||
| 394 | if (d->rbytesused >= sizeof(d->rbytes)) | |||
| 395 | rbytes_init(d); | |||
| 396 | x = d->rbytes[d->rbytesused++]; | |||
| 397 | return x; | |||
| 398 | } | |||
| 399 | ||||
| 400 | static void | |||
| 401 | omalloc_parseopt(char opt) | |||
| 402 | { | |||
| 403 | switch (opt) { | |||
| 404 | case '+': | |||
| 405 | mopts.malloc_mutexes <<= 1; | |||
| 406 | if (mopts.malloc_mutexes > _MALLOC_MUTEXES) | |||
| 407 | mopts.malloc_mutexes = _MALLOC_MUTEXES; | |||
| 408 | break; | |||
| 409 | case '-': | |||
| 410 | mopts.malloc_mutexes >>= 1; | |||
| 411 | if (mopts.malloc_mutexes < 2) | |||
| 412 | mopts.malloc_mutexes = 2; | |||
| 413 | break; | |||
| 414 | case '>': | |||
| 415 | mopts.def_maxcache <<= 1; | |||
| 416 | if (mopts.def_maxcache > MALLOC_MAXCACHE) | |||
| 417 | mopts.def_maxcache = MALLOC_MAXCACHE; | |||
| 418 | break; | |||
| 419 | case '<': | |||
| 420 | mopts.def_maxcache >>= 1; | |||
| 421 | break; | |||
| 422 | case 'c': | |||
| 423 | mopts.chunk_canaries = 0; | |||
| 424 | break; | |||
| 425 | case 'C': | |||
| 426 | mopts.chunk_canaries = 1; | |||
| 427 | break; | |||
| 428 | #ifdef MALLOC_STATS | |||
| 429 | case 'd': | |||
| 430 | mopts.malloc_stats = 0; | |||
| 431 | break; | |||
| 432 | case 'D': | |||
| 433 | case '1': | |||
| 434 | mopts.malloc_stats = 1; | |||
| 435 | break; | |||
| 436 | case '2': | |||
| 437 | mopts.malloc_stats = 2; | |||
| 438 | break; | |||
| 439 | case '3': | |||
| 440 | mopts.malloc_stats = 3; | |||
| 441 | break; | |||
| 442 | case '4': | |||
| 443 | mopts.malloc_stats = 4; | |||
| 444 | break; | |||
| 445 | #endif /* MALLOC_STATS */ | |||
| 446 | case 'f': | |||
| 447 | mopts.malloc_freecheck = 0; | |||
| 448 | mopts.malloc_freeunmap = 0; | |||
| 449 | break; | |||
| 450 | case 'F': | |||
| 451 | mopts.malloc_freecheck = 1; | |||
| 452 | mopts.malloc_freeunmap = 1; | |||
| 453 | break; | |||
| 454 | case 'g': | |||
| 455 | mopts.malloc_guard = 0; | |||
| 456 | break; | |||
| 457 | case 'G': | |||
| 458 | mopts.malloc_guard = MALLOC_PAGESIZE; | |||
| 459 | break; | |||
| 460 | case 'j': | |||
| 461 | if (mopts.def_malloc_junk > 0) | |||
| 462 | mopts.def_malloc_junk--; | |||
| 463 | break; | |||
| 464 | case 'J': | |||
| 465 | if (mopts.def_malloc_junk < 2) | |||
| 466 | mopts.def_malloc_junk++; | |||
| 467 | break; | |||
| 468 | case 'r': | |||
| 469 | mopts.malloc_realloc = 0; | |||
| 470 | break; | |||
| 471 | case 'R': | |||
| 472 | mopts.malloc_realloc = 1; | |||
| 473 | break; | |||
| 474 | case 'u': | |||
| 475 | mopts.malloc_freeunmap = 0; | |||
| 476 | break; | |||
| 477 | case 'U': | |||
| 478 | mopts.malloc_freeunmap = 1; | |||
| 479 | break; | |||
| 480 | #ifdef MALLOC_STATS | |||
| 481 | case 'v': | |||
| 482 | mopts.malloc_verbose = 0; | |||
| 483 | break; | |||
| 484 | case 'V': | |||
| 485 | mopts.malloc_verbose = 1; | |||
| 486 | break; | |||
| 487 | #endif /* MALLOC_STATS */ | |||
| 488 | case 'x': | |||
| 489 | mopts.malloc_xmalloc = 0; | |||
| 490 | break; | |||
| 491 | case 'X': | |||
| 492 | mopts.malloc_xmalloc = 1; | |||
| 493 | break; | |||
| 494 | default: | |||
| 495 | dprintf(STDERR_FILENO, "malloc() warning: " | |||
| 496 | "unknown char in MALLOC_OPTIONS\n"); | |||
| 497 | break; | |||
| 498 | } | |||
| 499 | } | |||
| 500 | ||||
| 501 | static void | |||
| 502 | omalloc_init(void) | |||
| 503 | { | |||
| 504 | char *p, *q, b[16]; | |||
| 505 | int i, j; | |||
| 506 | const int mib[2] = { CTL_VM, VM_MALLOC_CONF }; | |||
| 507 | size_t sb; | |||
| 508 | ||||
| 509 | /* | |||
| 510 | * Default options | |||
| 511 | */ | |||
| 512 | mopts.malloc_mutexes = 8; | |||
| 513 | mopts.def_malloc_junk = 1; | |||
| 514 | mopts.def_maxcache = MALLOC_DEFAULT_CACHE; | |||
| 515 | ||||
| 516 | for (i = 0; i < 3; i++) { | |||
| 517 | switch (i) { | |||
| 518 | case 0: | |||
| 519 | sb = sizeof(b); | |||
| 520 | j = sysctl(mib, 2, b, &sb, NULL, 0); | |||
| 521 | if (j != 0) | |||
| 522 | continue; | |||
| 523 | p = b; | |||
| 524 | break; | |||
| 525 | case 1: | |||
| 526 | if (issetugid() == 0) | |||
| 527 | p = getenv("MALLOC_OPTIONS"); | |||
| 528 | else | |||
| 529 | continue; | |||
| 530 | break; | |||
| 531 | case 2: | |||
| 532 | p = malloc_options; | |||
| 533 | break; | |||
| 534 | default: | |||
| 535 | p = NULL; | |||
| 536 | } | |||
| 537 | ||||
| 538 | for (; p != NULL && *p != '\0'; p++) { | |||
| 539 | switch (*p) { | |||
| 540 | case 'S': | |||
| 541 | for (q = "CFGJ"; *q != '\0'; q++) | |||
| 542 | omalloc_parseopt(*q); | |||
| 543 | mopts.def_maxcache = 0; | |||
| 544 | break; | |||
| 545 | case 's': | |||
| 546 | for (q = "cfgj"; *q != '\0'; q++) | |||
| 547 | omalloc_parseopt(*q); | |||
| 548 | mopts.def_maxcache = MALLOC_DEFAULT_CACHE; | |||
| 549 | break; | |||
| 550 | default: | |||
| 551 | omalloc_parseopt(*p); | |||
| 552 | break; | |||
| 553 | } | |||
| 554 | } | |||
| 555 | } | |||
| 556 | ||||
| 557 | #ifdef MALLOC_STATS | |||
| 558 | if (DO_STATS && (atexit(malloc_exit) == -1)) { | |||
| 559 | dprintf(STDERR_FILENO, "malloc() warning: atexit(3) failed." | |||
| 560 | " Will not be able to dump stats on exit\n"); | |||
| 561 | } | |||
| 562 | #endif | |||
| 563 | ||||
| 564 | while ((mopts.malloc_canary = arc4random()) == 0) | |||
| 565 | ; | |||
| 566 | mopts.junk_loc = arc4random(); | |||
| 567 | if (mopts.chunk_canaries) | |||
| 568 | do { | |||
| 569 | mopts.chunk_canaries = arc4random(); | |||
| 570 | } while ((u_char)mopts.chunk_canaries == 0 || | |||
| 571 | (u_char)mopts.chunk_canaries == SOME_FREEJUNK); | |||
| 572 | } | |||
| 573 | ||||
| 574 | static void | |||
| 575 | omalloc_poolinit(struct dir_info *d, int mmap_flag) | |||
| 576 | { | |||
| 577 | u_int i, j; | |||
| 578 | ||||
| 579 | d->r = NULL; | |||
| 580 | d->rbytesused = sizeof(d->rbytes); | |||
| 581 | d->regions_free = d->regions_total = 0; | |||
| 582 | for (i = 0; i <= BUCKETS; i++) { | |||
| 583 | LIST_INIT(&d->chunk_info_list[i]); | |||
| 584 | for (j = 0; j < MALLOC_CHUNK_LISTS; j++) | |||
| 585 | LIST_INIT(&d->chunk_dir[i][j]); | |||
| 586 | } | |||
| 587 | d->mmap_flag = mmap_flag; | |||
| 588 | d->malloc_junk = mopts.def_malloc_junk; | |||
| 589 | #ifdef MALLOC_STATS | |||
| 590 | RBT_INIT(btshead, &d->btraces); | |||
| 591 | #endif | |||
| 592 | d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d; | |||
| 593 | d->canary2 = ~d->canary1; | |||
| 594 | } | |||
| 595 | ||||
| 596 | static int | |||
| 597 | omalloc_grow(struct dir_info *d) | |||
| 598 | { | |||
| 599 | size_t newtotal; | |||
| 600 | size_t newsize; | |||
| 601 | size_t mask; | |||
| 602 | size_t i, oldpsz; | |||
| 603 | struct region_info *p; | |||
| 604 | ||||
| 605 | if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2) | |||
| 606 | return 1; | |||
| 607 | ||||
| 608 | newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS : | |||
| 609 | d->regions_total * 2; | |||
| 610 | newsize = PAGEROUND(newtotal * sizeof(struct region_info)); | |||
| 611 | mask = newtotal - 1; | |||
| 612 | ||||
| 613 | /* Don't use cache here, we don't want user uaf touch this */ | |||
| 614 | p = MMAP(newsize, d->mmap_flag); | |||
| 615 | if (p == MAP_FAILED) | |||
| 616 | return 1; | |||
| 617 | ||||
| 618 | STATS_ADD(d->malloc_used, newsize); | |||
| 619 | STATS_ZERO(d->inserts); | |||
| 620 | STATS_ZERO(d->insert_collisions); | |||
| 621 | for (i = 0; i < d->regions_total; i++) { | |||
| 622 | void *q = d->r[i].p; | |||
| 623 | if (q != NULL) { | |||
| 624 | size_t index = hash(q) & mask; | |||
| 625 | STATS_INC(d->inserts); | |||
| 626 | while (p[index].p != NULL) { | |||
| 627 | index = (index - 1) & mask; | |||
| 628 | STATS_INC(d->insert_collisions); | |||
| 629 | } | |||
| 630 | p[index] = d->r[i]; | |||
| 631 | } | |||
| 632 | } | |||
| 633 | ||||
| 634 | if (d->regions_total > 0) { | |||
| 635 | oldpsz = PAGEROUND(d->regions_total * | |||
| 636 | sizeof(struct region_info)); | |||
| 637 | /* clear to avoid meta info ending up in the cache */ | |||
| 638 | unmap(d, d->r, oldpsz, oldpsz); | |||
| 639 | } | |||
| 640 | d->regions_free += newtotal - d->regions_total; | |||
| 641 | d->regions_total = newtotal; | |||
| 642 | d->r = p; | |||
| 643 | return 0; | |||
| 644 | } | |||
| 645 | ||||
| 646 | /* | |||
| 647 | * The hashtable uses the assumption that p is never NULL. This holds since | |||
| 648 | * non-MAP_FIXED mappings with hint 0 start at BRKSIZ. | |||
| 649 | */ | |||
| 650 | static int | |||
| 651 | insert(struct dir_info *d, void *p, size_t sz, void *f) | |||
| 652 | { | |||
| 653 | size_t index; | |||
| 654 | size_t mask; | |||
| 655 | void *q; | |||
| 656 | ||||
| 657 | if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) { | |||
| 658 | if (omalloc_grow(d)) | |||
| 659 | return 1; | |||
| 660 | } | |||
| 661 | mask = d->regions_total - 1; | |||
| 662 | index = hash(p) & mask; | |||
| 663 | q = d->r[index].p; | |||
| 664 | STATS_INC(d->inserts); | |||
| 665 | while (q != NULL) { | |||
| 666 | index = (index - 1) & mask; | |||
| 667 | q = d->r[index].p; | |||
| 668 | STATS_INC(d->insert_collisions); | |||
| 669 | } | |||
| 670 | d->r[index].p = p; | |||
| 671 | d->r[index].size = sz; | |||
| 672 | STATS_SETF(&d->r[index], f); | |||
| 673 | d->regions_free--; | |||
| 674 | return 0; | |||
| 675 | } | |||
| 676 | ||||
| 677 | static struct region_info * | |||
| 678 | find(struct dir_info *d, void *p) | |||
| 679 | { | |||
| 680 | size_t index; | |||
| 681 | size_t mask = d->regions_total - 1; | |||
| 682 | void *q, *r; | |||
| 683 | ||||
| 684 | if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || | |||
| 685 | d->canary1 != ~d->canary2) | |||
| 686 | wrterror(d, "internal struct corrupt"); | |||
| 687 | if (d->r == NULL) | |||
| 688 | return NULL; | |||
| 689 | p = MASK_POINTER(p); | |||
| 690 | index = hash(p) & mask; | |||
| 691 | r = d->r[index].p; | |||
| 692 | q = MASK_POINTER(r); | |||
| 693 | while (q != p && r != NULL) { | |||
| 694 | index = (index - 1) & mask; | |||
| 695 | r = d->r[index].p; | |||
| 696 | q = MASK_POINTER(r); | |||
| 697 | } | |||
| 698 | return (q == p && r != NULL) ? &d->r[index] : NULL; | |||
| 699 | } | |||
| 700 | ||||
| 701 | static void | |||
| 702 | delete(struct dir_info *d, struct region_info *ri) | |||
| 703 | { | |||
| 704 | /* algorithm R, Knuth Vol III section 6.4 */ | |||
| 705 | size_t mask = d->regions_total - 1; | |||
| 706 | size_t i, j, r; | |||
| 707 | ||||
| 708 | if (d->regions_total & (d->regions_total - 1)) | |||
| 709 | wrterror(d, "regions_total not 2^x"); | |||
| 710 | d->regions_free++; | |||
| 711 | STATS_INC(d->deletes); | |||
| 712 | ||||
| 713 | i = ri - d->r; | |||
| 714 | for (;;) { | |||
| 715 | d->r[i].p = NULL; | |||
| 716 | d->r[i].size = 0; | |||
| 717 | j = i; | |||
| 718 | for (;;) { | |||
| 719 | i = (i - 1) & mask; | |||
| 720 | if (d->r[i].p == NULL) | |||
| 721 | return; | |||
| 722 | r = hash(d->r[i].p) & mask; | |||
| 723 | if ((i <= r && r < j) || (r < j && j < i) || | |||
| 724 | (j < i && i <= r)) | |||
| 725 | continue; | |||
| 726 | d->r[j] = d->r[i]; | |||
| 727 | STATS_INC(d->delete_moves); | |||
| 728 | break; | |||
| 729 | } | |||
| 730 | ||||
| 731 | } | |||
| 732 | } | |||
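The inner loop above is Knuth's Algorithm R adapted to a table that probes downward. A toy standalone illustration (hypothetical helper names, not malloc's own table): after a deletion, entries that had been displaced past the freed slot are moved back so later lookups still find them.

    #include <assert.h>
    #include <stdint.h>

    #define MASK 7u

    static uintptr_t slot[8];               /* 0 means empty */

    static unsigned hash(uintptr_t k) { return (unsigned)(k & MASK); }

    static void
    toy_delete(unsigned i)
    {
            unsigned j, r;

            for (;;) {
                    slot[i] = 0;
                    j = i;
                    for (;;) {
                            i = (i - 1) & MASK;
                            if (slot[i] == 0)
                                    return;
                            r = hash(slot[i]);
                            /* skip entries that are still reachable from their home slot */
                            if ((i <= r && r < j) || (r < j && j < i) ||
                                (j < i && i <= r))
                                    continue;
                            slot[j] = slot[i];      /* move it up, then clear slot i */
                            break;
                    }
            }
    }

    int
    main(void)
    {
            slot[1] = 9;            /* hash(9) == 1 */
            slot[0] = 1;            /* hash(1) == 1, displaced downward to slot 0 */
            toy_delete(1);          /* remove key 9 */
            assert(slot[1] == 1 && slot[0] == 0);   /* key 1 is findable again */
            return 0;
    }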
| 733 | ||||
| 734 | static inline void | |||
| 735 | junk_free(int junk, void *p, size_t sz) | |||
| 736 | { | |||
| 737 | size_t i, step = 1; | |||
| 738 | uint64_t *lp = p; | |||
| 739 | ||||
| 740 | if (junk == 0 || sz == 0) | |||
| 741 | return; | |||
| 742 | sz /= sizeof(uint64_t); | |||
| 743 | if (junk == 1) { | |||
| 744 | if (sz > MALLOC_PAGESIZE / sizeof(uint64_t)) | |||
| 745 | sz = MALLOC_PAGESIZE / sizeof(uint64_t); | |||
| 746 | step = sz / 4; | |||
| 747 | if (step == 0) | |||
| 748 | step = 1; | |||
| 749 | } | |||
| 750 | /* Do not always put the free junk bytes in the same spot. | |||
| 751 | There is modulo bias here, but we ignore that. */ | |||
| 752 | for (i = mopts.junk_loc % step; i < sz; i += step) | |||
| 753 | lp[i] = SOME_FREEJUNK_ULL; | |||
| 754 | } | |||
| 755 | ||||
| 756 | static inline void | |||
| 757 | validate_junk(struct dir_info *pool, void *p, size_t argsz) | |||
| 758 | { | |||
| 759 | size_t i, sz, step = 1; | |||
| 760 | uint64_t *lp = p; | |||
| 761 | ||||
| 762 | if (pool->malloc_junk == 0 || argsz == 0) | |||
| 763 | return; | |||
| 764 | sz = argsz / sizeof(uint64_t); | |||
| 765 | if (pool->malloc_junk == 1) { | |||
| 766 | if (sz > MALLOC_PAGESIZE / sizeof(uint64_t)) | |||
| 767 | sz = MALLOC_PAGESIZE / sizeof(uint64_t); | |||
| 768 | step = sz / 4; | |||
| 769 | if (step == 0) | |||
| 770 | step = 1; | |||
| 771 | } | |||
| 772 | /* see junk_free */ | |||
| 773 | for (i = mopts.junk_loc % step; i < sz; i += step) { | |||
| 774 | if (lp[i] != SOME_FREEJUNK_ULL) { | |||
| 775 | #ifdef MALLOC_STATS | |||
| 776 | if (DO_STATS && argsz <= MALLOC_MAXCHUNK) | |||
| 777 | print_chunk_details(pool, lp, argsz, i); | |||
| 778 | else | |||
| 779 | #endif | |||
| 780 | wrterror(pool, | |||
| 781 | "write to free mem %p[%zu..%zu]@%zu", | |||
| 782 | lp, i * sizeof(uint64_t), | |||
| 783 | (i + 1) * sizeof(uint64_t) - 1, argsz); | |||
| 784 | } | |||
| 785 | } | |||
| 786 | } | |||
| 787 | ||||
| 788 | ||||
| 789 | /* | |||
| 790 | * Cache maintenance. | |||
| 791 | * Opposed to the regular region data structure, the sizes in the | |||
| 792 | * cache are in MALLOC_PAGESIZE units. | |||
| 793 | */ | |||
| 794 | static void | |||
| 795 | unmap(struct dir_info *d, void *p, size_t sz, size_t clear) | |||
| 796 | { | |||
| 797 | size_t psz = sz >> MALLOC_PAGESHIFT; | |||
| 798 | void *r; | |||
| 799 | u_short i; | |||
| 800 | struct smallcache *cache; | |||
| 801 | ||||
| 802 | if (sz != PAGEROUND(sz) || psz == 0) | |||
| 803 | wrterror(d, "munmap round"); | |||
| 804 | ||||
| 805 | if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE && | |||
| 806 | psz <= MAX_BIGCACHEABLE_SIZE) { | |||
| 807 | u_short base = getrbyte(d); | |||
| 808 | u_short j; | |||
| 809 | ||||
| 810 | /* don't look through all slots */ | |||
| 811 | for (j = 0; j < d->bigcache_size / 4; j++) { | |||
| 812 | i = (base + j) & (d->bigcache_size - 1); | |||
| 813 | if (d->bigcache_used < | |||
| 814 | BIGCACHE_FILL(d->bigcache_size)) { | |||
| 815 | if (d->bigcache[i].psize == 0) | |||
| 816 | break; | |||
| 817 | } else { | |||
| 818 | if (d->bigcache[i].psize != 0) | |||
| 819 | break; | |||
| 820 | } | |||
| 821 | } | |||
| 822 | /* if we didn't find a preferred slot, use random one */ | |||
| 823 | if (d->bigcache[i].psize != 0) { | |||
| 824 | size_t tmp; | |||
| 825 | ||||
| 826 | r = d->bigcache[i].page; | |||
| 827 | d->bigcache_used -= d->bigcache[i].psize; | |||
| 828 | tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT; | |||
| 829 | if (!mopts.malloc_freeunmap) | |||
| 830 | validate_junk(d, r, tmp); | |||
| 831 | if (munmap(r, tmp)) | |||
| 832 | wrterror(d, "munmap %p", r); | |||
| 833 | STATS_SUB(d->malloc_used, tmp); | |||
| 834 | } | |||
| 835 | ||||
| 836 | if (clear > 0) | |||
| 837 | explicit_bzero(p, clear); | |||
| 838 | if (mopts.malloc_freeunmap) { | |||
| 839 | if (mprotect(p, sz, PROT_NONE)) | |||
| 840 | wrterror(d, "mprotect %p", r); | |||
| 841 | } else | |||
| 842 | junk_free(d->malloc_junk, p, sz); | |||
| 843 | d->bigcache[i].page = p; | |||
| 844 | d->bigcache[i].psize = psz; | |||
| 845 | d->bigcache_used += psz; | |||
| 846 | return; | |||
| 847 | } | |||
| 848 | if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) { | |||
| 849 | if (munmap(p, sz)) | |||
| 850 | wrterror(d, "munmap %p", p); | |||
| 851 | STATS_SUB(d->malloc_used, sz); | |||
| 852 | return; | |||
| 853 | } | |||
| 854 | cache = &d->smallcache[psz - 1]; | |||
| 855 | if (cache->length == cache->max) { | |||
| 856 | int fresh; | |||
| 857 | /* use a random slot */ | |||
| 858 | i = getrbyte(d) & (cache->max - 1); | |||
| 859 | r = cache->pages[i]; | |||
| 860 | fresh = (uintptr_t)r & 1; | |||
| 861 | *(uintptr_t*)&r &= ~1UL; | |||
| 862 | if (!fresh && !mopts.malloc_freeunmap) | |||
| 863 | validate_junk(d, r, sz); | |||
| 864 | if (munmap(r, sz)) | |||
| 865 | wrterror(d, "munmap %p", r); | |||
| 866 | STATS_SUB(d->malloc_used, sz); | |||
| 867 | cache->length--; | |||
| 868 | } else | |||
| 869 | i = cache->length; | |||
| 870 | ||||
| 871 | /* fill slot */ | |||
| 872 | if (clear > 0) | |||
| 873 | explicit_bzero(p, clear); | |||
| 874 | if (mopts.malloc_freeunmap) | |||
| 875 | mprotect(p, sz, PROT_NONE); | |||
| 876 | else | |||
| 877 | junk_free(d->malloc_junk, p, sz); | |||
| 878 | cache->pages[i] = p; | |||
| 879 | cache->length++; | |||
| 880 | } | |||
| 881 | ||||
| 882 | static void * | |||
| 883 | map(struct dir_info *d, size_t sz, int zero_fill) | |||
| 884 | { | |||
| 885 | size_t psz = sz >> MALLOC_PAGESHIFT; | |||
| 886 | u_short i; | |||
| 887 | void *p; | |||
| 888 | struct smallcache *cache; | |||
| 889 | ||||
| 890 | if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || | |||
| 891 | d->canary1 != ~d->canary2) | |||
| 892 | wrterror(d, "internal struct corrupt"); | |||
| 893 | if (sz != PAGEROUND(sz) || psz == 0) | |||
| 894 | wrterror(d, "map round"); | |||
| 895 | ||||
| 896 | ||||
| 897 | if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE && | |||
| 898 | psz <= MAX_BIGCACHEABLE_SIZE) { | |||
| 899 | size_t base = getrbyte(d); | |||
| 900 | size_t cached = d->bigcache_used; | |||
| 901 | ushort j; | |||
| 902 | ||||
| 903 | for (j = 0; j < d->bigcache_size && cached >= psz; j++) { | |||
| 904 | i = (j + base) & (d->bigcache_size - 1); | |||
| 905 | if (d->bigcache[i].psize == psz) { | |||
| 906 | p = d->bigcache[i].page; | |||
| 907 | d->bigcache_used -= psz; | |||
| 908 | d->bigcache[i].page = NULL; | |||
| 909 | d->bigcache[i].psize = 0; | |||
| 910 | ||||
| 911 | if (!mopts.malloc_freeunmap) | |||
| 912 | validate_junk(d, p, sz); | |||
| 913 | if (mopts.malloc_freeunmap) | |||
| 914 | mprotect(p, sz, PROT_READ | PROT_WRITE); | |||
| 915 | if (zero_fill) | |||
| 916 | memset(p, 0, sz); | |||
| 917 | else if (mopts.malloc_freeunmap) | |||
| 918 | junk_free(d->malloc_junk, p, sz); | |||
| 919 | return p; | |||
| 920 | } | |||
| 921 | cached -= d->bigcache[i].psize; | |||
| 922 | } | |||
| 923 | } | |||
| 924 | if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) { | |||
| 925 | cache = &d->smallcache[psz - 1]; | |||
| 926 | if (cache->length > 0) { | |||
| 927 | int fresh; | |||
| 928 | if (cache->length == 1) | |||
| 929 | p = cache->pages[--cache->length]; | |||
| 930 | else { | |||
| 931 | i = getrbyte(d) % cache->length; | |||
| 932 | p = cache->pages[i]; | |||
| 933 | cache->pages[i] = cache->pages[--cache->length]; | |||
| 934 | } | |||
| 935 | /* check if page was not junked, i.e. "fresh | |||
| 936 | we use the lsb of the pointer for that */ | |||
| 937 | fresh = (uintptr_t)p & 1UL; | |||
| 938 | *(uintptr_t*)&p &= ~1UL; | |||
| 939 | if (!fresh && !mopts.malloc_freeunmap) | |||
| 940 | validate_junk(d, p, sz); | |||
| 941 | if (mopts.malloc_freeunmap) | |||
| 942 | mprotect(p, sz, PROT_READ | PROT_WRITE); | |||
| 943 | if (zero_fill) | |||
| 944 | memset(p, 0, sz); | |||
| 945 | else if (mopts.malloc_freeunmap) | |||
| 946 | junk_free(d->malloc_junk, p, sz); | |||
| 947 | return p; | |||
| 948 | } | |||
| 949 | if (psz <= 1) { | |||
| 950 | p = MMAP(cache->max * sz, d->mmap_flag); | |||
| 951 | if (p != MAP_FAILED) { | |||
| 952 | STATS_ADD(d->malloc_used, cache->max * sz); | |||
| 953 | cache->length = cache->max - 1; | |||
| 954 | for (i = 0; i < cache->max - 1; i++) { | |||
| 955 | void *q = (char*)p + i * sz; | |||
| 956 | cache->pages[i] = q; | |||
| 957 | /* mark pointer in slot as not junked */ | |||
| 958 | *(uintptr_t*)&cache->pages[i] |= 1UL; | |||
| 959 | } | |||
| 960 | if (mopts.malloc_freeunmap) | |||
| 961 | mprotect(p, (cache->max - 1) * sz, | |||
| 962 | PROT_NONE); | |||
| 963 | p = (char*)p + (cache->max - 1) * sz; | |||
| 964 | /* zero fill not needed, freshly mmapped */ | |||
| 965 | return p; | |||
| 966 | } | |||
| 967 | } | |||
| 968 | ||||
| 969 | } | |||
| 970 | p = MMAP(sz, d->mmap_flag); | |||
| 971 | if (p != MAP_FAILED) | |||
| 972 | STATS_ADD(d->malloc_used, sz); | |||
| 973 | /* zero fill not needed */ | |||
| 974 | return p; | |||
| 975 | } | |||
| 976 | ||||
| 977 | static void | |||
| 978 | init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket) | |||
| 979 | { | |||
| 980 | u_int i; | |||
| 981 | ||||
| 982 | p->bucket = bucket; | |||
| 983 | p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket); | |||
| 984 | p->offset = howmany(p->total, MALLOC_BITS); | |||
| 985 | p->canary = (u_short)d->canary1; | |||
| 986 | ||||
| 987 | /* set all valid bits in the bitmap */ | |||
| 988 | i = p->total - 1; | |||
| 989 | memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS)); | |||
| 990 | p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1; | |||
| 991 | } | |||
| 992 | ||||
| 993 | static struct chunk_info * | |||
| 994 | alloc_chunk_info(struct dir_info *d, u_int bucket) | |||
| 995 | { | |||
| 996 | struct chunk_info *p; | |||
| 997 | ||||
| 998 | if (LIST_EMPTY(&d->chunk_info_list[bucket])) { | |||
| 999 | const size_t chunk_pages = 64; | |||
| 1000 | size_t size, count, i; | |||
| 1001 | char *q; | |||
| 1002 | ||||
| 1003 | count = MALLOC_PAGESIZE / B2ALLOC(bucket); | |||
| 1004 | ||||
| 1005 | size = howmany(count, MALLOC_BITS); | |||
| 1006 | /* see declaration of struct chunk_info */ | |||
| 1007 | if (size <= CHUNK_INFO_TAIL) | |||
| 1008 | size = 0; | |||
| 1009 | else | |||
| 1010 | size -= CHUNK_INFO_TAIL; | |||
| 1011 | size = sizeof(struct chunk_info) + size * sizeof(u_short); | |||
| 1012 | if (mopts.chunk_canaries && bucket > 0) | |||
| 1013 | size += count * sizeof(u_short); | |||
| 1014 | size = _ALIGN(size); | |||
| 1015 | count = MALLOC_PAGESIZE / size; | |||
| 1016 | ||||
| 1017 | /* Don't use cache here, we don't want user uaf touch this */ | |||
| 1018 | if (d->chunk_pages_used == chunk_pages || | |||
| 1019 | d->chunk_pages == NULL) { | |||
| 1020 | q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag); | |||
| 1021 | if (q == MAP_FAILED) | |||
| 1022 | return NULL; | |||
| 1023 | d->chunk_pages = q; | |||
| 1024 | d->chunk_pages_used = 0; | |||
| 1025 | STATS_ADD(d->malloc_used, MALLOC_PAGESIZE * | |||
| 1026 | chunk_pages); | |||
| 1027 | } | |||
| 1028 | q = (char *)d->chunk_pages + d->chunk_pages_used * | |||
| 1029 | MALLOC_PAGESIZE; | |||
| 1030 | d->chunk_pages_used++; | |||
| 1031 | ||||
| 1032 | for (i = 0; i < count; i++, q += size) { | |||
| 1033 | p = (struct chunk_info *)q; | |||
| 1034 | LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p, | |||
| 1035 | entries); | |||
| 1036 | } | |||
| 1037 | } | |||
| 1038 | p = LIST_FIRST(&d->chunk_info_list[bucket]); | |||
| 1039 | LIST_REMOVE(p, entries); | |||
| 1040 | if (p->total == 0) | |||
| 1041 | init_chunk_info(d, p, bucket); | |||
| 1042 | return p; | |||
| 1043 | } | |||
| 1044 | ||||
| 1045 | /* | |||
| 1046 | * Allocate a page of chunks | |||
| 1047 | */ | |||
| 1048 | static struct chunk_info * | |||
| 1049 | omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum) | |||
| 1050 | { | |||
| 1051 | struct chunk_info *bp; | |||
| 1052 | void *pp; | |||
| 1053 | void *ff = NULL; | |||
| 1054 | ||||
| 1055 | /* Allocate a new bucket */ | |||
| 1056 | pp = map(d, MALLOC_PAGESIZE, 0); | |||
| 1057 | if (pp == MAP_FAILED) | |||
| 1058 | return NULL; | |||
| 1059 | if (DO_STATS) { | |||
| 1060 | ff = map(d, MALLOC_PAGESIZE, 0); | |||
| 1061 | if (ff == MAP_FAILED) | |||
| 1062 | goto err; | |||
| 1063 | memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE / | |||
| 1064 | B2ALLOC(bucket)); | |||
| 1065 | } | |||
| 1066 | ||||
| 1067 | /* memory protect the page allocated in the malloc(0) case */ | |||
| 1068 | if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1) | |||
| 1069 | goto err; | |||
| 1070 | ||||
| 1071 | bp = alloc_chunk_info(d, bucket); | |||
| 1072 | if (bp == NULL) | |||
| 1073 | goto err; | |||
| 1074 | bp->page = pp; | |||
| 1075 | ||||
| 1076 | if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp, | |||
| 1077 | ff)) | |||
| 1078 | goto err; | |||
| 1079 | LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries); | |||
| 1080 | ||||
| 1081 | if (bucket > 0 && d->malloc_junk != 0) | |||
| 1082 | memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE); | |||
| 1083 | ||||
| 1084 | return bp; | |||
| 1085 | ||||
| 1086 | err: | |||
| 1087 | unmap(d, pp, MALLOC_PAGESIZE, 0); | |||
| 1088 | if (ff != NULL && ff != MAP_FAILED) | |||
| 1089 | unmap(d, ff, MALLOC_PAGESIZE, 0); | |||
| 1090 | return NULL((void *)0); | |||
| 1091 | } | |||
| 1092 | ||||
| 1093 | #if defined(__GNUC__) && __GNUC__ < 4 | |||
| 1094 | static inline unsigned int | |||
| 1095 | lb(u_int x) | |||
| 1096 | { | |||
| 1097 | #if defined(__m88k__) | |||
| 1098 | __asm__ __volatile__ ("ff1 %0, %0" : "=r" (x) : "0" (x)); | |||
| 1099 | return x; | |||
| 1100 | #else | |||
| 1101 | /* portable version */ | |||
| 1102 | unsigned int count = 0; | |||
| 1103 | while ((x & (1U << (sizeof(int) * CHAR_BIT - 1))) == 0) { | |||
| 1104 | count++; | |||
| 1105 | x <<= 1; | |||
| 1106 | } | |||
| 1107 | return (sizeof(int) * CHAR_BIT - 1) - count; | |||
| 1108 | #endif | |||
| 1109 | } | |||
| 1110 | #else | |||
| 1111 | /* using built-in function version */ | |||
| 1112 | static inline unsigned int | |||
| 1113 | lb(u_int x) | |||
| 1114 | { | |||
| 1115 | /* I need an extension just for integer-length (: */ | |||
| 1116 | return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x); | |||
| 1117 | } | |||
| 1118 | #endif | |||
| 1119 | ||||
| 1120 | /* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/ | |||
| 1121 | via Tony Finch */ | |||
| 1122 | static inline unsigned int | |||
| 1123 | bin_of(unsigned int size) | |||
| 1124 | { | |||
| 1125 | const unsigned int linear = 6; | |||
| 1126 | const unsigned int subbin = 2; | |||
| 1127 | ||||
| 1128 | unsigned int mask, rounded, rounded_size; | |||
| 1129 | unsigned int n_bits, shift; | |||
| 1130 | ||||
| 1131 | n_bits = lb(size | (1U << linear)); | |||
| 1132 | shift = n_bits - subbin; | |||
| 1133 | mask = (1ULL << shift) - 1; | |||
| 1134 | rounded = size + mask; /* XXX: overflow. */ | |||
| 1135 | ||||
| 1136 | rounded_size = rounded & ~mask; | |||
| 1137 | return rounded_size; | |||
| 1138 | } | |||
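A worked example of the linear-log bucketing above (illustrative, not part of malloc.c): with linear == 6 and subbin == 2, every power-of-two range is split into four sub-bins, so the rounding step doubles with the size.

    #include <assert.h>

    static unsigned
    round_size(unsigned size)
    {
            unsigned n_bits = 31 - __builtin_clz(size | (1u << 6));
            unsigned mask = (1u << (n_bits - 2)) - 1;

            return (size + mask) & ~mask;
    }

    int
    main(void)
    {
            assert(round_size(40) == 48);           /* below 64: 16-byte steps */
            assert(round_size(100) == 112);         /* 64..127: 16-byte steps */
            assert(round_size(200) == 224);         /* 128..255: 32-byte steps */
            assert(round_size(1000) == 1024);       /* 512..1023: 128-byte steps */
            return 0;
    }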
| 1139 | ||||
| 1140 | static inline u_short | |||
| 1141 | find_bucket(u_short size) | |||
| 1142 | { | |||
| 1143 | /* malloc(0) is special */ | |||
| 1144 | if (size == 0) | |||
| 1145 | return 0; | |||
| 1146 | if (size < MALLOC_MINSIZE) | |||
| 1147 | size = MALLOC_MINSIZE; | |||
| 1148 | if (mopts.def_maxcache != 0) | |||
| 1149 | size = bin_of(size); | |||
| 1150 | return howmany(size, MALLOC_MINSIZE); | |||
| 1151 | } | |||
| 1152 | ||||
| 1153 | static void | |||
| 1154 | fill_canary(char *ptr, size_t sz, size_t allocated) | |||
| 1155 | { | |||
| 1156 | size_t check_sz = allocated - sz; | |||
| 1157 | ||||
| 1158 | if (check_sz > CHUNK_CHECK_LENGTH) | |||
| 1159 | check_sz = CHUNK_CHECK_LENGTH; | |||
| 1160 | memset(ptr + sz, mopts.chunk_canaries, check_sz); | |||
| 1161 | } | |||
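A sketch of the resulting chunk canary layout (hypothetical values, not malloc's own code): a 40-byte request placed in a 48-byte bucket gets its last 8 bytes filled with the random canary byte, which validate_canary() later re-checks.

    #include <assert.h>
    #include <string.h>

    int
    main(void)
    {
            unsigned char chunk[48];
            size_t sz = 40, allocated = 48, check_sz = allocated - sz;
            unsigned char canary = 0xa7;    /* stand-in for mopts.chunk_canaries */

            if (check_sz > 32)              /* CHUNK_CHECK_LENGTH cap */
                    check_sz = 32;
            memset(chunk + sz, canary, check_sz);
            assert(chunk[40] == 0xa7 && chunk[47] == 0xa7);
            return 0;
    }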
| 1162 | ||||
| 1163 | /* | |||
| 1164 | * Allocate a chunk | |||
| 1165 | */ | |||
| 1166 | static void * | |||
| 1167 | malloc_bytes(struct dir_info *d, size_t size) | |||
| 1168 | { | |||
| 1169 | u_int i, j, k, r, bucket, listnum; | |||
| 1170 | u_short *lp; | |||
| 1171 | struct chunk_info *bp; | |||
| 1172 | void *p; | |||
| 1173 | ||||
| 1174 | if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) || | |||
| 1175 | d->canary1 != ~d->canary2) | |||
| 1176 | wrterror(d, "internal struct corrupt"); | |||
| 1177 | ||||
| 1178 | bucket = find_bucket(size); | |||
| 1179 | ||||
| 1180 | r = getrbyte(d); | |||
| 1181 | listnum = r % MALLOC_CHUNK_LISTS; | |||
| 1182 | ||||
| 1183 | /* If it's empty, make a page more of that size chunks */ | |||
| 1184 | if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) { | |||
| 1185 | bp = omalloc_make_chunks(d, bucket, listnum); | |||
| 1186 | if (bp == NULL) | |||
| 1187 | return NULL; | |||
| 1188 | } | |||
| 1189 | ||||
| 1190 | if (bp->canary != (u_short)d->canary1 || bucket != bp->bucket) | |||
| 1191 | wrterror(d, "chunk info corrupted"); | |||
| 1192 | ||||
| 1193 | r /= MALLOC_CHUNK_LISTS; | |||
| 1194 | /* do we need more random bits? */ | |||
| 1195 | if (bp->total > 256 / MALLOC_CHUNK_LISTS) | |||
| 1196 | r = r << 8 | getrbyte(d); | |||
| 1197 | /* bias, as bp->total is not a power of 2 */ | |||
| 1198 | i = r % bp->total; | |||
| 1199 | ||||
| 1200 | j = i % MALLOC_BITS; | |||
| 1201 | i /= MALLOC_BITS; | |||
| 1202 | lp = &bp->bits[i]; | |||
| 1203 | /* potentially start somewhere in a short */ | |||
| 1204 | if (j > 0 && *lp >> j) | |||
| 1205 | k = ffs(*lp >> j) + j; | |||
| 1206 | else { | |||
| 1207 | /* no bit halfway, go to next full short */ | |||
| 1208 | for (;;) { | |||
| 1209 | if (*lp) { | |||
| 1210 | k = ffs(*lp); | |||
| 1211 | break; | |||
| 1212 | } | |||
| 1213 | if (++i >= bp->offset) | |||
| 1214 | i = 0; | |||
| 1215 | lp = &bp->bits[i]; | |||
| 1216 | } | |||
| 1217 | } | |||
| 1218 | *lp ^= 1 << --k; | |||
| 1219 | ||||
| 1220 | /* If there are no more free chunks, remove from the free-list */ | |||
| 1221 | if (--bp->free == 0) | |||
| 1222 | LIST_REMOVE(bp, entries)do { if ((bp)->entries.le_next != ((void *)0)) (bp)->entries .le_next->entries.le_prev = (bp)->entries.le_prev; *(bp )->entries.le_prev = (bp)->entries.le_next; ; ; } while (0); | |||
| 1223 | ||||
| 1224 | /* Adjust to the real offset of that chunk */ | |||
| 1225 | k += i * MALLOC_BITS(8 * sizeof(u_short)); | |||
| 1226 | ||||
| 1227 | if (moptsmalloc_readonly.mopts.chunk_canaries && size > 0) | |||
| 1228 | bp->bits[bp->offset + k] = size; | |||
| 1229 | ||||
| 1230 | if (DO_STATSmalloc_readonly.mopts.malloc_stats) { | |||
| 1231 | struct region_info *r = find(d, bp->page); | |||
| 1232 | STATS_SETFN(r, k, d->caller)((r)->f[k] = (d->caller)); | |||
| 1233 | } | |||
| 1234 | ||||
| 1235 | p = (char *)bp->page + k * B2ALLOC(bucket)((bucket) == 0 ? (1UL << 4) : (bucket) * (1UL << 4 )); | |||
| 1236 | if (bucket > 0) { | |||
| 1237 | validate_junk(d, p, B2SIZE(bucket)((bucket) * (1UL << 4))); | |||
| 1238 | if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 1239 | fill_canary(p, size, B2SIZE(bucket)((bucket) * (1UL << 4))); | |||
| 1240 | } | |||
| 1241 | return p; | |||
| 1242 | } | |||
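The slot search in malloc_bytes() is compact but subtle: a random bit position is chosen, ffs() (which is 1-based) finds the next set bit at or after it, the scan wraps across the array of u_short words, and the chosen bit is cleared to mark the chunk as allocated. Below is a minimal sketch of just that search with the canary and stats bookkeeping stripped away; nshorts stands in for bp->offset, and the caller is assumed to guarantee at least one free bit.

#include <strings.h>	/* ffs() */
#include <sys/types.h>	/* u_short */

static unsigned int
take_slot(u_short *bits, unsigned int nshorts, unsigned int start)
{
	unsigned int i = start / 16, j = start % 16, k;
	u_short *lp = &bits[i];

	if (j > 0 && (*lp >> j))
		k = ffs(*lp >> j) + j;	/* a set bit at or after j */
	else {
		for (;;) {		/* scan whole shorts, wrapping around */
			if (*lp) {
				k = ffs(*lp);
				break;
			}
			if (++i >= nshorts)
				i = 0;
			lp = &bits[i];
		}
	}
	*lp ^= 1 << --k;		/* ffs() is 1-based; clear the free bit */
	return i * 16 + k;		/* chunk number on the page */
}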
| 1243 | ||||
| 1244 | static void | |||
| 1245 | validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated) | |||
| 1246 | { | |||
| 1247 | size_t check_sz = allocated - sz; | |||
| 1248 | u_char *p, *q; | |||
| 1249 | ||||
| 1250 | if (check_sz > CHUNK_CHECK_LENGTH32) | |||
| 1251 | check_sz = CHUNK_CHECK_LENGTH32; | |||
| 1252 | p = ptr + sz; | |||
| 1253 | q = p + check_sz; | |||
| 1254 | ||||
| 1255 | while (p < q) { | |||
| 1256 | if (*p != (u_char)moptsmalloc_readonly.mopts.chunk_canaries && *p != SOME_JUNK0xdb) { | |||
| 1257 | wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s", | |||
| 1258 | ptr, p - ptr, sz, allocated, | |||
| 1259 | *p == SOME_FREEJUNK0xdf ? " (double free?)" : ""); | |||
| 1260 | } | |||
| 1261 | p++; | |||
| 1262 | } | |||
| 1263 | } | |||
| 1264 | ||||
| 1265 | static inline uint32_t | |||
| 1266 | find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check) | |||
| 1267 | { | |||
| 1268 | uint32_t chunknum; | |||
| 1269 | ||||
| 1270 | if (info->canary != (u_short)d->canary1) | |||
| 1271 | wrterror(d, "chunk info corrupted"); | |||
| 1272 | ||||
| 1273 | /* Find the chunk number on the page */ | |||
| 1274 | chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK((1UL << 12) - 1)) / B2ALLOC(info->bucket)((info->bucket) == 0 ? (1UL << 4) : (info->bucket ) * (1UL << 4)); | |||
| 1275 | ||||
| 1276 | if ((uintptr_t)ptr & (MALLOC_MINSIZE(1UL << 4) - 1)) | |||
| 1277 | wrterror(d, "modified chunk-pointer %p", ptr); | |||
| 1278 | if (CHUNK_FREE(info, chunknum)((info)->bits[(chunknum) / (8 * sizeof(u_short))] & (1U << ((chunknum) % (8 * sizeof(u_short)))))) | |||
| 1279 | wrterror(d, "double free %p", ptr); | |||
| 1280 | if (check && info->bucket > 0) { | |||
| 1281 | validate_canary(d, ptr, info->bits[info->offset + chunknum], | |||
| 1282 | B2SIZE(info->bucket)((info->bucket) * (1UL << 4))); | |||
| 1283 | } | |||
| 1284 | return chunknum; | |||
| 1285 | } | |||
| 1286 | ||||
| 1287 | /* | |||
| 1288 | * Free a chunk, and possibly the page it's on, if the page becomes empty. | |||
| 1289 | */ | |||
| 1290 | static void | |||
| 1291 | free_bytes(struct dir_info *d, struct region_info *r, void *ptr) | |||
| 1292 | { | |||
| 1293 | struct chunk_head *mp; | |||
| 1294 | struct chunk_info *info; | |||
| 1295 | uint32_t chunknum; | |||
| 1296 | uint32_t listnum; | |||
| 1297 | ||||
| 1298 | info = (struct chunk_info *)r->size; | |||
| 1299 | chunknum = find_chunknum(d, info, ptr, 0); | |||
| 1300 | ||||
| 1301 | info->bits[chunknum / MALLOC_BITS(8 * sizeof(u_short))] |= 1U << (chunknum % MALLOC_BITS(8 * sizeof(u_short))); | |||
| 1302 | info->free++; | |||
| 1303 | ||||
| 1304 | if (info->free == 1) { | |||
| 1305 | /* Page became non-full */ | |||
| 1306 | listnum = getrbyte(d) % MALLOC_CHUNK_LISTS4; | |||
| 1307 | mp = &d->chunk_dir[info->bucket][listnum]; | |||
| 1308 | LIST_INSERT_HEAD(mp, info, entries)do { if (((info)->entries.le_next = (mp)->lh_first) != ( (void *)0)) (mp)->lh_first->entries.le_prev = &(info )->entries.le_next; (mp)->lh_first = (info); (info)-> entries.le_prev = &(mp)->lh_first; } while (0); | |||
| 1309 | return; | |||
| 1310 | } | |||
| 1311 | ||||
| 1312 | if (info->free != info->total) | |||
| 1313 | return; | |||
| 1314 | ||||
| 1315 | LIST_REMOVE(info, entries)do { if ((info)->entries.le_next != ((void *)0)) (info)-> entries.le_next->entries.le_prev = (info)->entries.le_prev ; *(info)->entries.le_prev = (info)->entries.le_next; ; ; } while (0); | |||
| 1316 | ||||
| 1317 | if (info->bucket == 0 && !moptsmalloc_readonly.mopts.malloc_freeunmap) | |||
| 1318 | mprotect(info->page, MALLOC_PAGESIZE(1UL << 12), PROT_READ0x01 | PROT_WRITE0x02); | |||
| 1319 | unmap(d, info->page, MALLOC_PAGESIZE(1UL << 12), 0); | |||
| 1320 | #ifdef MALLOC_STATS | |||
| 1321 | if (r->f != NULL((void *)0)) { | |||
| 1322 | unmap(d, r->f, MALLOC_PAGESIZE(1UL << 12), MALLOC_PAGESIZE(1UL << 12)); | |||
| 1323 | r->f = NULL((void *)0); | |||
| 1324 | } | |||
| 1325 | #endif | |||
| 1326 | ||||
| 1327 | delete(d, r); | |||
| 1328 | mp = &d->chunk_info_list[info->bucket]; | |||
| 1329 | LIST_INSERT_HEAD(mp, info, entries)do { if (((info)->entries.le_next = (mp)->lh_first) != ( (void *)0)) (mp)->lh_first->entries.le_prev = &(info )->entries.le_next; (mp)->lh_first = (info); (info)-> entries.le_prev = &(mp)->lh_first; } while (0); | |||
| 1330 | } | |||
| 1331 | ||||
| 1332 | static void * | |||
| 1333 | omalloc(struct dir_info *pool, size_t sz, int zero_fill) | |||
| 1334 | { | |||
| 1335 | void *p, *caller = NULL((void *)0); | |||
| 1336 | size_t psz; | |||
| 1337 | ||||
| 1338 | if (sz > MALLOC_MAXCHUNK(1 << (12 - 1))) { | |||
| 1339 | if (sz >= SIZE_MAX0xffffffffffffffffUL - moptsmalloc_readonly.mopts.malloc_guard - MALLOC_PAGESIZE(1UL << 12)) { | |||
| 1340 | errno(*__errno()) = ENOMEM12; | |||
| 1341 | return NULL((void *)0); | |||
| 1342 | } | |||
| 1343 | sz += moptsmalloc_readonly.mopts.malloc_guard; | |||
| 1344 | psz = PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1)); | |||
| 1345 | p = map(pool, psz, zero_fill); | |||
| 1346 | if (p == MAP_FAILED((void *)-1)) { | |||
| 1347 | errno(*__errno()) = ENOMEM12; | |||
| 1348 | return NULL((void *)0); | |||
| 1349 | } | |||
| 1350 | #ifdef MALLOC_STATS | |||
| 1351 | if (DO_STATSmalloc_readonly.mopts.malloc_stats) | |||
| 1352 | caller = pool->caller; | |||
| 1353 | #endif | |||
| 1354 | if (insert(pool, p, sz, caller)) { | |||
| 1355 | unmap(pool, p, psz, 0); | |||
| 1356 | errno(*__errno()) = ENOMEM12; | |||
| 1357 | return NULL((void *)0); | |||
| 1358 | } | |||
| 1359 | if (moptsmalloc_readonly.mopts.malloc_guard) { | |||
| 1360 | if (mprotect((char *)p + psz - moptsmalloc_readonly.mopts.malloc_guard, | |||
| 1361 | moptsmalloc_readonly.mopts.malloc_guard, PROT_NONE0x00)) | |||
| 1362 | wrterror(pool, "mprotect"); | |||
| 1363 | STATS_ADD(pool->malloc_guarded, mopts.malloc_guard)((pool->malloc_guarded) += (malloc_readonly.mopts.malloc_guard )); | |||
| 1364 | } | |||
| 1365 | ||||
| 1366 | if (MALLOC_MOVE_COND(sz)((sz) - malloc_readonly.mopts.malloc_guard < (1UL << 12) - 0)) { | |||
| 1367 | /* fill whole allocation */ | |||
| 1368 | if (pool->malloc_junk == 2) | |||
| 1369 | memset(p, SOME_JUNK0xdb, psz - moptsmalloc_readonly.mopts.malloc_guard); | |||
| 1370 | /* shift towards the end */ | |||
| 1371 | p = MALLOC_MOVE(p, sz)(((char *)(p)) + (((1UL << 12) - 0 - ((sz) - malloc_readonly .mopts.malloc_guard)) & ~((1UL << 4) - 1))); | |||
| 1372 | /* fill with zeros if needed and they were overwritten above */ | |||
| 1373 | if (zero_fill && pool->malloc_junk == 2) | |||
| 1374 | memset(p, 0, sz - moptsmalloc_readonly.mopts.malloc_guard); | |||
| 1375 | } else { | |||
| 1376 | if (pool->malloc_junk == 2) { | |||
| 1377 | if (zero_fill) | |||
| 1378 | memset((char *)p + sz - | |||
| 1379 | moptsmalloc_readonly.mopts.malloc_guard, SOME_JUNK0xdb, | |||
| 1380 | psz - sz); | |||
| 1381 | else | |||
| 1382 | memset(p, SOME_JUNK0xdb, | |||
| 1383 | psz - moptsmalloc_readonly.mopts.malloc_guard); | |||
| 1384 | } else if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 1385 | fill_canary(p, sz - moptsmalloc_readonly.mopts.malloc_guard, | |||
| 1386 | psz - moptsmalloc_readonly.mopts.malloc_guard); | |||
| 1387 | } | |||
| 1388 | ||||
| 1389 | } else { | |||
| 1390 | /* takes care of SOME_JUNK */ | |||
| 1391 | p = malloc_bytes(pool, sz); | |||
| 1392 | if (zero_fill && p != NULL((void *)0) && sz > 0) | |||
| 1393 | memset(p, 0, sz); | |||
| 1394 | } | |||
| 1395 | ||||
| 1396 | return p; | |||
| 1397 | } | |||
| 1398 | ||||
| 1399 | /* | |||
| 1400 | * Common function for handling recursion. Only | |||
| 1401 | * print the error message once, to avoid making the problem | |||
| 1402 | * potentially worse. | |||
| 1403 | */ | |||
| 1404 | static void | |||
| 1405 | malloc_recurse(struct dir_info *d) | |||
| 1406 | { | |||
| 1407 | static int noprint; | |||
| 1408 | ||||
| 1409 | if (noprint == 0) { | |||
| 1410 | noprint = 1; | |||
| 1411 | wrterror(d, "recursive call"); | |||
| 1412 | } | |||
| 1413 | d->active--; | |||
| 1414 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 1415 | errno(*__errno()) = EDEADLK11; | |||
| 1416 | } | |||
| 1417 | ||||
| 1418 | void | |||
| 1419 | _malloc_init(int from_rthreads) | |||
| 1420 | { | |||
| 1421 | u_int i, j, nmutexes; | |||
| 1422 | struct dir_info *d; | |||
| 1423 | ||||
| 1424 | _MALLOC_LOCK(1)do { if (__isthreaded) _thread_cb.tc_malloc_lock(1); } while ( 0); | |||
| 1425 | if (!from_rthreads && moptsmalloc_readonly.mopts.malloc_pool[1]) { | |||
| 1426 | _MALLOC_UNLOCK(1)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(1); } while (0); | |||
| 1427 | return; | |||
| 1428 | } | |||
| 1429 | if (!moptsmalloc_readonly.mopts.malloc_canary) { | |||
| 1430 | char *p; | |||
| 1431 | size_t sz, d_avail; | |||
| 1432 | ||||
| 1433 | omalloc_init(); | |||
| 1434 | /* | |||
| 1435 | * Allocate dir_infos with a guard page on either side. Also | |||
| 1436 | * randomise offset inside the page at which the dir_infos | |||
| 1437 | * lay (subject to alignment by 1 << MALLOC_MINSHIFT) | |||
| 1438 | */ | |||
| 1439 | sz = moptsmalloc_readonly.mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE(1UL << 12); | |||
| 1440 | if ((p = MMAPNONE(sz, 0)mmap(((void *)0), (sz), 0x00, 0x1000 | 0x0002 | (0), -1, 0)) == MAP_FAILED((void *)-1)) | |||
| 1441 | wrterror(NULL((void *)0), "malloc_init mmap1 failed"); | |||
| 1442 | if (mprotect(p + MALLOC_PAGESIZE(1UL << 12), moptsmalloc_readonly.mopts.malloc_mutexes * | |||
| 1443 | sizeof(*d), PROT_READ0x01 | PROT_WRITE0x02)) | |||
| 1444 | wrterror(NULL((void *)0), "malloc_init mprotect1 failed"); | |||
| 1445 | if (mimmutable(p, sz)) | |||
| 1446 | wrterror(NULL((void *)0), "malloc_init mimmutable1 failed"); | |||
| 1447 | d_avail = (((moptsmalloc_readonly.mopts.malloc_mutexes * sizeof(*d) + | |||
| 1448 | MALLOC_PAGEMASK((1UL << 12) - 1)) & ~MALLOC_PAGEMASK((1UL << 12) - 1)) - | |||
| 1449 | (moptsmalloc_readonly.mopts.malloc_mutexes * sizeof(*d))) >> MALLOC_MINSHIFT4; | |||
| 1450 | d = (struct dir_info *)(p + MALLOC_PAGESIZE(1UL << 12) + | |||
| 1451 | (arc4random_uniform(d_avail) << MALLOC_MINSHIFT4)); | |||
| 1452 | STATS_ADD(d[1].malloc_used, sz)((d[1].malloc_used) += (sz)); | |||
| 1453 | for (i = 0; i < moptsmalloc_readonly.mopts.malloc_mutexes; i++) | |||
| 1454 | moptsmalloc_readonly.mopts.malloc_pool[i] = &d[i]; | |||
| 1455 | moptsmalloc_readonly.mopts.internal_funcs = 1; | |||
| 1456 | if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK((1UL << 12) - 1)) == 0) { | |||
| 1457 | if (mprotect(&malloc_readonly, sizeof(malloc_readonly), | |||
| 1458 | PROT_READ0x01)) | |||
| 1459 | wrterror(NULL((void *)0), | |||
| 1460 | "malloc_init mprotect r/o failed"); | |||
| 1461 | if (mimmutable(&malloc_readonly, | |||
| 1462 | sizeof(malloc_readonly))) | |||
| 1463 | wrterror(NULL((void *)0), | |||
| 1464 | "malloc_init mimmutable r/o failed"); | |||
| 1465 | } | |||
| 1466 | } | |||
| 1467 | ||||
| 1468 | nmutexes = from_rthreads ? moptsmalloc_readonly.mopts.malloc_mutexes : 2; | |||
| 1469 | for (i = 0; i < nmutexes; i++) { | |||
| 1470 | d = moptsmalloc_readonly.mopts.malloc_pool[i]; | |||
| 1471 | d->malloc_mt = from_rthreads; | |||
| 1472 | if (d->canary1 == ~d->canary2) | |||
| 1473 | continue; | |||
| 1474 | if (i == 0) { | |||
| 1475 | omalloc_poolinit(d, MAP_CONCEAL0x8000); | |||
| 1476 | d->malloc_junk = 2; | |||
| 1477 | d->bigcache_size = 0; | |||
| 1478 | for (j = 0; j < MAX_SMALLCACHEABLE_SIZE32; j++) | |||
| 1479 | d->smallcache[j].max = 0; | |||
| 1480 | } else { | |||
| 1481 | size_t sz = 0; | |||
| 1482 | ||||
| 1483 | omalloc_poolinit(d, 0); | |||
| 1484 | d->malloc_junk = moptsmalloc_readonly.mopts.def_malloc_junk; | |||
| 1485 | d->bigcache_size = moptsmalloc_readonly.mopts.def_maxcache; | |||
| 1486 | for (j = 0; j < MAX_SMALLCACHEABLE_SIZE32; j++) { | |||
| 1487 | d->smallcache[j].max = | |||
| 1488 | moptsmalloc_readonly.mopts.def_maxcache >> (j / 8); | |||
| 1489 | sz += d->smallcache[j].max * sizeof(void *); | |||
| 1490 | } | |||
| 1491 | sz += d->bigcache_size * sizeof(struct bigcache); | |||
| 1492 | if (sz > 0) { | |||
| 1493 | void *p = MMAP(sz, 0)mmap(((void *)0), (sz), 0x01 | 0x02, 0x1000 | 0x0002 | (0), - 1, 0); | |||
| 1494 | if (p == MAP_FAILED((void *)-1)) | |||
| 1495 | wrterror(NULL((void *)0), | |||
| 1496 | "malloc_init mmap2 failed"); | |||
| 1497 | if (mimmutable(p, sz)) | |||
| 1498 | wrterror(NULL((void *)0), | |||
| 1499 | "malloc_init mimmutable2 failed"); | |||
| 1500 | for (j = 0; j < MAX_SMALLCACHEABLE_SIZE32; j++) { | |||
| 1501 | d->smallcache[j].pages = p; | |||
| 1502 | p = (char *)p + d->smallcache[j].max * | |||
| 1503 | sizeof(void *); | |||
| 1504 | } | |||
| 1505 | d->bigcache = p; | |||
| 1506 | } | |||
| 1507 | } | |||
| 1508 | d->mutex = i; | |||
| 1509 | } | |||
| 1510 | ||||
| 1511 | _MALLOC_UNLOCK(1)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(1); } while (0); | |||
| 1512 | } | |||
| 1513 | DEF_STRONG(_malloc_init)__asm__(".global " "_malloc_init" " ; " "_malloc_init" " = " "_libc__malloc_init" ); | |||
| 1514 | ||||
| 1515 | #define PROLOGUE(p, fn)d = (p); if (d == ((void *)0)) { _malloc_init(0); d = (p); } do { if (__isthreaded) _thread_cb.tc_malloc_lock(d->mutex); } while (0); d->func = fn; if (d->active++) { malloc_recurse (d); return ((void *)0); } \ | |||
| 1516 | d = (p); \ | |||
| 1517 | if (d == NULL((void *)0)) { \ | |||
| 1518 | _malloc_init(0); \ | |||
| 1519 | d = (p); \ | |||
| 1520 | } \ | |||
| 1521 | _MALLOC_LOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_lock(d->mutex) ; } while (0); \ | |||
| 1522 | d->func = fn; \ | |||
| 1523 | if (d->active++) { \ | |||
| 1524 | malloc_recurse(d); \ | |||
| 1525 | return NULL((void *)0); \ | |||
| 1526 | } \ | |||
| 1527 | ||||
| 1528 | #define EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; \ | |||
| 1529 | d->active--; \ | |||
| 1530 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); \ | |||
| 1531 | if (r == NULL((void *)0) && moptsmalloc_readonly.mopts.malloc_xmalloc) \ | |||
| 1532 | wrterror(d, "out of memory"); \ | |||
| 1533 | if (r != NULL((void *)0)) \ | |||
| 1534 | errno(*__errno()) = saved_errno; \ | |||
| 1535 | ||||
| 1536 | void * | |||
| 1537 | malloc(size_t size) | |||
| 1538 | { | |||
| 1539 | void *r; | |||
| 1540 | struct dir_info *d; | |||
| 1541 | int saved_errno = errno(*__errno()); | |||
| 1542 | ||||
| 1543 | PROLOGUE(getpool(), "malloc")d = (getpool()); if (d == ((void *)0)) { _malloc_init(0); d = (getpool()); } do { if (__isthreaded) _thread_cb.tc_malloc_lock (d->mutex); } while (0); d->func = "malloc"; if (d-> active++) { malloc_recurse(d); return ((void *)0); } | |||
| 1544 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 1545 | r = omalloc(d, size, 0); | |||
| 1546 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 1547 | return r; | |||
| 1548 | } | |||
| 1549 | DEF_STRONG(malloc)__asm__(".global " "malloc" " ; " "malloc" " = " "_libc_malloc" ); | |||
| 1550 | ||||
| 1551 | void * | |||
| 1552 | malloc_conceal(size_t size) | |||
| 1553 | { | |||
| 1554 | void *r; | |||
| 1555 | struct dir_info *d; | |||
| 1556 | int saved_errno = errno(*__errno()); | |||
| 1557 | ||||
| 1558 | PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")d = (malloc_readonly.mopts.malloc_pool[0]); if (d == ((void * )0)) { _malloc_init(0); d = (malloc_readonly.mopts.malloc_pool [0]); } do { if (__isthreaded) _thread_cb.tc_malloc_lock(d-> mutex); } while (0); d->func = "malloc_conceal"; if (d-> active++) { malloc_recurse(d); return ((void *)0); } | |||
| 1559 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 1560 | r = omalloc(d, size, 0); | |||
| 1561 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 1562 | return r; | |||
| 1563 | } | |||
| 1564 | DEF_WEAK(malloc_conceal)__asm__(".weak " "malloc_conceal" " ; " "malloc_conceal" " = " "_libc_malloc_conceal"); | |||
| 1565 | ||||
| 1566 | static struct region_info * | |||
| 1567 | findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool, | |||
| 1568 | const char ** saved_function) | |||
| 1569 | { | |||
| 1570 | struct dir_info *pool = argpool; | |||
| 1571 | struct region_info *r = find(pool, p); | |||
| 1572 | ||||
| 1573 | if (r == NULL((void *)0)) { | |||
| 1574 | u_int i, nmutexes; | |||
| 1575 | ||||
| 1576 | nmutexes = moptsmalloc_readonly.mopts.malloc_pool[1]->malloc_mt ? | |||
| 1577 | moptsmalloc_readonly.mopts.malloc_mutexes : 2; | |||
| 1578 | for (i = 1; i < nmutexes; i++) { | |||
| 1579 | u_int j = (argpool->mutex + i) & (nmutexes - 1); | |||
| 1580 | ||||
| 1581 | pool->active--; | |||
| 1582 | _MALLOC_UNLOCK(pool->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(pool->mutex ); } while (0); | |||
| 1583 | pool = moptsmalloc_readonly.mopts.malloc_pool[j]; | |||
| 1584 | _MALLOC_LOCK(pool->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_lock(pool->mutex ); } while (0); | |||
| 1585 | pool->active++; | |||
| 1586 | r = find(pool, p); | |||
| 1587 | if (r != NULL((void *)0)) { | |||
| 1588 | *saved_function = pool->func; | |||
| 1589 | pool->func = argpool->func; | |||
| 1590 | break; | |||
| 1591 | } | |||
| 1592 | } | |||
| 1593 | if (r == NULL((void *)0)) | |||
| 1594 | wrterror(argpool, "bogus pointer (double free?) %p", p); | |||
| 1595 | } | |||
| 1596 | *foundpool = pool; | |||
| 1597 | return r; | |||
| 1598 | } | |||
| 1599 | ||||
| 1600 | static void | |||
| 1601 | ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz) | |||
| 1602 | { | |||
| 1603 | struct region_info *r; | |||
| 1604 | struct dir_info *pool; | |||
| 1605 | const char *saved_function; | |||
| 1606 | size_t sz; | |||
| 1607 | ||||
| 1608 | r = findpool(p, *argpool, &pool, &saved_function); | |||
| 1609 | ||||
| 1610 | REALSIZE(sz, r)(sz) = (uintptr_t)(r)->p & ((1UL << 12) - 1), (sz ) = ((sz) == 0 ? (r)->size : (((sz) - 1) * (1UL << 4 ))); | |||
| 1611 | if (pool->mmap_flag) { | |||
| 1612 | clear = 1; | |||
| 1613 | if (!check) { | |||
| 1614 | argsz = sz; | |||
| 1615 | if (sz > MALLOC_MAXCHUNK(1 << (12 - 1))) | |||
| 1616 | argsz -= moptsmalloc_readonly.mopts.malloc_guard; | |||
| 1617 | } | |||
| 1618 | } | |||
| 1619 | if (check) { | |||
| 1620 | if (sz <= MALLOC_MAXCHUNK(1 << (12 - 1))) { | |||
| 1621 | if (moptsmalloc_readonly.mopts.chunk_canaries && sz > 0) { | |||
| 1622 | struct chunk_info *info = | |||
| 1623 | (struct chunk_info *)r->size; | |||
| 1624 | uint32_t chunknum = | |||
| 1625 | find_chunknum(pool, info, p, 0); | |||
| 1626 | ||||
| 1627 | if (info->bits[info->offset + chunknum] < argsz) | |||
| 1628 | wrterror(pool, "recorded size %hu" | |||
| 1629 | " < %zu", | |||
| 1630 | info->bits[info->offset + chunknum], | |||
| 1631 | argsz); | |||
| 1632 | } else { | |||
| 1633 | if (sz < argsz) | |||
| 1634 | wrterror(pool, "chunk size %zu < %zu", | |||
| 1635 | sz, argsz); | |||
| 1636 | } | |||
| 1637 | } else if (sz - moptsmalloc_readonly.mopts.malloc_guard < argsz) { | |||
| 1638 | wrterror(pool, "recorded size %zu < %zu", | |||
| 1639 | sz - moptsmalloc_readonly.mopts.malloc_guard, argsz); | |||
| 1640 | } | |||
| 1641 | } | |||
| 1642 | if (sz > MALLOC_MAXCHUNK(1 << (12 - 1))) { | |||
| 1643 | if (!MALLOC_MOVE_COND(sz)((sz) - malloc_readonly.mopts.malloc_guard < (1UL << 12) - 0)) { | |||
| 1644 | if (r->p != p) | |||
| 1645 | wrterror(pool, "bogus pointer %p", p); | |||
| 1646 | if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 1647 | validate_canary(pool, p, | |||
| 1648 | sz - moptsmalloc_readonly.mopts.malloc_guard, | |||
| 1649 | PAGEROUND(sz - mopts.malloc_guard)(((sz - malloc_readonly.mopts.malloc_guard) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1))); | |||
| 1650 | } else { | |||
| 1651 | /* shifted towards the end */ | |||
| 1652 | if (p != MALLOC_MOVE(r->p, sz)(((char *)(r->p)) + (((1UL << 12) - 0 - ((sz) - malloc_readonly .mopts.malloc_guard)) & ~((1UL << 4) - 1)))) | |||
| 1653 | wrterror(pool, "bogus moved pointer %p", p); | |||
| 1654 | p = r->p; | |||
| 1655 | } | |||
| 1656 | if (moptsmalloc_readonly.mopts.malloc_guard) { | |||
| 1657 | if (sz < moptsmalloc_readonly.mopts.malloc_guard) | |||
| 1658 | wrterror(pool, "guard size"); | |||
| 1659 | if (!moptsmalloc_readonly.mopts.malloc_freeunmap) { | |||
| 1660 | if (mprotect((char *)p + PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1)) - | |||
| 1661 | moptsmalloc_readonly.mopts.malloc_guard, moptsmalloc_readonly.mopts.malloc_guard, | |||
| 1662 | PROT_READ0x01 | PROT_WRITE0x02)) | |||
| 1663 | wrterror(pool, "mprotect"); | |||
| 1664 | } | |||
| 1665 | STATS_SUB(pool->malloc_guarded, mopts.malloc_guard)((pool->malloc_guarded) -= (malloc_readonly.mopts.malloc_guard )); | |||
| 1666 | } | |||
| 1667 | unmap(pool, p, PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1)), clear ? argsz : 0); | |||
| 1668 | delete(pool, r); | |||
| 1669 | } else { | |||
| 1670 | void *tmp; | |||
| 1671 | u_int i; | |||
| 1672 | ||||
| 1673 | /* Validate and optionally canary check */ | |||
| 1674 | struct chunk_info *info = (struct chunk_info *)r->size; | |||
| 1675 | if (B2SIZE(info->bucket)((info->bucket) * (1UL << 4)) != sz) | |||
| 1676 | wrterror(pool, "internal struct corrupt"); | |||
| 1677 | find_chunknum(pool, info, p, moptsmalloc_readonly.mopts.chunk_canaries); | |||
| 1678 | ||||
| 1679 | if (moptsmalloc_readonly.mopts.malloc_freecheck) { | |||
| 1680 | for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK15; i++) { | |||
| 1681 | tmp = pool->delayed_chunks[i]; | |||
| 1682 | if (tmp == p) | |||
| 1683 | wrterror(pool, | |||
| 1684 | "double free %p", p); | |||
| 1685 | if (tmp != NULL((void *)0)) { | |||
| 1686 | size_t tmpsz; | |||
| 1687 | ||||
| 1688 | r = find(pool, tmp); | |||
| 1689 | if (r == NULL((void *)0)) | |||
| 1690 | wrterror(pool, | |||
| 1691 | "bogus pointer (" | |||
| 1692 | "double free?) %p", tmp); | |||
| 1693 | REALSIZE(tmpsz, r)(tmpsz) = (uintptr_t)(r)->p & ((1UL << 12) - 1), (tmpsz) = ((tmpsz) == 0 ? (r)->size : (((tmpsz) - 1) * (1UL << 4))); | |||
| 1694 | validate_junk(pool, tmp, tmpsz); | |||
| 1695 | } | |||
| 1696 | } | |||
| 1697 | } | |||
| 1698 | ||||
| 1699 | if (clear && argsz > 0) | |||
| 1700 | explicit_bzero(p, argsz); | |||
| 1701 | junk_free(pool->malloc_junk, p, sz); | |||
| 1702 | ||||
| 1703 | i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK15; | |||
| 1704 | tmp = p; | |||
| 1705 | p = pool->delayed_chunks[i]; | |||
| 1706 | if (tmp == p) | |||
| 1707 | wrterror(pool, "double free %p", p); | |||
| 1708 | pool->delayed_chunks[i] = tmp; | |||
| 1709 | if (p != NULL((void *)0)) { | |||
| 1710 | r = find(pool, p); | |||
| 1711 | if (r == NULL((void *)0)) | |||
| 1712 | wrterror(pool, | |||
| 1713 | "bogus pointer (double free?) %p", p); | |||
| 1714 | if (!moptsmalloc_readonly.mopts.malloc_freecheck) { | |||
| 1715 | REALSIZE(sz, r)(sz) = (uintptr_t)(r)->p & ((1UL << 12) - 1), (sz ) = ((sz) == 0 ? (r)->size : (((sz) - 1) * (1UL << 4 ))); | |||
| 1716 | validate_junk(pool, p, sz); | |||
| 1717 | } | |||
| 1718 | free_bytes(pool, r, p); | |||
| 1719 | } | |||
| 1720 | } | |||
| 1721 | ||||
| 1722 | if (*argpool != pool) { | |||
| 1723 | pool->func = saved_function; | |||
| 1724 | *argpool = pool; | |||
| 1725 | } | |||
| 1726 | } | |||
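For chunk-sized frees, the tail of ofree() above routes the pointer through a small randomized ring (pool->delayed_chunks), so the chunk actually handed to free_bytes() is one that was freed some calls earlier; that delay is what makes the junk validation and double-free checks effective. A simplified standalone sketch of the exchange follows (the real code also junk-fills the new entry and validates the older one before releasing it):

#include <stdlib.h>

static void *delayed_chunks[16];	/* 16 slots, as MALLOC_DELAYED_CHUNK_MASK above */

static void
delayed_free(void *just_freed)
{
	unsigned int slot = arc4random() & 15;	/* pick a random slot */
	void *victim = delayed_chunks[slot];	/* may be NULL early on */

	if (victim == just_freed)
		abort();			/* double free of a delayed chunk */
	delayed_chunks[slot] = just_freed;
	if (victim != NULL)
		free(victim);			/* release the older chunk instead */
}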
| 1727 | ||||
| 1728 | void | |||
| 1729 | free(void *ptr) | |||
| 1730 | { | |||
| 1731 | struct dir_info *d; | |||
| 1732 | int saved_errno = errno(*__errno()); | |||
| 1733 | ||||
| 1734 | /* This is legal. */ | |||
| 1735 | if (ptr == NULL((void *)0)) | |||
| 1736 | return; | |||
| 1737 | ||||
| 1738 | d = getpool(); | |||
| 1739 | if (d == NULL((void *)0)) | |||
| 1740 | wrterror(d, "free() called before allocation"); | |||
| 1741 | _MALLOC_LOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_lock(d->mutex) ; } while (0); | |||
| 1742 | d->func = "free"; | |||
| 1743 | if (d->active++) { | |||
| 1744 | malloc_recurse(d); | |||
| 1745 | return; | |||
| 1746 | } | |||
| 1747 | ofree(&d, ptr, 0, 0, 0); | |||
| 1748 | d->active--; | |||
| 1749 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 1750 | errno(*__errno()) = saved_errno; | |||
| 1751 | } | |||
| 1752 | DEF_STRONG(free)__asm__(".global " "free" " ; " "free" " = " "_libc_free"); | |||
| 1753 | ||||
| 1754 | static void | |||
| 1755 | freezero_p(void *ptr, size_t sz) | |||
| 1756 | { | |||
| 1757 | explicit_bzero(ptr, sz); | |||
| 1758 | free(ptr); | |||
| 1759 | } | |||
| 1760 | ||||
| 1761 | void | |||
| 1762 | freezero(void *ptr, size_t sz) | |||
| 1763 | { | |||
| 1764 | struct dir_info *d; | |||
| 1765 | int saved_errno = errno(*__errno()); | |||
| 1766 | ||||
| 1767 | /* This is legal. */ | |||
| 1768 | if (ptr == NULL((void *)0)) | |||
| 1769 | return; | |||
| 1770 | ||||
| 1771 | if (!moptsmalloc_readonly.mopts.internal_funcs) { | |||
| 1772 | freezero_p(ptr, sz); | |||
| 1773 | return; | |||
| 1774 | } | |||
| 1775 | ||||
| 1776 | d = getpool(); | |||
| 1777 | if (d == NULL((void *)0)) | |||
| 1778 | wrterror(d, "freezero() called before allocation"); | |||
| 1779 | _MALLOC_LOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_lock(d->mutex) ; } while (0); | |||
| 1780 | d->func = "freezero"; | |||
| 1781 | if (d->active++) { | |||
| 1782 | malloc_recurse(d); | |||
| 1783 | return; | |||
| 1784 | } | |||
| 1785 | ofree(&d, ptr, 1, 1, sz); | |||
| 1786 | d->active--; | |||
| 1787 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 1788 | errno(*__errno()) = saved_errno; | |||
| 1789 | } | |||
| 1790 | DEF_WEAK(freezero)__asm__(".weak " "freezero" " ; " "freezero" " = " "_libc_freezero" ); | |||
| 1791 | ||||
| 1792 | static void * | |||
| 1793 | orealloc(struct dir_info **argpool, void *p, size_t newsz) | |||
| 1794 | { | |||
| 1795 | struct region_info *r; | |||
| 1796 | struct dir_info *pool; | |||
| 1797 | const char *saved_function; | |||
| 1798 | struct chunk_info *info; | |||
| 1799 | size_t oldsz, goldsz, gnewsz; | |||
| 1800 | void *q, *ret; | |||
| 1801 | uint32_t chunknum; | |||
| 1802 | int forced; | |||
| 1803 | ||||
| 1804 | if (p == NULL((void *)0)) | |||
| 1805 | return omalloc(*argpool, newsz, 0); | |||
| 1806 | ||||
| 1807 | if (newsz >= SIZE_MAX0xffffffffffffffffUL - moptsmalloc_readonly.mopts.malloc_guard - MALLOC_PAGESIZE(1UL << 12)) { | |||
| 1808 | errno(*__errno()) = ENOMEM12; | |||
| 1809 | return NULL((void *)0); | |||
| 1810 | } | |||
| 1811 | ||||
| 1812 | r = findpool(p, *argpool, &pool, &saved_function); | |||
| 1813 | ||||
| 1814 | REALSIZE(oldsz, r)(oldsz) = (uintptr_t)(r)->p & ((1UL << 12) - 1), (oldsz) = ((oldsz) == 0 ? (r)->size : (((oldsz) - 1) * (1UL << 4))); | |||
| 1815 | if (oldsz <= MALLOC_MAXCHUNK(1 << (12 - 1))) { | |||
| 1816 | if (DO_STATSmalloc_readonly.mopts.malloc_stats || moptsmalloc_readonly.mopts.chunk_canaries) { | |||
| 1817 | info = (struct chunk_info *)r->size; | |||
| 1818 | chunknum = find_chunknum(pool, info, p, 0); | |||
| 1819 | } | |||
| 1820 | } | |||
| 1821 | ||||
| 1822 | goldsz = oldsz; | |||
| 1823 | if (oldsz > MALLOC_MAXCHUNK(1 << (12 - 1))) { | |||
| 1824 | if (oldsz < moptsmalloc_readonly.mopts.malloc_guard) | |||
| 1825 | wrterror(pool, "guard size"); | |||
| 1826 | oldsz -= moptsmalloc_readonly.mopts.malloc_guard; | |||
| 1827 | } | |||
| 1828 | ||||
| 1829 | gnewsz = newsz; | |||
| 1830 | if (gnewsz > MALLOC_MAXCHUNK(1 << (12 - 1))) | |||
| 1831 | gnewsz += moptsmalloc_readonly.mopts.malloc_guard; | |||
| 1832 | ||||
| 1833 | forced = moptsmalloc_readonly.mopts.malloc_realloc || pool->mmap_flag; | |||
| 1834 | if (newsz > MALLOC_MAXCHUNK(1 << (12 - 1)) && oldsz > MALLOC_MAXCHUNK(1 << (12 - 1)) && !forced) { | |||
| 1835 | /* First case: from an n-page-sized allocation to an m-page-sized | |||
| 1836 | allocation, m > n */ | |||
| 1837 | size_t roldsz = PAGEROUND(goldsz)(((goldsz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1)); | |||
| 1838 | size_t rnewsz = PAGEROUND(gnewsz)(((gnewsz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1)); | |||
| 1839 | ||||
| 1840 | if (rnewsz < roldsz && rnewsz > roldsz / 2 && | |||
| 1841 | roldsz - rnewsz < moptsmalloc_readonly.mopts.def_maxcache * MALLOC_PAGESIZE(1UL << 12) && | |||
| 1842 | !moptsmalloc_readonly.mopts.malloc_guard) { | |||
| 1843 | ||||
| 1844 | ret = p; | |||
| 1845 | goto done; | |||
| 1846 | } | |||
| 1847 | ||||
| 1848 | if (rnewsz > roldsz) { | |||
| 1849 | /* try to extend existing region */ | |||
| 1850 | if (!moptsmalloc_readonly.mopts.malloc_guard) { | |||
| 1851 | void *hint = (char *)r->p + roldsz; | |||
| 1852 | size_t needed = rnewsz - roldsz; | |||
| 1853 | ||||
| 1854 | STATS_INC(pool->cheap_realloc_tries)((pool->cheap_realloc_tries)++); | |||
| 1855 | q = MMAPA(hint, needed, MAP_FIXED |mmap((hint), (needed), 0x01 | 0x02, 0x1000 | 0x0002 | (0x0010 | 0x0800 | pool->mmap_flag), -1, 0) | |||
| 1856 | __MAP_NOREPLACE | pool->mmap_flag)mmap((hint), (needed), 0x01 | 0x02, 0x1000 | 0x0002 | (0x0010 | 0x0800 | pool->mmap_flag), -1, 0); | |||
| 1857 | if (q == hint) { | |||
| 1858 | STATS_ADD(pool->malloc_used, needed)((pool->malloc_used) += (needed)); | |||
| 1859 | if (pool->malloc_junk == 2) | |||
| 1860 | memset(q, SOME_JUNK0xdb, needed); | |||
| 1861 | r->size = gnewsz; | |||
| 1862 | if (r->p != p) { | |||
| 1863 | /* old pointer is moved */ | |||
| 1864 | memmove(r->p, p, oldsz); | |||
| 1865 | p = r->p; | |||
| 1866 | } | |||
| 1867 | if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 1868 | fill_canary(p, newsz, | |||
| 1869 | PAGEROUND(newsz)(((newsz) + (((1UL << 12) - 1))) & ~((1UL << 12 ) - 1))); | |||
| 1870 | STATS_SETF(r, (*argpool)->caller)((r)->f = ((*argpool)->caller)); | |||
| 1871 | STATS_INC(pool->cheap_reallocs)((pool->cheap_reallocs)++); | |||
| 1872 | ret = p; | |||
| 1873 | goto done; | |||
| 1874 | } | |||
| 1875 | } | |||
| 1876 | } else if (rnewsz < roldsz) { | |||
| 1877 | /* shrink number of pages */ | |||
| 1878 | if (moptsmalloc_readonly.mopts.malloc_guard) { | |||
| 1879 | if (mprotect((char *)r->p + rnewsz - | |||
| 1880 | moptsmalloc_readonly.mopts.malloc_guard, moptsmalloc_readonly.mopts.malloc_guard, | |||
| 1881 | PROT_NONE0x00)) | |||
| 1882 | wrterror(pool, "mprotect"); | |||
| 1883 | } | |||
| 1884 | if (munmap((char *)r->p + rnewsz, roldsz - rnewsz)) | |||
| 1885 | wrterror(pool, "munmap %p", (char *)r->p + | |||
| 1886 | rnewsz); | |||
| 1887 | STATS_SUB(pool->malloc_used, roldsz - rnewsz)((pool->malloc_used) -= (roldsz - rnewsz)); | |||
| 1888 | r->size = gnewsz; | |||
| 1889 | if (MALLOC_MOVE_COND(gnewsz)((gnewsz) - malloc_readonly.mopts.malloc_guard < (1UL << 12) - 0)) { | |||
| 1890 | void *pp = MALLOC_MOVE(r->p, gnewsz)(((char *)(r->p)) + (((1UL << 12) - 0 - ((gnewsz) - malloc_readonly .mopts.malloc_guard)) & ~((1UL << 4) - 1))); | |||
| 1891 | memmove(pp, p, newsz); | |||
| 1892 | p = pp; | |||
| 1893 | } else if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 1894 | fill_canary(p, newsz, PAGEROUND(newsz)(((newsz) + (((1UL << 12) - 1))) & ~((1UL << 12 ) - 1))); | |||
| 1895 | STATS_SETF(r, (*argpool)->caller)((r)->f = ((*argpool)->caller)); | |||
| 1896 | ret = p; | |||
| 1897 | goto done; | |||
| 1898 | } else { | |||
| 1899 | /* number of pages remains the same */ | |||
| 1900 | void *pp = r->p; | |||
| 1901 | ||||
| 1902 | r->size = gnewsz; | |||
| 1903 | if (MALLOC_MOVE_COND(gnewsz)((gnewsz) - malloc_readonly.mopts.malloc_guard < (1UL << 12) - 0)) | |||
| 1904 | pp = MALLOC_MOVE(r->p, gnewsz)(((char *)(r->p)) + (((1UL << 12) - 0 - ((gnewsz) - malloc_readonly .mopts.malloc_guard)) & ~((1UL << 4) - 1))); | |||
| 1905 | if (p != pp) { | |||
| 1906 | memmove(pp, p, oldsz < newsz ? oldsz : newsz); | |||
| 1907 | p = pp; | |||
| 1908 | } | |||
| 1909 | if (p == r->p) { | |||
| 1910 | if (newsz > oldsz && pool->malloc_junk == 2) | |||
| 1911 | memset((char *)p + newsz, SOME_JUNK0xdb, | |||
| 1912 | rnewsz - moptsmalloc_readonly.mopts.malloc_guard - | |||
| 1913 | newsz); | |||
| 1914 | if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 1915 | fill_canary(p, newsz, PAGEROUND(newsz)(((newsz) + (((1UL << 12) - 1))) & ~((1UL << 12 ) - 1))); | |||
| 1916 | } | |||
| 1917 | STATS_SETF(r, (*argpool)->caller)((r)->f = ((*argpool)->caller)); | |||
| 1918 | ret = p; | |||
| 1919 | goto done; | |||
| 1920 | } | |||
| 1921 | } | |||
| 1922 | if (oldsz <= MALLOC_MAXCHUNK(1 << (12 - 1)) && oldsz > 0 && | |||
| 1923 | newsz <= MALLOC_MAXCHUNK(1 << (12 - 1)) && newsz > 0 && | |||
| 1924 | !forced && find_bucket(newsz) == find_bucket(oldsz)) { | |||
| 1925 | /* do not reallocate if the new size fits well in the existing chunk */ | |||
| 1926 | if (pool->malloc_junk == 2) | |||
| 1927 | memset((char *)p + newsz, SOME_JUNK0xdb, oldsz - newsz); | |||
| 1928 | if (moptsmalloc_readonly.mopts.chunk_canaries) { | |||
| 1929 | info->bits[info->offset + chunknum] = newsz; | |||
| 1930 | fill_canary(p, newsz, B2SIZE(info->bucket)((info->bucket) * (1UL << 4))); | |||
| 1931 | } | |||
| 1932 | if (DO_STATSmalloc_readonly.mopts.malloc_stats) | |||
| 1933 | STATS_SETFN(r, chunknum, (*argpool)->caller)((r)->f[chunknum] = ((*argpool)->caller)); | |||
| 1934 | ret = p; | |||
| 1935 | } else if (newsz != oldsz || forced) { | |||
| 1936 | /* create new allocation */ | |||
| 1937 | q = omalloc(pool, newsz, 0); | |||
| 1938 | if (q == NULL((void *)0)) { | |||
| 1939 | ret = NULL((void *)0); | |||
| 1940 | goto done; | |||
| 1941 | } | |||
| 1942 | if (newsz != 0 && oldsz != 0) | |||
| 1943 | memcpy(q, p, oldsz < newsz ? oldsz : newsz); | |||
| 1944 | ofree(&pool, p, 0, 0, 0); | |||
| 1945 | ret = q; | |||
| 1946 | } else { | |||
| 1947 | /* oldsz == newsz */ | |||
| 1948 | if (newsz != 0) | |||
| 1949 | wrterror(pool, "realloc internal inconsistency"); | |||
| 1950 | if (DO_STATSmalloc_readonly.mopts.malloc_stats) | |||
| 1951 | STATS_SETFN(r, chunknum, (*argpool)->caller)((r)->f[chunknum] = ((*argpool)->caller)); | |||
| 1952 | ret = p; | |||
| 1953 | } | |||
| 1954 | done: | |||
| 1955 | if (*argpool != pool) { | |||
| 1956 | pool->func = saved_function; | |||
| 1957 | *argpool = pool; | |||
| 1958 | } | |||
| 1959 | return ret; | |||
| 1960 | } | |||
| 1961 | ||||
| 1962 | void * | |||
| 1963 | realloc(void *ptr, size_t size) | |||
| 1964 | { | |||
| 1965 | struct dir_info *d; | |||
| 1966 | void *r; | |||
| 1967 | int saved_errno = errno(*__errno()); | |||
| 1968 | ||||
| 1969 | PROLOGUE(getpool(), "realloc")d = (getpool()); if (d == ((void *)0)) { _malloc_init(0); d = (getpool()); } do { if (__isthreaded) _thread_cb.tc_malloc_lock (d->mutex); } while (0); d->func = "realloc"; if (d-> active++) { malloc_recurse(d); return ((void *)0); } | |||
| 1970 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 1971 | r = orealloc(&d, ptr, size); | |||
| 1972 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 1973 | return r; | |||
| 1974 | } | |||
| 1975 | DEF_STRONG(realloc)__asm__(".global " "realloc" " ; " "realloc" " = " "_libc_realloc" ); | |||
| 1976 | ||||
| 1977 | /* | |||
| 1978 | * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX | |||
| 1979 | * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW | |||
| 1980 | */ | |||
| 1981 | #define MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) (1UL << (sizeof(size_t) * 4)) | |||
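The shortcut works because MUL_NO_OVERFLOW is 2^(bits/2): with a 64-bit size_t it is 2^32, and the product of two factors each below 2^32 cannot exceed SIZE_MAX, so the division is only needed when at least one factor is large. A minimal sketch of the idiom as calloc() applies it below:

#include <stddef.h>
#include <stdint.h>

#define MUL_NO_OVERFLOW_DEMO	(1UL << (sizeof(size_t) * 4))

/* returns 1 when nmemb * size would wrap past SIZE_MAX */
static int
mul_overflows(size_t nmemb, size_t size)
{
	return (nmemb >= MUL_NO_OVERFLOW_DEMO || size >= MUL_NO_OVERFLOW_DEMO) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size;
}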
| 1982 | ||||
| 1983 | void * | |||
| 1984 | calloc(size_t nmemb, size_t size) | |||
| 1985 | { | |||
| 1986 | struct dir_info *d; | |||
| 1987 | void *r; | |||
| 1988 | int saved_errno = errno(*__errno()); | |||
| 1989 | ||||
| 1990 | PROLOGUE(getpool(), "calloc")d = (getpool()); if (d == ((void *)0)) { _malloc_init(0); d = (getpool()); } do { if (__isthreaded) _thread_cb.tc_malloc_lock (d->mutex); } while (0); d->func = "calloc"; if (d-> active++) { malloc_recurse(d); return ((void *)0); } | |||
| 1991 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 1992 | if ((nmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) && | |||
| 1993 | nmemb > 0 && SIZE_MAX0xffffffffffffffffUL / nmemb < size) { | |||
| 1994 | d->active--; | |||
| 1995 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 1996 | if (moptsmalloc_readonly.mopts.malloc_xmalloc) | |||
| 1997 | wrterror(d, "out of memory"); | |||
| 1998 | errno(*__errno()) = ENOMEM12; | |||
| 1999 | return NULL((void *)0); | |||
| 2000 | } | |||
| 2001 | ||||
| 2002 | size *= nmemb; | |||
| 2003 | r = omalloc(d, size, 1); | |||
| 2004 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 2005 | return r; | |||
| 2006 | } | |||
| 2007 | DEF_STRONG(calloc)__asm__(".global " "calloc" " ; " "calloc" " = " "_libc_calloc" ); | |||
| 2008 | ||||
| 2009 | void * | |||
| 2010 | calloc_conceal(size_t nmemb, size_t size) | |||
| 2011 | { | |||
| 2012 | struct dir_info *d; | |||
| 2013 | void *r; | |||
| 2014 | int saved_errno = errno(*__errno()); | |||
| 2015 | ||||
| 2016 | PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")d = (malloc_readonly.mopts.malloc_pool[0]); if (d == ((void * )0)) { _malloc_init(0); d = (malloc_readonly.mopts.malloc_pool [0]); } do { if (__isthreaded) _thread_cb.tc_malloc_lock(d-> mutex); } while (0); d->func = "calloc_conceal"; if (d-> active++) { malloc_recurse(d); return ((void *)0); } | |||
| 2017 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 2018 | if ((nmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) && | |||
| 2019 | nmemb > 0 && SIZE_MAX0xffffffffffffffffUL / nmemb < size) { | |||
| 2020 | d->active--; | |||
| 2021 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 2022 | if (moptsmalloc_readonly.mopts.malloc_xmalloc) | |||
| 2023 | wrterror(d, "out of memory"); | |||
| 2024 | errno(*__errno()) = ENOMEM12; | |||
| 2025 | return NULL((void *)0); | |||
| 2026 | } | |||
| 2027 | ||||
| 2028 | size *= nmemb; | |||
| 2029 | r = omalloc(d, size, 1); | |||
| 2030 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 2031 | return r; | |||
| 2032 | } | |||
| 2033 | DEF_WEAK(calloc_conceal)__asm__(".weak " "calloc_conceal" " ; " "calloc_conceal" " = " "_libc_calloc_conceal"); | |||
| 2034 | ||||
| 2035 | static void * | |||
| 2036 | orecallocarray(struct dir_info **argpool, void *p, size_t oldsize, | |||
| 2037 | size_t newsize) | |||
| 2038 | { | |||
| 2039 | struct region_info *r; | |||
| 2040 | struct dir_info *pool; | |||
| 2041 | const char *saved_function; | |||
| 2042 | void *newptr; | |||
| 2043 | size_t sz; | |||
| 2044 | ||||
| 2045 | if (p == NULL((void *)0)) | |||
| 2046 | return omalloc(*argpool, newsize, 1); | |||
| 2047 | ||||
| 2048 | if (oldsize == newsize) | |||
| 2049 | return p; | |||
| 2050 | ||||
| 2051 | r = findpool(p, *argpool, &pool, &saved_function); | |||
| 2052 | ||||
| 2053 | REALSIZE(sz, r)(sz) = (uintptr_t)(r)->p & ((1UL << 12) - 1), (sz ) = ((sz) == 0 ? (r)->size : (((sz) - 1) * (1UL << 4 ))); | |||
| 2054 | if (sz <= MALLOC_MAXCHUNK(1 << (12 - 1))) { | |||
| 2055 | if (moptsmalloc_readonly.mopts.chunk_canaries && sz > 0) { | |||
| 2056 | struct chunk_info *info = (struct chunk_info *)r->size; | |||
| 2057 | uint32_t chunknum = find_chunknum(pool, info, p, 0); | |||
| 2058 | ||||
| 2059 | if (info->bits[info->offset + chunknum] != oldsize) | |||
| 2060 | wrterror(pool, "recorded size %hu != %zu", | |||
| 2061 | info->bits[info->offset + chunknum], | |||
| 2062 | oldsize); | |||
| 2063 | } else { | |||
| 2064 | if (sz < oldsize) | |||
| 2065 | wrterror(pool, "chunk size %zu < %zu", | |||
| 2066 | sz, oldsize); | |||
| 2067 | } | |||
| 2068 | } else { | |||
| 2069 | if (sz - moptsmalloc_readonly.mopts.malloc_guard < oldsize) | |||
| 2070 | wrterror(pool, "recorded size %zu < %zu", | |||
| 2071 | sz - moptsmalloc_readonly.mopts.malloc_guard, oldsize); | |||
| 2072 | if (oldsize < (sz - moptsmalloc_readonly.mopts.malloc_guard) / 2) | |||
| 2073 | wrterror(pool, | |||
| 2074 | "recorded size %zu inconsistent with %zu", | |||
| 2075 | sz - moptsmalloc_readonly.mopts.malloc_guard, oldsize); | |||
| 2076 | } | |||
| 2077 | ||||
| 2078 | newptr = omalloc(pool, newsize, 0); | |||
| 2079 | if (newptr == NULL((void *)0)) | |||
| 2080 | goto done; | |||
| 2081 | ||||
| 2082 | if (newsize > oldsize) { | |||
| 2083 | memcpy(newptr, p, oldsize); | |||
| 2084 | memset((char *)newptr + oldsize, 0, newsize - oldsize); | |||
| 2085 | } else | |||
| 2086 | memcpy(newptr, p, newsize); | |||
| 2087 | ||||
| 2088 | ofree(&pool, p, 1, 0, oldsize); | |||
| 2089 | ||||
| 2090 | done: | |||
| 2091 | if (*argpool != pool) { | |||
| 2092 | pool->func = saved_function; | |||
| 2093 | *argpool = pool; | |||
| 2094 | } | |||
| 2095 | ||||
| 2096 | return newptr; | |||
| 2097 | } | |||
| 2098 | ||||
| 2099 | static void * | |||
| 2100 | recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size) | |||
| 2101 | { | |||
| 2102 | size_t oldsize, newsize; | |||
| 2103 | void *newptr; | |||
| 2104 | ||||
| 2105 | if (ptr == NULL((void *)0)) | |||
| 2106 | return calloc(newnmemb, size); | |||
| 2107 | ||||
| 2108 | if ((newnmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) && | |||
| 2109 | newnmemb > 0 && SIZE_MAX0xffffffffffffffffUL / newnmemb < size) { | |||
| 2110 | errno(*__errno()) = ENOMEM12; | |||
| 2111 | return NULL((void *)0); | |||
| 2112 | } | |||
| 2113 | newsize = newnmemb * size; | |||
| 2114 | ||||
| 2115 | if ((oldnmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) && | |||
| 2116 | oldnmemb > 0 && SIZE_MAX0xffffffffffffffffUL / oldnmemb < size) { | |||
| 2117 | errno(*__errno()) = EINVAL22; | |||
| 2118 | return NULL((void *)0); | |||
| 2119 | } | |||
| 2120 | oldsize = oldnmemb * size; | |||
| 2121 | ||||
| 2122 | /* | |||
| 2123 | * Don't bother too much if we're shrinking just a bit; | |||
| 2124 | * we do not shrink for a series of small steps, oh well. | |||
| 2125 | */ | |||
| 2126 | if (newsize <= oldsize) { | |||
| 2127 | size_t d = oldsize - newsize; | |||
| 2128 | ||||
| 2129 | if (d < oldsize / 2 && d < MALLOC_PAGESIZE(1UL << 12)) { | |||
| 2130 | memset((char *)ptr + newsize, 0, d); | |||
| 2131 | return ptr; | |||
| 2132 | } | |||
| 2133 | } | |||
| 2134 | ||||
| 2135 | newptr = malloc(newsize); | |||
| 2136 | if (newptr == NULL((void *)0)) | |||
| 2137 | return NULL((void *)0); | |||
| 2138 | ||||
| 2139 | if (newsize > oldsize) { | |||
| 2140 | memcpy(newptr, ptr, oldsize); | |||
| 2141 | memset((char *)newptr + oldsize, 0, newsize - oldsize); | |||
| 2142 | } else | |||
| 2143 | memcpy(newptr, ptr, newsize); | |||
| 2144 | ||||
| 2145 | explicit_bzero(ptr, oldsize); | |||
| 2146 | free(ptr); | |||
| 2147 | ||||
| 2148 | return newptr; | |||
| 2149 | } | |||
| 2150 | ||||
| 2151 | void * | |||
| 2152 | recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size) | |||
| 2153 | { | |||
| 2154 | struct dir_info *d; | |||
| 2155 | size_t oldsize = 0, newsize; | |||
| 2156 | void *r; | |||
| 2157 | int saved_errno = errno(*__errno()); | |||
| 2158 | ||||
| 2159 | if (!moptsmalloc_readonly.mopts.internal_funcs) | |||
| 2160 | return recallocarray_p(ptr, oldnmemb, newnmemb, size); | |||
| 2161 | ||||
| 2162 | PROLOGUE(getpool(), "recallocarray")d = (getpool()); if (d == ((void *)0)) { _malloc_init(0); d = (getpool()); } do { if (__isthreaded) _thread_cb.tc_malloc_lock (d->mutex); } while (0); d->func = "recallocarray"; if ( d->active++) { malloc_recurse(d); return ((void *)0); } | |||
| 2163 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 2164 | ||||
| 2165 | if ((newnmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) && | |||
| 2166 | newnmemb > 0 && SIZE_MAX0xffffffffffffffffUL / newnmemb < size) { | |||
| 2167 | d->active--; | |||
| 2168 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 2169 | if (moptsmalloc_readonly.mopts.malloc_xmalloc) | |||
| 2170 | wrterror(d, "out of memory"); | |||
| 2171 | errno(*__errno()) = ENOMEM12; | |||
| 2172 | return NULL((void *)0); | |||
| 2173 | } | |||
| 2174 | newsize = newnmemb * size; | |||
| 2175 | ||||
| 2176 | if (ptr != NULL((void *)0)) { | |||
| 2177 | if ((oldnmemb >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4)) || size >= MUL_NO_OVERFLOW(1UL << (sizeof(size_t) * 4))) && | |||
| 2178 | oldnmemb > 0 && SIZE_MAX0xffffffffffffffffUL / oldnmemb < size) { | |||
| 2179 | d->active--; | |||
| 2180 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 2181 | errno(*__errno()) = EINVAL22; | |||
| 2182 | return NULL((void *)0); | |||
| 2183 | } | |||
| 2184 | oldsize = oldnmemb * size; | |||
| 2185 | } | |||
| 2186 | ||||
| 2187 | r = orecallocarray(&d, ptr, oldsize, newsize); | |||
| 2188 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 2189 | return r; | |||
| 2190 | } | |||
| 2191 | DEF_WEAK(recallocarray)__asm__(".weak " "recallocarray" " ; " "recallocarray" " = " "_libc_recallocarray" ); | |||
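Usage note: unlike realloc(), recallocarray() takes the old element count as well, zero-fills any grown portion, and zeroes the old region before releasing it, which suits arrays holding sensitive data. A small illustrative program (the struct and counts are made up for the example):

#include <err.h>
#include <stdlib.h>

struct rec { int key; int val; };

int
main(void)
{
	struct rec *tab = NULL;
	size_t n = 0;

	/* grow from 0 to 8 records; the new records read as zero */
	tab = recallocarray(tab, n, 8, sizeof(*tab));
	if (tab == NULL)
		err(1, "recallocarray");
	n = 8;

	/* shrink to 4 records; the discarded tail is zeroed before release */
	tab = recallocarray(tab, n, 4, sizeof(*tab));
	if (tab == NULL)
		err(1, "recallocarray");

	free(tab);
	return 0;
}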
| 2192 | ||||
| 2193 | static void * | |||
| 2194 | mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill) | |||
| 2195 | { | |||
| 2196 | char *p, *q; | |||
| 2197 | ||||
| 2198 | if (alignment < MALLOC_PAGESIZE(1UL << 12) || ((alignment - 1) & alignment) != 0) | |||
| 2199 | wrterror(d, "mapalign bad alignment"); | |||
| 2200 | if (sz != PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1))) | |||
| 2201 | wrterror(d, "mapalign round"); | |||
| 2202 | ||||
| 2203 | /* Allocate sz + alignment bytes of memory, which must include a | |||
| 2204 | * subrange of sz bytes that is properly aligned. Unmap the | |||
| 2205 | * other bytes, and then return that subrange. | |||
| 2206 | */ | |||
| 2207 | ||||
| 2208 | /* We need sz + alignment to fit into a size_t. */ | |||
| 2209 | if (alignment > SIZE_MAX0xffffffffffffffffUL - sz) | |||
| 2210 | return MAP_FAILED((void *)-1); | |||
| 2211 | ||||
| 2212 | p = map(d, sz + alignment, zero_fill); | |||
| 2213 | if (p == MAP_FAILED((void *)-1)) | |||
| 2214 | return MAP_FAILED((void *)-1); | |||
| 2215 | q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1)); | |||
| 2216 | if (q != p) { | |||
| 2217 | if (munmap(p, q - p)) | |||
| 2218 | wrterror(d, "munmap %p", p); | |||
| 2219 | } | |||
| 2220 | if (munmap(q + sz, alignment - (q - p))) | |||
| 2221 | wrterror(d, "munmap %p", q + sz); | |||
| 2222 | STATS_SUB(d->malloc_used, alignment)((d->malloc_used) -= (alignment)); | |||
| 2223 | ||||
| 2224 | return q; | |||
| 2225 | } | |||
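This over-allocate-then-trim approach is the usual way to obtain alignment stronger than a page from mmap(): reserve sz + alignment bytes, round up to the first aligned address inside the reservation, and unmap the slack in front of and behind the aligned window. A tiny sketch of the address arithmetic with made-up example numbers (a 16 KB alignment request on a 4 KB-page system):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t p = 0x7f3a11003000;	/* hypothetical start of the sz + alignment mapping */
	uintptr_t alignment = 0x4000;	/* 16 KB, larger than MALLOC_PAGESIZE */
	uintptr_t q = (p + alignment - 1) & ~(alignment - 1);

	/* keep [q, q + sz); munmap [p, q) in front and the tail past q + sz */
	printf("p=%#lx q=%#lx slack in front=%#lx bytes\n",
	    (unsigned long)p, (unsigned long)q, (unsigned long)(q - p));
	return 0;
}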
| 2226 | ||||
| 2227 | static void * | |||
| 2228 | omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill) | |||
| 2229 | { | |||
| 2230 | size_t psz; | |||
| 2231 | void *p, *caller = NULL((void *)0); | |||
| 2232 | ||||
| 2233 | /* If between half a page and a page, avoid MALLOC_MOVE. */ | |||
| 2234 | if (sz > MALLOC_MAXCHUNK(1 << (12 - 1)) && sz < MALLOC_PAGESIZE(1UL << 12)) | |||
| 2235 | sz = MALLOC_PAGESIZE(1UL << 12); | |||
| 2236 | if (alignment <= MALLOC_PAGESIZE(1UL << 12)) { | |||
| 2237 | size_t pof2; | |||
| 2238 | /* | |||
| 2239 | * max(size, alignment) rounded up to power of 2 is enough | |||
| 2240 | * to ensure the requested alignment. Large regions are | |||
| 2241 | * always page aligned. | |||
| 2242 | */ | |||
| 2243 | if (sz < alignment) | |||
| 2244 | sz = alignment; | |||
| 2245 | if (sz < MALLOC_PAGESIZE(1UL << 12)) { | |||
| 2246 | pof2 = MALLOC_MINSIZE(1UL << 4); | |||
| 2247 | while (pof2 < sz) | |||
| 2248 | pof2 <<= 1; | |||
| 2249 | } else | |||
| 2250 | pof2 = sz; | |||
| 2251 | return omalloc(pool, pof2, zero_fill); | |||
| 2252 | } | |||
| 2253 | ||||
| 2254 | if (sz >= SIZE_MAX0xffffffffffffffffUL - moptsmalloc_readonly.mopts.malloc_guard - MALLOC_PAGESIZE(1UL << 12)) { | |||
| 2255 | errno(*__errno()) = ENOMEM12; | |||
| 2256 | return NULL((void *)0); | |||
| 2257 | } | |||
| 2258 | ||||
| 2259 | if (sz < MALLOC_PAGESIZE(1UL << 12)) | |||
| 2260 | sz = MALLOC_PAGESIZE(1UL << 12); | |||
| 2261 | sz += moptsmalloc_readonly.mopts.malloc_guard; | |||
| 2262 | psz = PAGEROUND(sz)(((sz) + (((1UL << 12) - 1))) & ~((1UL << 12) - 1)); | |||
| 2263 | ||||
| 2264 | p = mapalign(pool, alignment, psz, zero_fill); | |||
| 2265 | if (p == MAP_FAILED((void *)-1)) { | |||
| 2266 | errno(*__errno()) = ENOMEM12; | |||
| 2267 | return NULL((void *)0); | |||
| 2268 | } | |||
| 2269 | ||||
| 2270 | #ifdef MALLOC_STATS | |||
| 2271 | if (DO_STATSmalloc_readonly.mopts.malloc_stats) | |||
| 2272 | caller = pool->caller; | |||
| 2273 | #endif | |||
| 2274 | if (insert(pool, p, sz, caller)) { | |||
| 2275 | unmap(pool, p, psz, 0); | |||
| 2276 | errno(*__errno()) = ENOMEM12; | |||
| 2277 | return NULL((void *)0); | |||
| 2278 | } | |||
| 2279 | ||||
| 2280 | if (moptsmalloc_readonly.mopts.malloc_guard) { | |||
| 2281 | if (mprotect((char *)p + psz - moptsmalloc_readonly.mopts.malloc_guard, | |||
| 2282 | moptsmalloc_readonly.mopts.malloc_guard, PROT_NONE0x00)) | |||
| 2283 | wrterror(pool, "mprotect"); | |||
| 2284 | STATS_ADD(pool->malloc_guarded, mopts.malloc_guard)((pool->malloc_guarded) += (malloc_readonly.mopts.malloc_guard )); | |||
| 2285 | } | |||
| 2286 | ||||
| 2287 | if (pool->malloc_junk == 2) { | |||
| 2288 | if (zero_fill) | |||
| 2289 | memset((char *)p + sz - moptsmalloc_readonly.mopts.malloc_guard, | |||
| 2290 | SOME_JUNK0xdb, psz - sz); | |||
| 2291 | else | |||
| 2292 | memset(p, SOME_JUNK0xdb, psz - moptsmalloc_readonly.mopts.malloc_guard); | |||
| 2293 | } else if (moptsmalloc_readonly.mopts.chunk_canaries) | |||
| 2294 | fill_canary(p, sz - moptsmalloc_readonly.mopts.malloc_guard, | |||
| 2295 | psz - moptsmalloc_readonly.mopts.malloc_guard); | |||
| 2296 | ||||
| 2297 | return p; | |||
| 2298 | } | |||
| 2299 | ||||
| 2300 | int | |||
| 2301 | posix_memalign(void **memptr, size_t alignment, size_t size) | |||
| 2302 | { | |||
| 2303 | struct dir_info *d; | |||
| 2304 | int res, saved_errno = errno(*__errno()); | |||
| 2305 | void *r; | |||
| 2306 | ||||
| 2307 | /* Make sure that alignment is a large enough power of 2. */ | |||
| 2308 | if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) | |||
| 2309 | return EINVAL22; | |||
| 2310 | ||||
| 2311 | d = getpool(); | |||
| 2312 | if (d == NULL((void *)0)) { | |||
| 2313 | _malloc_init(0); | |||
| 2314 | d = getpool(); | |||
| 2315 | } | |||
| 2316 | _MALLOC_LOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_lock(d->mutex) ; } while (0); | |||
| 2317 | d->func = "posix_memalign"; | |||
| 2318 | if (d->active++) { | |||
| 2319 | malloc_recurse(d); | |||
| 2320 | goto err; | |||
| 2321 | } | |||
| 2322 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 2323 | r = omemalign(d, alignment, size, 0); | |||
| 2324 | d->active--; | |||
| 2325 | _MALLOC_UNLOCK(d->mutex)do { if (__isthreaded) _thread_cb.tc_malloc_unlock(d->mutex ); } while (0); | |||
| 2326 | if (r == NULL((void *)0)) { | |||
| 2327 | if (moptsmalloc_readonly.mopts.malloc_xmalloc) | |||
| 2328 | wrterror(d, "out of memory"); | |||
| 2329 | goto err; | |||
| 2330 | } | |||
| 2331 | errno(*__errno()) = saved_errno; | |||
| 2332 | *memptr = r; | |||
| 2333 | return 0; | |||
| 2334 | ||||
| 2335 | err: | |||
| 2336 | res = errno(*__errno()); | |||
| 2337 | errno(*__errno()) = saved_errno; | |||
| 2338 | return res; | |||
| 2339 | } | |||
| 2340 | DEF_STRONG(posix_memalign)__asm__(".global " "posix_memalign" " ; " "posix_memalign" " = " "_libc_posix_memalign"); | |||
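Usage note: posix_memalign() reports failure through its return value rather than errno, and the alignment must be a power of two no smaller than sizeof(void *). A short example requesting a 64-byte aligned buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	void *buf;
	int error;

	error = posix_memalign(&buf, 64, 1024);	/* e.g. cache-line alignment */
	if (error != 0) {
		fprintf(stderr, "posix_memalign: %s\n", strerror(error));
		return 1;
	}
	free(buf);
	return 0;
}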
| 2341 | ||||
| 2342 | void * | |||
| 2343 | aligned_alloc(size_t alignment, size_t size) | |||
| 2344 | { | |||
| 2345 | struct dir_info *d; | |||
| 2346 | int saved_errno = errno(*__errno()); | |||
| 2347 | void *r; | |||
| 2348 | ||||
| 2349 | /* Make sure that alignment is a positive power of 2. */ | |||
| 2350 | if (((alignment - 1) & alignment) != 0 || alignment == 0) { | |||
| 2351 | errno(*__errno()) = EINVAL22; | |||
| 2352 | return NULL((void *)0); | |||
| 2353 | } | |||
| 2354 | /* Per spec, size should be a multiple of alignment */ | |||
| 2355 | if ((size & (alignment - 1)) != 0) { | |||
| 2356 | errno(*__errno()) = EINVAL22; | |||
| 2357 | return NULL((void *)0); | |||
| 2358 | } | |||
| 2359 | ||||
| 2360 | PROLOGUE(getpool(), "aligned_alloc")d = (getpool()); if (d == ((void *)0)) { _malloc_init(0); d = (getpool()); } do { if (__isthreaded) _thread_cb.tc_malloc_lock (d->mutex); } while (0); d->func = "aligned_alloc"; if ( d->active++) { malloc_recurse(d); return ((void *)0); } | |||
| 2361 | SET_CALLER(d, caller(d))if (malloc_readonly.mopts.malloc_stats) ((d)->caller = (caller (d))); | |||
| 2362 | r = omemalign(d, alignment, size, 0); | |||
| 2363 | EPILOGUE()d->active--; do { if (__isthreaded) _thread_cb.tc_malloc_unlock (d->mutex); } while (0); if (r == ((void *)0) && malloc_readonly .mopts.malloc_xmalloc) wrterror(d, "out of memory"); if (r != ((void *)0)) (*__errno()) = saved_errno; | |||
| 2364 | return r; | |||
| 2365 | } | |||
| 2366 | DEF_STRONG(aligned_alloc)__asm__(".global " "aligned_alloc" " ; " "aligned_alloc" " = " "_libc_aligned_alloc"); | |||

#ifdef MALLOC_STATS

static int
btcmp(const struct btnode *e1, const struct btnode *e2)
{
	return memcmp(e1->caller, e2->caller, sizeof(e1->caller));
}

RBT_GENERATE(btshead, btnode, entry, btcmp);

static void*
store_caller(struct dir_info *d, struct btnode *f)
{
	struct btnode *p;

	if (DO_STATS == 0 || d->btnodes == MAP_FAILED)
		return NULL;

	p = RBT_FIND(btshead, &d->btraces, f);
	if (p != NULL)
		return p;
	if (d->btnodes == NULL ||
	    d->btnodesused >= MALLOC_PAGESIZE / sizeof(struct btnode)) {
		d->btnodes = map(d, MALLOC_PAGESIZE, 0);
		if (d->btnodes == MAP_FAILED)
			return NULL;
		d->btnodesused = 0;
	}
	p = &d->btnodes[d->btnodesused++];
	memcpy(p->caller, f->caller, sizeof(p->caller[0]) * DO_STATS);
	RBT_INSERT(btshead, &d->btraces, p);
	return p;
}
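/*
 * store_caller() above interns one btnode per distinct backtrace: the
 * tree is keyed by a memcmp() of the saved caller array (btcmp()), a hit
 * returns the existing node, and a miss copies the DO_STATS frames into
 * the next slot of a mapped page of btnodes before inserting it.
 */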

static void fabstorel(const void *, char *, size_t);

static void
print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t index)
{
	struct region_info *r;
	struct chunk_info *chunkinfo;
	struct btnode* btnode;
	uint32_t chunknum;
	int frame;
	char buf1[128];
	char buf2[128];
	const char *msg = "";

	r = find(pool, p);
	chunkinfo = (struct chunk_info *)r->size;
	chunknum = find_chunknum(pool, chunkinfo, p, 0);
	btnode = (struct btnode *)r->f[chunknum];
	frame = DO_STATS - 1;
	if (btnode != NULL)
		fabstorel(btnode->caller[frame], buf1, sizeof(buf1));
	strlcpy(buf2, ". 0x0", sizeof(buf2));
	if (chunknum > 0) {
		chunknum--;
		btnode = (struct btnode *)r->f[chunknum];
		if (btnode != NULL)
			fabstorel(btnode->caller[frame], buf2, sizeof(buf2));
		if (CHUNK_FREE(chunkinfo, chunknum))
			msg = " (now free)";
	}

	wrterror(pool,
	    "write to free chunk %p[%zu..%zu]@%zu allocated at %s "
	    "(preceding chunk %p allocated at %s%s)",
	    p, index * sizeof(uint64_t), (index + 1) * sizeof(uint64_t) - 1,
	    sz, buf1, p - sz, buf2, msg);
}

static void
ulog(const char *format, ...)
{
	va_list ap;
	static char* buf;
	static size_t filled;
	int len;

	if (buf == NULL)
		buf = MMAP(KTR_USER_MAXLEN, 0);
	if (buf == MAP_FAILED)
		return;

	va_start(ap, format);
	len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
	va_end(ap);
	if (len < 0)
		return;
	if ((size_t)len > KTR_USER_MAXLEN - filled)
		len = KTR_USER_MAXLEN - filled;
	filled += len;
	if (filled > 0) {
		if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
			utrace("malloc", buf, filled);
			filled = 0;
		}
	}
}
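/*
 * ulog() above buffers output until a newline arrives or the buffer is
 * full, then emits it as a ktrace user record via utrace(2) with the
 * label "malloc".  One way to read these records (tool usage assumed,
 * not specified in this file) is to run the program under "ktrace -t u"
 * and then view the trace with "kdump -u malloc".
 */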

struct malloc_leak {
	void *f;
	size_t total_size;
	int count;
};

struct leaknode {
	RBT_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static inline int
leakcmp(const struct leaknode *e1, const struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

RBT_HEAD(leaktree, leaknode);
RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
RBT_GENERATE(leaktree, leaknode, entry, leakcmp);

static void
wrtwarning(const char *func, char *msg, ...)
{
	int saved_errno = errno;
	va_list ap;

	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
	    getpid(), func != NULL ? func : "unknown");
	va_start(ap, msg);
	vdprintf(STDERR_FILENO, msg, ap);
	va_end(ap);
	dprintf(STDERR_FILENO, "\n");

	errno = saved_errno;
}

static void
putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static unsigned int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RBT_FIND(leaktree, leaks, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE, 0);
			if (page == MAP_FAILED) {
				wrtwarning(__func__, strerror(errno));
				return;
			}
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RBT_INSERT(leaktree, leaks, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}
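/*
 * putleakinfo() above aggregates allocations by backtrace: leak records
 * are keyed on the interned btnode pointer f, and an existing node just
 * accumulates sz * cnt bytes and cnt allocations, so the report shows
 * one entry per unique backtrace rather than one per object.
 */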

static void
fabstorel(const void *f, char *buf, size_t size)
{
	Dl_info info;
	const char *object = ".";
	const char *caller;

	caller = f;
	if (caller != NULL && dladdr(f, &info) != 0) {
		caller -= (uintptr_t)info.dli_fbase;
		object = info.dli_fname;
	}
	snprintf(buf, size, "%s %p", object, caller);
}
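/*
 * fabstorel() above rewrites an absolute caller address as
 * "<object> <offset into object>", e.g. something roughly like
 * "/usr/lib/libc.so.XX.X 0x5a330" (path and offset are made up here).
 * That form is what the "addr2line -e %s" hint printed by dump_leak()
 * below expects as its arguments.
 */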

static void
dump_leak(struct leaknode *p)
{
	int i;
	char buf[128];

	if (p->d.f == NULL) {
		fabstorel(NULL, buf, sizeof(buf));
		ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
		    p->d.f, p->d.total_size, p->d.count,
		    p->d.total_size / p->d.count, buf);
		return;
	}

	for (i = 0; i < DO_STATS; i++) {
		const char *abscaller;

		abscaller = ((struct btnode*)p->d.f)->caller[i];
		if (abscaller == NULL)
			break;
		fabstorel(abscaller, buf, sizeof(buf));
		if (i == 0)
			ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
			    abscaller, p->d.total_size, p->d.count,
			    p->d.total_size / p->d.count, buf);
		else
			ulog("%*p %*s %6s %6s addr2line -e %s\n",
			    i + 18, abscaller, 7 - i, "-", "-", "-", buf);
	}
}

static void
dump_leaks(struct leaktree *leaks)
{
	struct leaknode *p;

	ulog("Leak report:\n");
	ulog("                 f     sum      #    avg\n");

	RBT_FOREACH(p, leaktree, leaks)
		dump_leak(p);
}

static void
dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
    int fromfreelist)
{
	while (p != NULL) {
		if (mopts.malloc_verbose)
			ulog("chunk %18p %18p %4zu %d/%d\n",
			    p->page, NULL,
			    B2SIZE(p->bucket), p->free, p->total);
		if (!fromfreelist) {
			size_t i, sz = B2SIZE(p->bucket);
			for (i = 0; i < p->total; i++) {
				if (!CHUNK_FREE(p, i))
					putleakinfo(leaks, f[i], sz, 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (mopts.malloc_verbose && p != NULL)
			ulog(" ->");
	}
}

static void
dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
{
	u_int i, j, count;
	struct chunk_info *p;

	ulog("Free chunk structs:\n");
	ulog("Bkt) #CI                     page"
	    "                  f size free/n\n");
	for (i = 0; i <= BUCKETS; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			if (j == 0)
				ulog("%3d) %3d ", i, count);
			else
				ulog("         ");
			if (p != NULL)
				dump_chunk(leaks, p, NULL, 1);
			else
				ulog(".\n");
		}
	}

}

static void
dump_free_page_info(struct dir_info *d)
{
	struct smallcache *cache;
	size_t i, total = 0;

	ulog("Cached in small cache:\n");
	for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
		cache = &d->smallcache[i];
		if (cache->length != 0)
			ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
			    cache->length, cache->length * (i + 1));
		total += cache->length * (i + 1);
	}

	ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
	    d->bigcache_size);
	for (i = 0; i < d->bigcache_size; i++) {
		if (d->bigcache[i].psize != 0)
			ulog("%zu: %zu\n", i, d->bigcache[i].psize);
		total += d->bigcache[i].psize;
	}
	ulog("Free pages cached: %zu\n", total);
}

static void
malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
{
	size_t i, realsize;

	if (mopts.malloc_verbose) {
		ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
		ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
		    d->mmap_flag);
		ulog("Region slots free %zu/%zu\n",
		    d->regions_free, d->regions_total);
		ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
		ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
		ulog("Cheap reallocs %zu/%zu\n",
		    d->cheap_reallocs, d->cheap_realloc_tries);
		ulog("In use %zu\n", d->malloc_used);
		ulog("Guarded %zu\n", d->malloc_guarded);
		dump_free_chunk_info(d, leaks);
		dump_free_page_info(d);
		ulog("Hash table:\n");
		ulog("slot)  hash d  type               page            "
		    "f     size [free/n]\n");
	}
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			if (mopts.malloc_verbose)
				ulog("%4zx) #%4zx %zd ",
				    i, h, h - i);
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(leaks, d->r[i].f, realsize, 1);
				if (mopts.malloc_verbose)
					ulog("pages %18p %18p %zu\n", d->r[i].p,
					    d->r[i].f, realsize);
			} else
				dump_chunk(leaks,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	if (mopts.malloc_verbose)
		ulog("\n");
}

static void
malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
{
	int i;
	void *p;
	struct region_info *r;

	if (pool == NULL || pool->r == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	malloc_dump1(poolno, pool, leaks);
}

void
malloc_dump(void)
{
	u_int i;
	int saved_errno = errno;

	/* XXX leak when run multiple times */
	struct leaktree leaks = RBT_INITIALIZER(&leaks);

	for (i = 0; i < mopts.malloc_mutexes; i++)
		malloc_dump0(i, mopts.malloc_pool[i], &leaks);

	dump_leaks(&leaks);
	ulog("\n");
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);
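/*
 * malloc_dump() above flushes each pool's delayed-free chunks, collects
 * leak records into a temporary red-black tree and writes the report via
 * ulog()/utrace(2).  It is exported as a weak libc symbol; presumably
 * (not shown in this file) a statistics-enabled program could also call
 * it directly to obtain an intermediate dump before the exit-time dump
 * performed by malloc_exit() below.
 */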

static void
malloc_exit(void)
{
	int save_errno = errno;

	ulog("******** Start dump %s *******\n", __progname);
	ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
	    "G=%zu\n",
	    mopts.malloc_mutexes,
	    mopts.internal_funcs, mopts.malloc_freecheck,
	    mopts.malloc_freeunmap, mopts.def_malloc_junk,
	    mopts.malloc_realloc, mopts.malloc_xmalloc,
	    mopts.chunk_canaries, mopts.def_maxcache,
	    mopts.malloc_guard);

	malloc_dump();
	ulog("******** End dump %s *******\n", __progname);
	errno = save_errno;
}

#endif /* MALLOC_STATS */