25 #include "SDL_stdinc.h"
27 #if defined(HAVE_MALLOC)
36 return calloc(nmemb, size);
51 #define LACKS_SYS_TYPES_H
53 #define LACKS_STRINGS_H
54 #define LACKS_STRING_H
55 #define LACKS_STDLIB_H
504 #define WIN32_LEAN_AND_MEAN
507 #define HAVE_MORECORE 0
508 #define LACKS_UNISTD_H
509 #define LACKS_SYS_PARAM_H
510 #define LACKS_SYS_MMAN_H
511 #define LACKS_STRING_H
512 #define LACKS_STRINGS_H
513 #define LACKS_SYS_TYPES_H
514 #define LACKS_ERRNO_H
515 #define LACKS_FCNTL_H
516 #define MALLOC_FAILURE_ACTION
517 #define MMAP_CLEARS 0
520 #if defined(DARWIN) || defined(_DARWIN)
522 #ifndef HAVE_MORECORE
523 #define HAVE_MORECORE 0
528 #ifndef LACKS_SYS_TYPES_H
533 #define MAX_SIZE_T (~(size_t)0)
536 #define ONLY_MSPACES 0
545 #ifndef MALLOC_ALIGNMENT
546 #define MALLOC_ALIGNMENT ((size_t)8U)
552 #define ABORT abort()
554 #ifndef ABORT_ON_ASSERT_FAILURE
555 #define ABORT_ON_ASSERT_FAILURE 1
557 #ifndef PROCEED_ON_ERROR
558 #define PROCEED_ON_ERROR 0
570 #define MMAP_CLEARS 1
574 #define HAVE_MREMAP 1
576 #define HAVE_MREMAP 0
579 #ifndef MALLOC_FAILURE_ACTION
580 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
582 #ifndef HAVE_MORECORE
584 #define HAVE_MORECORE 0
586 #define HAVE_MORECORE 1
590 #define MORECORE_CONTIGUOUS 0
593 #define MORECORE sbrk
595 #ifndef MORECORE_CONTIGUOUS
596 #define MORECORE_CONTIGUOUS 1
599 #ifndef DEFAULT_GRANULARITY
600 #if MORECORE_CONTIGUOUS
601 #define DEFAULT_GRANULARITY (0)
603 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
606 #ifndef DEFAULT_TRIM_THRESHOLD
607 #ifndef MORECORE_CANNOT_TRIM
608 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
610 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
613 #ifndef DEFAULT_MMAP_THRESHOLD
615 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
617 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
620 #ifndef USE_BUILTIN_FFS
621 #define USE_BUILTIN_FFS 0
623 #ifndef USE_DEV_RANDOM
624 #define USE_DEV_RANDOM 0
627 #define NO_MALLINFO 0
629 #ifndef MALLINFO_FIELD_TYPE
630 #define MALLINFO_FIELD_TYPE size_t
633 #define memset SDL_memset
634 #define memcpy SDL_memcpy
635 #define malloc SDL_malloc
636 #define calloc SDL_calloc
637 #define realloc SDL_realloc
638 #define free SDL_free
647 #define M_TRIM_THRESHOLD (-1)
648 #define M_GRANULARITY (-2)
649 #define M_MMAP_THRESHOLD (-3)
678 #ifdef HAVE_USR_INCLUDE_MALLOC_H
679 #include "/usr/include/malloc.h"
708 #ifndef USE_DL_PREFIX
709 #define dlcalloc calloc
711 #define dlmalloc malloc
712 #define dlmemalign memalign
713 #define dlrealloc realloc
714 #define dlvalloc valloc
715 #define dlpvalloc pvalloc
716 #define dlmallinfo mallinfo
717 #define dlmallopt mallopt
718 #define dlmalloc_trim malloc_trim
719 #define dlmalloc_stats malloc_stats
720 #define dlmalloc_usable_size malloc_usable_size
721 #define dlmalloc_footprint malloc_footprint
722 #define dlmalloc_max_footprint malloc_max_footprint
723 #define dlindependent_calloc independent_calloc
724 #define dlindependent_comalloc independent_comalloc
1067 typedef void *mspace;
1080 mspace create_mspace(
size_t capacity,
int locked);
1088 size_t destroy_mspace(mspace msp);
1099 mspace create_mspace_with_base(
void *base,
size_t capacity,
int locked);
1105 void *mspace_malloc(mspace msp,
size_t bytes);
1115 void mspace_free(mspace msp,
void *mem);
1126 void *mspace_realloc(mspace msp,
void *mem,
size_t newsize);
1132 void *mspace_calloc(mspace msp,
size_t n_elements,
size_t elem_size);
1138 void *mspace_memalign(mspace msp,
size_t alignment,
size_t bytes);
1144 void **mspace_independent_calloc(mspace msp,
size_t n_elements,
1145 size_t elem_size,
void *chunks[]);
1151 void **mspace_independent_comalloc(mspace msp,
size_t n_elements,
1152 size_t sizes[],
void *chunks[]);
1158 size_t mspace_footprint(mspace msp);
1164 size_t mspace_max_footprint(mspace msp);
1172 struct mallinfo mspace_mallinfo(mspace msp);
1179 void mspace_malloc_stats(mspace msp);
1185 int mspace_trim(mspace msp,
size_t pad);
1190 int mspace_mallopt(
int,
int);
1211 #pragma warning( disable : 4146 )
1214 #ifndef LACKS_STDIO_H
1218 #ifndef LACKS_ERRNO_H
1224 #ifndef LACKS_STDLIB_H
1228 #if ABORT_ON_ASSERT_FAILURE
1229 #define assert(x) if(!(x)) ABORT
1236 #ifndef LACKS_STRING_H
1240 #ifndef LACKS_STRINGS_H
1241 #include <strings.h>
1245 #ifndef LACKS_SYS_MMAN_H
1246 #include <sys/mman.h>
1248 #ifndef LACKS_FCNTL_H
1253 #ifndef LACKS_UNISTD_H
1256 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1257 extern void *sbrk(ptrdiff_t);
1263 #ifndef malloc_getpagesize
1264 # ifdef _SC_PAGESIZE
1265 # ifndef _SC_PAGE_SIZE
1266 # define _SC_PAGE_SIZE _SC_PAGESIZE
1269 # ifdef _SC_PAGE_SIZE
1270 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1272 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1273 extern size_t getpagesize();
1274 # define malloc_getpagesize getpagesize()
1277 # define malloc_getpagesize getpagesize()
1279 # ifndef LACKS_SYS_PARAM_H
1280 # include <sys/param.h>
1282 # ifdef EXEC_PAGESIZE
1283 # define malloc_getpagesize EXEC_PAGESIZE
1287 # define malloc_getpagesize NBPG
1289 # define malloc_getpagesize (NBPG * CLSIZE)
1293 # define malloc_getpagesize NBPC
1296 # define malloc_getpagesize PAGESIZE
1298 # define malloc_getpagesize ((size_t)4096U)
/* ------------------- Word sizes and chunk alignment -------------------
   NOTE(review): this excerpt is a sampled fragment of dlmalloc; original
   source lines between the numbered lines below are not visible here. */

/* Width of size_t in bytes, and in bits (bytes << 3). */
1312 #define SIZE_T_SIZE (sizeof(size_t))
1313 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
/* Small size_t constants, pre-cast once to avoid sprinkling casts. */
1317 #define SIZE_T_ZERO ((size_t)0)
1318 #define SIZE_T_ONE ((size_t)1)
1319 #define SIZE_T_TWO ((size_t)2)
/* Multiples of sizeof(size_t) used in chunk-overhead arithmetic. */
1320 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1321 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1322 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1323 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
/* Low-bit mask for alignment tests; valid only because MALLOC_ALIGNMENT
   is a power of two (8 by default, per the earlier definition). */
1326 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
/* Nonzero iff address A is already MALLOC_ALIGNMENT-aligned. */
1329 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
/* Number of bytes needed to round address A up to MALLOC_ALIGNMENT
   (0 when A is already aligned). */
1332 #define align_offset(A)\
1333 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1334 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
1346 #define MFAIL ((void*)(MAX_SIZE_T))
1347 #define CMFAIL ((char*)(MFAIL))
1350 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
1351 #define USE_MMAP_BIT (SIZE_T_ZERO)
1352 #define CALL_MMAP(s) MFAIL
1353 #define CALL_MUNMAP(a, s) (-1)
1354 #define DIRECT_MMAP(s) MFAIL
1357 #define IS_MMAPPED_BIT (SIZE_T_ONE)
1358 #define USE_MMAP_BIT (SIZE_T_ONE)
1361 #define CALL_MUNMAP(a, s) munmap((a), (s))
1362 #define MMAP_PROT (PROT_READ|PROT_WRITE)
1363 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1364 #define MAP_ANONYMOUS MAP_ANON
1366 #ifdef MAP_ANONYMOUS
1367 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1368 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1374 #define MMAP_FLAGS (MAP_PRIVATE)
1376 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1377 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1378 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1379 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1382 #define DIRECT_MMAP(s) CALL_MMAP(s)
/* Win32 emulation of mmap(): reserve + commit `size` bytes of
   read/write memory via VirtualAlloc.  Returns the allocation, or
   MFAIL on failure, matching the mmap() failure convention used by
   CALL_MMAP.
   NOTE(review): fragment — the return-type line and the declaration of
   `ptr` (original lines around 1386-1389) are not visible in this
   excerpt; presumably `void *ptr =` precedes the VirtualAlloc call. */
1387 win32mmap(
size_t size)
1390 VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1391 return (ptr != 0) ? ptr :
MFAIL;
/* Win32 emulation of a "direct" mmap for large requests: same as
   win32mmap but adds MEM_TOP_DOWN so big blocks are carved from high
   addresses, reducing fragmentation of the low address space.
   NOTE(review): fragment — the page-protection argument on the
   following line (original line 1399) is not visible here; presumably
   PAGE_READWRITE as in win32mmap. */
1396 win32direct_mmap(
size_t size)
1398 void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1400 return (ptr != 0) ? ptr :
MFAIL;
/* Win32 emulation of munmap(): walk the range [ptr, ptr+size) region
   by region using VirtualQuery, releasing each region with
   VirtualFree(..., MEM_RELEASE).  Each queried region must exactly
   start at the cursor, be its own allocation base, be committed, and
   fit within the remaining size — otherwise the unmap is rejected.
   NOTE(review): fragment — the loop construct, the error-return
   statements after each `if`, and the final return (original lines
   1406-1421 in part) are not visible in this excerpt. */
1405 win32munmap(
void *ptr,
size_t size)
1407 MEMORY_BASIC_INFORMATION minfo;
/* Query the region at the current cursor; failure aborts the unmap. */
1410 if (VirtualQuery(cptr, &minfo,
sizeof(minfo)) == 0)
/* Sanity-check that the region is exactly what we expect to release. */
1412 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1413 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1415 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
/* Advance past the region just released. */
1417 cptr += minfo.RegionSize;
1418 size -= minfo.RegionSize;
1423 #define CALL_MMAP(s) win32mmap(s)
1424 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
1425 #define DIRECT_MMAP(s) win32direct_mmap(s)
1429 #if HAVE_MMAP && HAVE_MREMAP
1430 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1432 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1436 #define CALL_MORECORE(S) MORECORE(S)
1438 #define CALL_MORECORE(S) MFAIL
1442 #define USE_NONCONTIGUOUS_BIT (4U)
1445 #define EXTERN_BIT (8U)
1468 #include <pthread.h>
1469 #define MLOCK_T pthread_mutex_t
1470 #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
1471 #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
1472 #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
1475 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
1478 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
1486 #define MLOCK_T long
/* Win32 spin-lock acquire: atomically CAS the long at *sl from 0 to 1
   via InterlockedCompareExchange.  The #ifdef selects between the
   modern typed API and an older pointer-cast variant.
   NOTE(review): fragment — the surrounding spin loop and backoff
   (original lines 1489-1500 in part) are not visible in this
   excerpt; presumably the CAS success returns 0 and failure retries. */
1488 win32_acquire_lock(MLOCK_T * sl)
1491 #ifdef InterlockedCompareExchangePointer
1492 if (!InterlockedCompareExchange(sl, 1, 0))
1495 if (!InterlockedCompareExchange((
void **) sl, (
void *) 1, (
void *) 0))
/* Win32 spin-lock release: atomically store 0 back into *sl.
   InterlockedExchange provides the full-barrier store that pairs with
   the CAS in win32_acquire_lock. */
1503 win32_release_lock(MLOCK_T * sl)
1505 InterlockedExchange(sl, 0);
1508 #define INITIAL_LOCK(l) *(l)=0
1509 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1510 #define RELEASE_LOCK(l) win32_release_lock(l)
1512 static MLOCK_T morecore_mutex;
1514 static MLOCK_T magic_init_mutex;
1517 #define USE_LOCK_BIT (2U)
1519 #define USE_LOCK_BIT (0U)
1520 #define INITIAL_LOCK(l)
1523 #if USE_LOCKS && HAVE_MORECORE
1524 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1525 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1527 #define ACQUIRE_MORECORE_LOCK()
1528 #define RELEASE_MORECORE_LOCK()
1532 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1533 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1535 #define ACQUIRE_MAGIC_INIT_LOCK()
1536 #define RELEASE_MAGIC_INIT_LOCK()
1681 struct malloc_chunk *fd;
1682 struct malloc_chunk *bk;
1694 #define MCHUNK_SIZE (sizeof(mchunk))
1697 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1699 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
1703 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1705 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1708 #define MIN_CHUNK_SIZE\
1709 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1712 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1713 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1715 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
1718 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1719 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1722 #define pad_request(req) \
1723 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1726 #define request2size(req) \
1727 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
/* ---------------------- Chunk head bits and accessors ----------------------
   The two low bits of a chunk's `head` word encode in-use state:
   PINUSE = previous chunk in use, CINUSE = this chunk in use.
   NOTE(review): sampled fragment — some original lines between the
   numbered lines below are not visible here. */
1740 #define PINUSE_BIT (SIZE_T_ONE)
1741 #define CINUSE_BIT (SIZE_T_TWO)
1742 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
/* Head value marking a fencepost (segment-end sentinel) chunk. */
1745 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
/* Extract in-use flags and the size (head with flag bits masked off). */
1748 #define cinuse(p) ((p)->head & CINUSE_BIT)
1749 #define pinuse(p) ((p)->head & PINUSE_BIT)
1750 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
1752 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1753 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
/* Byte-offset pointer arithmetic on chunk pointers. */
1756 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1757 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
/* Physical neighbors: next via this chunk's size, prev via prev_foot. */
1760 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
1761 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
/* PINUSE bit of the physically-next chunk (i.e. is p itself in use?). */
1764 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
/* Read/write the trailing size field stored at offset s from p. */
1767 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
1768 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
/* Mark a free chunk: size + PINUSE in head, size replicated in foot. */
1771 #define set_size_and_pinuse_of_free_chunk(p, s)\
1772 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
/* Free chunk p of size s whose physical successor is n: clear n's
   PINUSE then stamp p as free. */
1775 #define set_free_with_pinuse(p, s, n)\
1776 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
/* Mmapped chunks never have PINUSE set and flag mmap in prev_foot. */
1778 #define is_mmapped(p)\
1779 (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
/* Per-chunk bookkeeping overhead, larger for mmapped chunks. */
1782 #define overhead_for(p)\
1783 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
/* Two alternative definitions below sit under a preprocessor
   conditional that is not visible in this excerpt (presumably
   MMAP_CLEARS: mmapped memory arrives pre-zeroed, so calloc can skip
   clearing it — TODO confirm against the full source). */
1787 #define calloc_must_clear(p) (!is_mmapped(p))
1789 #define calloc_must_clear(p) (1)
1883 struct malloc_tree_chunk
1888 struct malloc_tree_chunk *fd;
1889 struct malloc_tree_chunk *bk;
1891 struct malloc_tree_chunk *child[2];
1892 struct malloc_tree_chunk *parent;
1901 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
1960 struct malloc_segment
1964 struct malloc_segment *next;
1968 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
1969 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2050 #define NSMALLBINS (32U)
2051 #define NTREEBINS (32U)
2052 #define SMALLBIN_SHIFT (3U)
2053 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2054 #define TREEBIN_SHIFT (8U)
2055 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2056 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2057 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2073 size_t max_footprint;
2091 struct malloc_params
2096 size_t mmap_threshold;
2097 size_t trim_threshold;
2098 flag_t default_mflags;
2106 #define is_global(M) ((M) == &_gm_)
2107 #define is_initialized(M) ((M)->top != 0)
2113 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2114 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2115 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2117 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2118 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2119 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2121 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2122 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2124 #define set_lock(M,L)\
2125 ((M)->mflags = (L)?\
2126 ((M)->mflags | USE_LOCK_BIT) :\
2127 ((M)->mflags & ~USE_LOCK_BIT))
2130 #define page_align(S)\
2131 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
2134 #define granularity_align(S)\
2135 (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
2137 #define is_page_aligned(S)\
2138 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2139 #define is_granularity_aligned(S)\
2140 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2143 #define segment_holds(S, A)\
2144 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2150 msegmentptr sp = &m->seg;
2152 if (addr >= sp->base &&
addr < sp->base + sp->size)
2154 if ((sp = sp->next) == 0)
2163 msegmentptr sp = &m->seg;
2165 if ((
char *) sp >= ss->base && (
char *) sp < ss->base + ss->size)
2167 if ((sp = sp->next) == 0)
2172 #ifndef MORECORE_CANNOT_TRIM
2173 #define should_trim(M,s) ((s) > (M)->trim_check)
2175 #define should_trim(M,s) (0)
2183 #define TOP_FOOT_SIZE\
2184 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2198 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2200 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2201 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2205 #define PREACTION(M) (0)
2209 #define POSTACTION(M)
2222 #if PROCEED_ON_ERROR
2225 int malloc_corruption_error_count;
2228 static void reset_on_error(mstate
m);
2230 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2231 #define USAGE_ERROR_ACTION(m, p)
2235 #ifndef CORRUPTION_ERROR_ACTION
2236 #define CORRUPTION_ERROR_ACTION(m) ABORT
2239 #ifndef USAGE_ERROR_ACTION
2240 #define USAGE_ERROR_ACTION(m,p) ABORT
2249 #define check_free_chunk(M,P)
2250 #define check_inuse_chunk(M,P)
2251 #define check_malloced_chunk(M,P,N)
2252 #define check_mmapped_chunk(M,P)
2253 #define check_malloc_state(M)
2254 #define check_top_chunk(M,P)
2257 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
2258 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2259 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2260 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2261 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2262 #define check_malloc_state(M) do_check_malloc_state(M)
2264 static void do_check_any_chunk(mstate
m, mchunkptr
p);
2265 static void do_check_top_chunk(mstate
m, mchunkptr
p);
2266 static void do_check_mmapped_chunk(mstate
m, mchunkptr
p);
2267 static void do_check_inuse_chunk(mstate
m, mchunkptr
p);
2268 static void do_check_free_chunk(mstate
m, mchunkptr
p);
2269 static void do_check_malloced_chunk(mstate
m,
void *mem,
size_t s);
2270 static void do_check_tree(mstate
m, tchunkptr
t);
2271 static void do_check_treebin(mstate
m, bindex_t
i);
2272 static void do_check_smallbin(mstate
m, bindex_t
i);
2273 static void do_check_malloc_state(mstate
m);
2274 static int bin_find(mstate
m, mchunkptr
x);
2275 static size_t traverse_and_check(mstate
m);
/* ----------------------------- Bin indexing -----------------------------
   Small requests map linearly to one of NSMALLBINS smallbins, in
   SMALLBIN_WIDTH-byte granules; larger requests go to treebins. */
1280 is kept as-is below; NOTE(review): sampled fragment of the
   original indexing section. */
2280 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2281 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
2282 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2283 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
/* Address of bin headers inside the malloc_state.  Smallbins are laid
   out as pairs of pointers, hence the (i)<<1 scaling. */
2286 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2287 #define treebin_at(M,i) (&((M)->treebins[i]))
2290 #if defined(__GNUC__) && defined(i386)
2291 #define compute_tree_index(S, I)\
2293 size_t X = S >> TREEBIN_SHIFT;\
2296 else if (X > 0xFFFF)\
2300 __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
2301 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2305 #define compute_tree_index(S, I)\
2307 size_t X = S >> TREEBIN_SHIFT;\
2310 else if (X > 0xFFFF)\
2313 unsigned int Y = (unsigned int)X;\
2314 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2315 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2317 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2318 K = 14 - N + ((Y <<= K) >> 15);\
2319 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2325 #define bit_for_tree_index(i) \
2326 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2329 #define leftshift_for_tree_index(i) \
2330 ((i == NTREEBINS-1)? 0 : \
2331 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2334 #define minsize_for_tree_index(i) \
2335 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2336 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
/* ------------------------------ Bin bitmaps ------------------------------
   One bit per bin records non-emptiness, so free-list search can skip
   empty bins with bit tricks.  Also in this fragment (after the
   compute_bit2idx variants): least_bit isolates the lowest set bit
   (x & -x); left_bits masks bits at or above the next-higher position;
   same_or_left_bits masks bits at or above x's lowest bit.
   NOTE(review): sampled fragment — parts of the compute_bit2idx
   definitions and the intervening #else/#endif lines are missing. */
1 per-bin bit set/clear/test helpers: */
2342 #define idx2bit(i) ((binmap_t)(1) << (i))
2345 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2346 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2347 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2349 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2350 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2351 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
/* compute_bit2idx(X, I): set I to the index of the lowest set bit of X.
   Three variants: x86 GCC inline `bsfl`, builtin ffs(), and a portable
   branch-free bit-smearing fallback. */
2355 #if defined(__GNUC__) && defined(i386)
2356 #define compute_bit2idx(X, I)\
2359 __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2365 #define compute_bit2idx(X, I) I = ffs(X)-1
2368 #define compute_bit2idx(X, I)\
2370 unsigned int Y = X - 1;\
2371 unsigned int K = Y >> (16-4) & 16;\
2372 unsigned int N = K; Y >>= K;\
2373 N += K = Y >> (8-3) & 8; Y >>= K;\
2374 N += K = Y >> (4-2) & 4; Y >>= K;\
2375 N += K = Y >> (2-1) & 2; Y >>= K;\
2376 N += K = Y >> (1-0) & 1; Y >>= K;\
2377 I = (bindex_t)(N + Y);\
2383 #define least_bit(x) ((x) & -(x))
2386 #define left_bits(x) ((x<<1) | -(x<<1))
2389 #define same_or_left_bits(x) ((x) | -(x))
2422 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2424 #define ok_next(p, n) ((char*)(p) < (char*)(n))
2426 #define ok_cinuse(p) cinuse(p)
2428 #define ok_pinuse(p) pinuse(p)
2431 #define ok_address(M, a) (1)
2432 #define ok_next(b, n) (1)
2433 #define ok_cinuse(p) (1)
2434 #define ok_pinuse(p) (1)
2437 #if (FOOTERS && !INSECURE)
2439 #define ok_magic(M) ((M)->magic == mparams.magic)
2441 #define ok_magic(M) (1)
2447 #if defined(__GNUC__) && __GNUC__ >= 3
2448 #define RTCHECK(e) __builtin_expect(e, 1)
2450 #define RTCHECK(e) (e)
2453 #define RTCHECK(e) (1)
/* ------------------- Marking chunks in use (two variants) -------------------
   The first group (mark_inuse_foot empty) is the !FOOTERS build; the
   second group additionally stamps an mstate-xor-magic footer so free()
   can recover and authenticate the owning mstate (FOOTERS build).
   NOTE(review): sampled fragment — the #if FOOTERS / #else / #endif
   lines selecting between the two groups are not visible here. */
2460 #define mark_inuse_foot(M,p,s)
/* Set this chunk in-use (preserving its PINUSE bit) and set the
   successor's PINUSE bit. */
2463 #define set_inuse(M,p,s)\
2464 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2465 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
/* Set this chunk in-use with PINUSE known set, and propagate PINUSE
   to the successor. */
2468 #define set_inuse_and_pinuse(M,p,s)\
2469 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2470 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
/* Same, but without touching the successor (used when the successor's
   head is set separately). */
2473 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2474 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
/* FOOTERS variant: write (mstate ^ magic) into the next chunk's
   prev_foot as an ownership/integrity token. */
2479 #define mark_inuse_foot(M,p,s)\
2480 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
/* Recover the owning mstate from an in-use chunk's footer. */
2482 #define get_mstate_for(p)\
2483 ((mstate)(((mchunkptr)((char*)(p) +\
2484 (chunksize(p))))->prev_foot ^ mparams.magic))
2486 #define set_inuse(M,p,s)\
2487 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2488 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2489 mark_inuse_foot(M,p,s))
2491 #define set_inuse_and_pinuse(M,p,s)\
2492 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2493 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2494 mark_inuse_foot(M,p,s))
2496 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2497 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2498 mark_inuse_foot(M, p, s))
2513 #if MORECORE_CONTIGUOUS
2520 #if (FOOTERS && !INSECURE)
2526 if ((fd = open(
"/dev/urandom", O_RDONLY)) >= 0 &&
2527 read(fd,
buf,
sizeof(
buf)) ==
sizeof(
buf)) {
2528 s = *((
size_t *)
buf);
2539 s = (
size_t) 0x58585858U;
2556 SYSTEM_INFO system_info;
2557 GetSystemInfo(&system_info);
2558 mparams.page_size = system_info.dwPageSize;
2559 mparams.granularity = system_info.dwAllocationGranularity;
2569 if ((
sizeof(
size_t) !=
sizeof(
char *)) ||
2571 (
sizeof(
int) < 4) ||
2588 switch (param_number) {
2593 if (val >=
mparams.page_size && ((val & (val - 1)) == 0)) {
2611 do_check_any_chunk(mstate
m, mchunkptr
p)
2619 do_check_top_chunk(mstate m, mchunkptr p)
2626 assert(sz == m->topsize);
2635 do_check_mmapped_chunk(mstate m, mchunkptr p)
2651 do_check_inuse_chunk(mstate m, mchunkptr p)
2653 do_check_any_chunk(m, p);
2659 do_check_mmapped_chunk(m, p);
2664 do_check_free_chunk(mstate m, mchunkptr p)
2668 do_check_any_chunk(m, p);
2672 if (p != m->dv && p != m->top) {
2676 assert(next->prev_foot == sz);
2688 do_check_malloced_chunk(mstate m,
void *mem,
size_t s)
2693 do_check_inuse_chunk(m, p);
2704 do_check_tree(mstate m, tchunkptr
t)
2708 bindex_t tindex = t->index;
2719 do_check_any_chunk(m, ((mchunkptr) u));
2720 assert(u->index == tindex);
2726 if (u->parent == 0) {
2727 assert(u->child[0] == 0);
2728 assert(u->child[1] == 0);
2733 assert(u->parent->child[0] == u ||
2734 u->parent->child[1] == u ||
2735 *((tbinptr *) (u->parent)) == u);
2736 if (u->child[0] != 0) {
2737 assert(u->child[0]->parent == u);
2738 assert(u->child[0] != u);
2739 do_check_tree(m, u->child[0]);
2741 if (u->child[1] != 0) {
2742 assert(u->child[1]->parent == u);
2743 assert(u->child[1] != u);
2744 do_check_tree(m, u->child[1]);
2746 if (u->child[0] != 0 && u->child[1] != 0) {
2757 do_check_treebin(mstate m, bindex_t
i)
2761 int empty = (m->treemap & (1U <<
i)) == 0;
2765 do_check_tree(m, t);
2770 do_check_smallbin(mstate m, bindex_t i)
2773 mchunkptr p = b->bk;
2774 unsigned int empty = (m->smallmap & (1U <<
i)) == 0;
2778 for (; p !=
b; p = p->bk) {
2782 do_check_free_chunk(m, p);
2789 do_check_inuse_chunk(m, q);
2796 bin_find(mstate m, mchunkptr
x)
2807 }
while ((p = p->fd) != b);
2815 while (t != 0 &&
chunksize(t) != size) {
2822 if (u == (tchunkptr)
x)
2824 }
while ((u = u->fd) !=
t);
2833 traverse_and_check(mstate m)
2837 msegmentptr s = &m->seg;
2841 mchunkptr lastq = 0;
2848 do_check_inuse_chunk(m, q);
2850 assert(q == m->dv || bin_find(m, q));
2852 do_check_free_chunk(m, q);
2865 do_check_malloc_state(mstate m)
2871 do_check_smallbin(m, i);
2873 do_check_treebin(m, i);
2875 if (m->dvsize != 0) {
2876 do_check_any_chunk(m, m->dv);
2879 assert(bin_find(m, m->dv) == 0);
2883 do_check_top_chunk(m, m->top);
2886 assert(bin_find(m, m->top) == 0);
2889 total = traverse_and_check(m);
2890 assert(total <= m->footprint);
2891 assert(m->footprint <= m->max_footprint);
2898 static struct mallinfo
2901 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2908 msegmentptr s = &m->seg;
2926 nm.hblkhd = m->footprint - sum;
2927 nm.usmblks = m->max_footprint;
2928 nm.uordblks = m->footprint - mfree;
2929 nm.fordblks = mfree;
2930 nm.keepcost = m->topsize;
2948 msegmentptr s = &m->seg;
2949 maxfp = m->max_footprint;
2964 #ifndef LACKS_STDIO_H
2965 fprintf(stderr,
"max system bytes = %10lu\n",
2966 (
unsigned long) (maxfp));
2967 fprintf(stderr,
"system bytes = %10lu\n", (
unsigned long) (fp));
2968 fprintf(stderr,
"in use bytes = %10lu\n", (
unsigned long) (used));
/* Link free chunk P of small size S at the head of its smallbin,
   marking the bin's bitmap bit if the bin was empty; a bad bin-head
   forward pointer (ok_address fails) triggers CORRUPTION_ERROR_ACTION.
   NOTE(review): fragment — original lines 2988 and 2993-2994 of this
   backslash-continued macro (the actual fd/bk splicing) are not
   visible in this excerpt, so no comments are placed inside it. */
2985 #define insert_small_chunk(M, P, S) {\
2986 bindex_t I = small_index(S);\
2987 mchunkptr B = smallbin_at(M, I);\
2989 assert(S >= MIN_CHUNK_SIZE);\
2990 if (!smallmap_is_marked(M, I))\
2991 mark_smallmap(M, I);\
2992 else if (RTCHECK(ok_address(M, B->fd)))\
2995 CORRUPTION_ERROR_ACTION(M);\
/* Unlink chunk P of small size S from its smallbin; clears the bin's
   bitmap bit when the bin becomes empty.  Neighbors F/B are validated
   (each must be the bin header itself or pass ok_address) before the
   splice; otherwise CORRUPTION_ERROR_ACTION fires.
   NOTE(review): fragment — the splice statements and bin-emptiness
   test inside this continued macro (original lines 3008-3018 in part)
   are not visible in this excerpt. */
3004 #define unlink_small_chunk(M, P, S) {\
3005 mchunkptr F = P->fd;\
3006 mchunkptr B = P->bk;\
3007 bindex_t I = small_index(S);\
3010 assert(chunksize(P) == small_index2size(I));\
3012 clear_smallmap(M, I);\
3013 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3014 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3019 CORRUPTION_ERROR_ACTION(M);\
/* Fast-path unlink of the first chunk P from smallbin B at index I
   (caller already knows P == B->fd).  Only the new first chunk F needs
   validation; a failed ok_address triggers CORRUPTION_ERROR_ACTION.
   NOTE(review): fragment — interior lines of this continued macro
   (original lines 3026-3035 in part) are not visible here. */
3024 #define unlink_first_small_chunk(M, B, P, I) {\
3025 mchunkptr F = P->fd;\
3028 assert(chunksize(P) == small_index2size(I));\
3030 clear_smallmap(M, I);\
3031 else if (RTCHECK(ok_address(M, F))) {\
3036 CORRUPTION_ERROR_ACTION(M);\
/* Make chunk P of size S the new designated-victim (dv) chunk,
   returning the previous dv (which is always small-sized, per the
   assert) to its smallbin first.
   NOTE(review): fragment — the lines installing P as M->dv/M->dvsize
   (original lines 3044, 3048-3050 in part) are not visible here. */
3042 #define replace_dv(M, P, S) {\
3043 size_t DVS = M->dvsize;\
3045 mchunkptr DV = M->dv;\
3046 assert(is_small(DVS));\
3047 insert_small_chunk(M, DV, DVS);\
/* Insert free tree-chunk X of size S into its treebin: if the bin is
   empty X becomes the root (parent set to the bin header); otherwise
   descend the bitwise trie keyed by the size bits in K, either hanging
   X on an empty child slot or, on an exact size match, splicing X into
   that node's same-size fd/bk ring.  Pointer validation failures route
   to CORRUPTION_ERROR_ACTION.
   NOTE(review): fragment — many interior lines of this continued macro
   (declarations of I/H/T, the descent loop body, and the ring splice,
   original lines 3057-3098 in part) are not visible in this excerpt. */
3056 #define insert_large_chunk(M, X, S) {\
3059 compute_tree_index(S, I);\
3060 H = treebin_at(M, I);\
3062 X->child[0] = X->child[1] = 0;\
3063 if (!treemap_is_marked(M, I)) {\
3064 mark_treemap(M, I);\
3066 X->parent = (tchunkptr)H;\
3071 size_t K = S << leftshift_for_tree_index(I);\
3073 if (chunksize(T) != S) {\
3074 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3078 else if (RTCHECK(ok_address(M, C))) {\
3085 CORRUPTION_ERROR_ACTION(M);\
3090 tchunkptr F = T->fd;\
3091 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3099 CORRUPTION_ERROR_ACTION(M);\
/* Remove tree-chunk X from its treebin.  If X shares its size ring,
   splice it out via fd/bk; otherwise find a replacement R (rightmost
   descendant of X's right subtree, falling back to the left), detach
   it, then reattach X's parent link (or the bin header, clearing the
   treemap bit when the bin empties) and both child subtrees to R.
   Every pointer followed is first validated with ok_address; any
   failure routes to CORRUPTION_ERROR_ACTION.
   NOTE(review): fragment — numerous interior lines of this continued
   macro (ring splice, replacement detach, child reattachment bodies,
   original lines 3126-3192 in part) are not visible in this excerpt. */
3124 #define unlink_large_chunk(M, X) {\
3125 tchunkptr XP = X->parent;\
3128 tchunkptr F = X->fd;\
3130 if (RTCHECK(ok_address(M, F))) {\
3135 CORRUPTION_ERROR_ACTION(M);\
3140 if (((R = *(RP = &(X->child[1]))) != 0) ||\
3141 ((R = *(RP = &(X->child[0]))) != 0)) {\
3143 while ((*(CP = &(R->child[1])) != 0) ||\
3144 (*(CP = &(R->child[0])) != 0)) {\
3147 if (RTCHECK(ok_address(M, RP)))\
3150 CORRUPTION_ERROR_ACTION(M);\
3155 tbinptr* H = treebin_at(M, X->index);\
3157 if ((*H = R) == 0) \
3158 clear_treemap(M, X->index);\
3160 else if (RTCHECK(ok_address(M, XP))) {\
3161 if (XP->child[0] == X) \
3167 CORRUPTION_ERROR_ACTION(M);\
3169 if (RTCHECK(ok_address(M, R))) {\
3172 if ((C0 = X->child[0]) != 0) {\
3173 if (RTCHECK(ok_address(M, C0))) {\
3178 CORRUPTION_ERROR_ACTION(M);\
3180 if ((C1 = X->child[1]) != 0) {\
3181 if (RTCHECK(ok_address(M, C1))) {\
3186 CORRUPTION_ERROR_ACTION(M);\
3190 CORRUPTION_ERROR_ACTION(M);\
/* Size-dispatching wrappers: route a free chunk to the smallbin or
   treebin insert/unlink depending on is_small(S).  The large path
   casts P to a tree-chunk pointer first. */
3197 #define insert_chunk(M, P, S)\
3198 if (is_small(S)) insert_small_chunk(M, P, S)\
3199 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

3201 #define unlink_chunk(M, P, S)\
3202 if (is_small(S)) unlink_small_chunk(M, P, S)\
3203 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3209 #define internal_malloc(m, b) mspace_malloc(m, b)
3210 #define internal_free(m, mem) mspace_free(m,mem);
3213 #define internal_malloc(m, b)\
3214 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3215 #define internal_free(m, mem)\
3216 if (m == gm) dlfree(mem); else mspace_free(m,mem);
3218 #define internal_malloc(m, b) dlmalloc(b)
3219 #define internal_free(m, mem) dlfree(mem)
3246 mchunkptr p = (
mchunkptr) (mm + offset);
3253 if (mm < m->least_addr)
3255 if ((m->footprint += mmsize) > m->max_footprint)
3256 m->max_footprint = m->footprint;
3274 (oldsize - nb) <= (
mparams.granularity << 1))
3282 oldmmsize, newmmsize, 1);
3284 mchunkptr newp = (
mchunkptr) (cp + offset);
3291 if (cp < m->least_addr)
3293 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3294 m->max_footprint = m->footprint;
3318 m->trim_check =
mparams.trim_threshold;
3329 bin->fd = bin->bk = bin;
3333 #if PROCEED_ON_ERROR
3337 reset_on_error(mstate m)
3340 ++malloc_corruption_error_count;
3342 m->smallbins = m->treebins = 0;
3343 m->dvsize = m->topsize = 0;
3360 size_t psize = (
char *) oldfirst - (
char *)
p;
3362 size_t qsize = psize - nb;
3365 assert((
char *) oldfirst > (
char *) q);
3370 if (oldfirst == m->top) {
3371 size_t tsize = m->topsize += qsize;
3375 }
else if (oldfirst == m->dv) {
3376 size_t dsize = m->dvsize += qsize;
3401 char *old_top = (
char *) m->top;
3403 char *old_end = oldsp->base + oldsp->size;
3404 size_t ssize =
pad_request(
sizeof(
struct malloc_segment));
3407 char *asp = rawsp +
offset;
3412 mchunkptr p = tnext;
3422 m->seg.base = tbase;
3423 m->seg.size = tsize;
3424 m->seg.sflags = mmapped;
3432 if ((
char *) (&(nextp->head)) < old_end)
3440 if (csp != old_top) {
3442 size_t psize = csp - old_top;
3459 flag_t mmap_flag = 0;
3578 size_t ssize = end - br;
3589 if ((m->footprint += tsize) > m->max_footprint)
3590 m->max_footprint = m->footprint;
3593 m->seg.base = m->least_addr = tbase;
3594 m->seg.size = tsize;
3595 m->seg.sflags = mmap_flag;
3604 (
size_t) ((tbase + tsize) - (
char *) mn) -
3611 msegmentptr sp = &m->seg;
3612 while (sp != 0 && tbase != sp->base + sp->size)
3616 init_top(m, m->top, m->topsize + tsize);
3618 if (tbase < m->least_addr)
3619 m->least_addr = tbase;
3621 while (sp != 0 && sp->base != tbase + tsize)
3626 char *oldbase = sp->base;
3635 if (nb < m->topsize) {
3636 size_t rsize = m->topsize -= nb;
3637 mchunkptr p = m->top;
3657 size_t released = 0;
3658 msegmentptr pred = &m->seg;
3659 msegmentptr sp = pred->next;
3661 char *base = sp->base;
3662 size_t size = sp->size;
3663 msegmentptr next = sp->next;
3680 m->footprint -=
size;
3698 size_t released = 0;
3702 if (m->topsize > pad) {
3704 size_t unit =
mparams.granularity;
3705 size_t extra = ((m->topsize - pad + (unit -
SIZE_T_ONE)) / unit -
3712 size_t newsize = sp->size - extra;
3714 if ((
CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
3716 || (
CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3727 if (old_br == sp->base + sp->size) {
3730 if (rel_br !=
CMFAIL && new_br < old_br)
3731 released = old_br - new_br;
3738 if (released != 0) {
3739 sp->size -= released;
3740 m->footprint -= released;
3741 init_top(m, m->top, m->topsize - released);
3755 return (released != 0) ? 1 : 0;
3779 if ((rsize = trem) == 0)
3784 if (rt != 0 && rt != t)
3794 if (t == 0 && v == 0) {
3796 if (leftbits != 0) {
3798 binmap_t leastbit =
least_bit(leftbits);
3814 if (v != 0 && rsize < (
size_t) (m->dvsize - nb)) {
3842 binmap_t leastbit =
least_bit(m->treemap);
3899 else if (oldsize >= nb) {
3900 size_t rsize = oldsize - nb;
3908 }
else if (next == m->top && oldsize + m->topsize > nb) {
3910 size_t newsize = oldsize + m->topsize;
3911 size_t newtopsize = newsize - nb;
3916 m->topsize = newtopsize;
3937 memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
3955 if ((alignment & (alignment -
SIZE_T_ONE)) != 0) {
3957 while (a < alignment)
3977 if ((((
size_t) (mem)) % alignment) != 0) {
3991 ((
size_t) (br - (
char *) (
p)) >=
3994 size_t leadsize = pos - (
char *) (p);
3995 size_t newsize =
chunksize(p) - leadsize;
3998 newp->prev_foot = p->prev_foot + leadsize;
4012 size_t remainder_size = size - nb;
4015 set_inuse(m, remainder, remainder_size);
4039 ialloc(mstate m,
size_t n_elements,
size_t *
sizes,
int opts,
void *chunks[])
4050 size_t element_size;
4051 size_t contents_size;
4055 size_t remainder_size;
4057 mchunkptr array_chunk;
4064 if (n_elements == 0)
4070 if (n_elements == 0)
4073 array_size =
request2size(n_elements * (
sizeof(
void *)));
4079 contents_size = n_elements * element_size;
4083 for (i = 0; i != n_elements; ++
i)
4087 size = contents_size + array_size;
4115 size_t array_chunk_size;
4117 array_chunk_size = remainder_size - contents_size;
4118 marray = (
void **) (
chunk2mem(array_chunk));
4120 remainder_size = contents_size;
4126 if (i != n_elements - 1) {
4127 if (element_size != 0)
4128 size = element_size;
4131 remainder_size -=
size;
4141 if (marray != chunks) {
4143 if (element_size != 0) {
4144 assert(remainder_size == element_size);
4150 for (i = 0; i != n_elements; ++
i)
4198 smallbits =
gm->smallmap >>
idx;
4200 if ((smallbits & 0x3U) != 0) {
4202 idx += ~smallbits & 1;
4213 else if (nb >
gm->dvsize) {
4214 if (smallbits != 0) {
4220 binmap_t leastbit =
least_bit(leftbits);
4241 else if (
gm->treemap != 0
4257 if (nb <= gm->dvsize) {
4258 size_t rsize =
gm->dvsize - nb;
4259 mchunkptr p =
gm->dv;
4266 size_t dvs =
gm->dvsize;
4276 else if (nb < gm->topsize) {
4277 size_t rsize =
gm->topsize -= nb;
4278 mchunkptr p =
gm->top;
4310 mstate
fm = get_mstate_for(p);
4324 size_t prevsize = p->prev_foot;
4326 prevsize &= ~IS_MMAPPED_BIT;
4328 if (
CALL_MUNMAP((
char *) p - prevsize, psize) == 0)
4329 fm->footprint -= psize;
4351 if (next == fm->top) {
4352 size_t tsize = fm->topsize += psize;
4362 }
else if (next == fm->dv) {
4363 size_t dsize = fm->dvsize += psize;
4400 if (n_elements != 0) {
4401 req = n_elements * elem_size;
4402 if (((n_elements | elem_size) & ~(
size_t) 0xffff) &&
4403 (req / n_elements != elem_size))
4417 #ifdef REALLOC_ZERO_BYTES_FREES
4427 mstate m = get_mstate_for(
mem2chunk(oldmem));
4446 size_t sz = elem_size;
4447 return ialloc(
gm, n_elements, &sz, 3, chunks);
4453 return ialloc(
gm, n_elements, sizes, 0, chunks);
4489 return gm->footprint;
4495 return gm->max_footprint;
4536 init_user_mstate(
char *tbase,
size_t tsize)
4538 size_t msize =
pad_request(
sizeof(
struct malloc_state));
4545 m->seg.base = m->least_addr = tbase;
4546 m->seg.size = m->footprint = m->max_footprint = tsize;
4548 m->mflags =
mparams.default_mflags;
4558 create_mspace(
size_t capacity,
int locked)
4561 size_t msize =
pad_request(
sizeof(
struct malloc_state));
4565 size_t rs = ((capacity == 0) ?
mparams.granularity :
4568 char *tbase = (
char *) (
CALL_MMAP(tsize));
4570 m = init_user_mstate(tbase, tsize);
4579 create_mspace_with_base(
void *base,
size_t capacity,
int locked)
4582 size_t msize =
pad_request(
sizeof(
struct malloc_state));
4587 m = init_user_mstate((
char *) base, capacity);
4595 destroy_mspace(mspace msp)
4598 mstate ms = (
mstate) msp;
4600 msegmentptr sp = &ms->seg;
4602 char *base = sp->base;
4603 size_t size = sp->size;
4604 flag_t flag = sp->sflags;
4623 mspace_malloc(mspace msp,
size_t bytes)
4625 mstate ms = (
mstate) msp;
4638 smallbits = ms->smallmap >>
idx;
4640 if ((smallbits & 0x3U) != 0) {
4642 idx += ~smallbits & 1;
4653 else if (nb > ms->dvsize) {
4654 if (smallbits != 0) {
4660 binmap_t leastbit =
least_bit(leftbits);
4681 else if (ms->treemap != 0
4691 if (ms->treemap != 0 && (mem =
tmalloc_large(ms, nb)) != 0) {
4697 if (nb <= ms->dvsize) {
4698 size_t rsize = ms->dvsize - nb;
4699 mchunkptr p = ms->dv;
4706 size_t dvs = ms->dvsize;
4716 else if (nb < ms->topsize) {
4717 size_t rsize = ms->topsize -= nb;
4718 mchunkptr p = ms->top;
4739 mspace_free(mspace msp,
void *mem)
4744 mstate
fm = get_mstate_for(p);
4746 mstate fm = (
mstate) msp;
4758 size_t prevsize = p->prev_foot;
4760 prevsize &= ~IS_MMAPPED_BIT;
4762 if (
CALL_MUNMAP((
char *) p - prevsize, psize) == 0)
4763 fm->footprint -= psize;
4785 if (next == fm->top) {
4786 size_t tsize = fm->topsize += psize;
4796 }
else if (next == fm->dv) {
4797 size_t dsize = fm->dvsize += psize;
4827 mspace_calloc(mspace msp,
size_t n_elements,
size_t elem_size)
4831 mstate ms = (
mstate) msp;
4836 if (n_elements != 0) {
4837 req = n_elements * elem_size;
4838 if (((n_elements | elem_size) & ~(
size_t) 0xffff) &&
4839 (req / n_elements != elem_size))
4849 mspace_realloc(mspace msp,
void *oldmem,
size_t bytes)
4852 return mspace_malloc(msp, bytes);
4853 #ifdef REALLOC_ZERO_BYTES_FREES
4855 mspace_free(msp, oldmem);
4862 mstate ms = get_mstate_for(p);
4864 mstate ms = (
mstate) msp;
4875 mspace_memalign(mspace msp,
size_t alignment,
size_t bytes)
4877 mstate ms = (
mstate) msp;
4886 mspace_independent_calloc(mspace msp,
size_t n_elements,
4887 size_t elem_size,
void *chunks[])
4889 size_t sz = elem_size;
4890 mstate ms = (
mstate) msp;
4895 return ialloc(ms, n_elements, &sz, 3, chunks);
4899 mspace_independent_comalloc(mspace msp,
size_t n_elements,
4900 size_t sizes[],
void *chunks[])
4902 mstate ms = (
mstate) msp;
4907 return ialloc(ms, n_elements, sizes, 0, chunks);
4911 mspace_trim(mspace msp,
size_t pad)
4914 mstate ms = (
mstate) msp;
4927 mspace_malloc_stats(mspace msp)
4929 mstate ms = (
mstate) msp;
4938 mspace_footprint(mspace msp)
4941 mstate ms = (
mstate) msp;
4943 result = ms->footprint;
4951 mspace_max_footprint(mspace msp)
4954 mstate ms = (
mstate) msp;
4956 result = ms->max_footprint;
4965 mspace_mallinfo(mspace msp)
4967 mstate ms = (
mstate) msp;
4976 mspace_mallopt(
int param_number,
int value)
#define USAGE_ERROR_ACTION(m, p)
#define DEFAULT_MMAP_THRESHOLD
#define unlink_large_chunk(M, X)
#define request2size(req)
GLuint const GLfloat * val
static void * sys_alloc(mstate m, size_t nb)
#define minsize_for_tree_index(i)
#define ACQUIRE_MAGIC_INIT_LOCK()
static int has_segment_link(mstate m, msegmentptr ss)
static void * internal_realloc(mstate m, void *oldmem, size_t bytes)
#define compute_tree_index(S, I)
GLvoid **typedef void(GLAPIENTRY *PFNGLGETVERTEXATTRIBDVPROC)(GLuint
DECLSPEC void *SDLCALL SDL_calloc(size_t nmemb, size_t size)
struct malloc_state * mstate
#define insert_chunk(M, P, S)
static int change_mparam(int param_number, int value)
#define mark_inuse_foot(M, p, s)
#define DEFAULT_TRIM_THRESHOLD
#define MALLOC_FAILURE_ACTION
static void * tmalloc_large(mstate m, size_t nb)
#define smallbin_at(M, i)
#define replace_dv(M, P, S)
#define disable_contiguous(M)
DECLSPEC void *SDLCALL SDL_realloc(void *mem, size_t size)
DECLSPEC void SDLCALL SDL_free(void *mem)
#define is_initialized(M)
#define set_free_with_pinuse(p, s, n)
GLboolean GLboolean GLboolean GLboolean a
#define dlindependent_calloc
#define compute_bit2idx(X, I)
GLuint GLsizei const GLuint const GLintptr const GLsizeiptr * sizes
#define dlmalloc_usable_size
#define chunk_plus_offset(p, s)
#define CALL_MUNMAP(a, s)
#define leftmost_child(t)
#define FOUR_SIZE_T_SIZES
#define CORRUPTION_ERROR_ACTION(m)
struct malloc_chunk * mchunkptr
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
#define dlmalloc_footprint
#define USE_NONCONTIGUOUS_BIT
#define small_index2size(i)
#define ACQUIRE_MORECORE_LOCK()
#define use_noncontiguous(M)
static int sys_trim(mstate m, size_t pad)
#define unlink_first_small_chunk(M, B, P, I)
#define check_inuse_chunk(M, P)
struct malloc_tree_chunk tchunk
static void ** ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, void *chunks[])
static void init_bins(mstate m)
#define treemap_is_marked(M, i)
static struct malloc_params mparams
#define MORECORE_CONTIGUOUS
#define malloc_getpagesize
#define dlmalloc_max_footprint
#define set_inuse(M, p, s)
static void * tmalloc_small(mstate m, size_t nb)
static const char empty[1]
struct malloc_segment * msegmentptr
static struct mallinfo internal_mallinfo(mstate m)
#define align_as_chunk(A)
#define unlink_chunk(M, P, S)
struct malloc_tree_chunk * tbinptr
DECLSPEC void *SDLCALL SDL_malloc(size_t size)
#define MAX_SMALL_REQUEST
#define leftshift_for_tree_index(i)
#define DEFAULT_GRANULARITY
GLfloat GLfloat GLfloat top
#define chunk_minus_offset(p, s)
static void internal_malloc_stats(mstate m)
#define should_trim(M, s)
GLuint GLfloat GLfloat GLfloat x1
#define RELEASE_MAGIC_INIT_LOCK()
static void init_top(mstate m, mchunkptr p, size_t psize)
#define check_malloc_state(M)
EGLSurface EGLint void ** value
#define check_malloced_chunk(M, P, N)
static void * prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
#define insert_large_chunk(M, X, S)
#define segment_holds(S, A)
GLenum GLuint GLsizei const GLchar * buf
static int init_mparams(void)
static size_t release_unused_segments(mstate m)
static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
#define RELEASE_MORECORE_LOCK()
GLdouble GLdouble GLdouble GLdouble q
#define CALL_MREMAP(addr, osz, nsz, mv)
#define internal_free(m, mem)
#define check_mmapped_chunk(M, P)
GLdouble GLdouble GLdouble r
static void * internal_memalign(mstate m, size_t alignment, size_t bytes)
GLdouble GLdouble GLdouble b
struct malloc_segment msegment
#define smallmap_is_marked(M, i)
#define calloc_must_clear(p)
static msegmentptr segment_holding(mstate m, char *addr)
#define is_mmapped_segment(S)
#define set_size_and_pinuse_of_free_chunk(p, s)
#define is_extern_segment(S)
static struct malloc_state _gm_
#define internal_malloc(m, b)
struct malloc_chunk * sbinptr
#define check_free_chunk(M, P)
#define granularity_align(S)
static void * mmap_alloc(mstate m, size_t nb)
#define MALLINFO_FIELD_TYPE
#define is_page_aligned(S)
#define dlindependent_comalloc
#define check_top_chunk(M, P)
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)
INT64 INT64 INT64 remainder
#define set_inuse_and_pinuse(M, p, s)
struct malloc_chunk mchunk
struct malloc_tree_chunk * tchunkptr