zenilib  0.5.3.0
SDL_malloc.c
1 /*
2  Simple DirectMedia Layer
3  Copyright (C) 1997-2013 Sam Lantinga <slouken@libsdl.org>
4 
5  This software is provided 'as-is', without any express or implied
6  warranty. In no event will the authors be held liable for any damages
7  arising from the use of this software.
8 
9  Permission is granted to anyone to use this software for any purpose,
10  including commercial applications, and to alter it and redistribute it
11  freely, subject to the following restrictions:
12 
13  1. The origin of this software must not be misrepresented; you must not
14  claim that you wrote the original software. If you use this software
15  in a product, an acknowledgment in the product documentation would be
16  appreciated but is not required.
17  2. Altered source versions must be plainly marked as such, and must not be
18  misrepresented as being the original software.
19  3. This notice may not be removed or altered from any source distribution.
20 */
21 #include "SDL_config.h"
22 
23 /* This file contains portable memory management functions for SDL */
24 
25 #include "SDL_stdinc.h"
26 
27 #if defined(HAVE_MALLOC)
28 
29 void *SDL_malloc(size_t size)
30 {
31  return malloc(size);
32 }
33 
34 void *SDL_calloc(size_t nmemb, size_t size)
35 {
36  return calloc(nmemb, size);
37 }
38 
39 void *SDL_realloc(void *ptr, size_t size)
40 {
41  return realloc(ptr, size);
42 }
43 
44 void SDL_free(void *ptr)
45 {
46  free(ptr);
47 }
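/*
   A minimal usage sketch (not part of SDL's API documentation): these
   wrappers forward directly to the C library, so they are paired exactly
   like the standard routines they wrap.

     int *values = (int *) SDL_calloc(16, sizeof(int));
     if (values != NULL) {
         int *grown = (int *) SDL_realloc(values, 32 * sizeof(int));
         if (grown != NULL)
             values = grown;   // realloc succeeded; adopt the new block
         SDL_free(values);     // a failed realloc leaves the old block valid
     }
*/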
48 
49 #else /* the rest of this is a LOT of tapdancing to implement malloc. :) */
50 
51 #define LACKS_SYS_TYPES_H
52 #define LACKS_STDIO_H
53 #define LACKS_STRINGS_H
54 #define LACKS_STRING_H
55 #define LACKS_STDLIB_H
56 #define ABORT
57 
58 /*
59  This is a version (aka dlmalloc) of malloc/free/realloc written by
60  Doug Lea and released to the public domain, as explained at
61  http://creativecommons.org/licenses/publicdomain. Send questions,
62  comments, complaints, performance data, etc to dl@cs.oswego.edu
63 
64 * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
65 
66  Note: There may be an updated version of this malloc obtainable at
67  ftp://gee.cs.oswego.edu/pub/misc/malloc.c
68  Check before installing!
69 
70 * Quickstart
71 
72  This library is all in one file to simplify the most common usage:
73  ftp it, compile it (-O3), and link it into another program. All of
74  the compile-time options default to reasonable values for use on
75  most platforms. You might later want to step through various
76  compile-time and dynamic tuning options.
77 
78  For convenience, an include file for code using this malloc is at:
79  ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
80  You don't really need this .h file unless you call functions not
81  defined in your system include files. The .h file contains only the
82  excerpts from this file needed for using this malloc on ANSI C/C++
83  systems, so long as you haven't changed compile-time options about
84  naming and tuning parameters. If you do, then you can create your
85  own malloc.h that does include all settings by cutting at the point
86  indicated below. Note that you may already by default be using a C
87  library containing a malloc that is based on some version of this
88  malloc (for example in linux). You might still want to use the one
89  in this file to customize settings or to avoid overheads associated
90  with library versions.
91 
92 * Vital statistics:
93 
94  Supported pointer/size_t representation: 4 or 8 bytes
95  size_t MUST be an unsigned type of the same width as
96  pointers. (If you are using an ancient system that declares
97  size_t as a signed type, or need it to be a different width
98  than pointers, you can use a previous release of this malloc
99  (e.g. 2.7.2) supporting these.)
100 
101  Alignment: 8 bytes (default)
102  This suffices for nearly all current machines and C compilers.
103  However, you can define MALLOC_ALIGNMENT to be wider than this
104  if necessary (up to 128bytes), at the expense of using more space.
105 
106  Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
107  8 or 16 bytes (if 8byte sizes)
108  Each malloced chunk has a hidden word of overhead holding size
109  and status information, and additional cross-check word
110  if FOOTERS is defined.
111 
112  Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
113  8-byte ptrs: 32 bytes (including overhead)
114 
115  Even a request for zero bytes (i.e., malloc(0)) returns a
116  pointer to something of the minimum allocatable size.
117  The maximum overhead wastage (i.e., number of extra bytes
118  allocated than were requested in malloc) is less than or equal
119  to the minimum size, except for requests >= mmap_threshold that
120  are serviced via mmap(), where the worst case wastage is about
121  32 bytes plus the remainder from a system page (the minimal
122  mmap unit); typically 4096 or 8192 bytes.
123 
124  Security: static-safe; optionally more or less
125  The "security" of malloc refers to the ability of malicious
126  code to accentuate the effects of errors (for example, freeing
127  space that is not currently malloc'ed or overwriting past the
128  ends of chunks) in code that calls malloc. This malloc
129  guarantees not to modify any memory locations below the base of
130  heap, i.e., static variables, even in the presence of usage
131  errors. The routines additionally detect most improper frees
132  and reallocs. All this holds as long as the static bookkeeping
133  for malloc itself is not corrupted by some other means. This
134  is only one aspect of security -- these checks do not, and
135  cannot, detect all possible programming errors.
136 
137  If FOOTERS is defined nonzero, then each allocated chunk
138  carries an additional check word to verify that it was malloced
139  from its space. These check words are the same within each
140  execution of a program using malloc, but differ across
141  executions, so externally crafted fake chunks cannot be
142  freed. This improves security by rejecting frees/reallocs that
143  could corrupt heap memory, in addition to the checks preventing
144  writes to statics that are always on. This may further improve
145  security at the expense of time and space overhead. (Note that
146  FOOTERS may also be worth using with MSPACES.)
147 
148  By default detected errors cause the program to abort (calling
149  "abort()"). You can override this to instead proceed past
150  errors by defining PROCEED_ON_ERROR. In this case, a bad free
151  has no effect, and a malloc that encounters a bad address
152  caused by user overwrites will ignore the bad address by
153  dropping pointers and indices to all known memory. This may
154  be appropriate for programs that should continue if at all
155  possible in the face of programming errors, although they may
156  run out of memory because dropped memory is never reclaimed.
157 
158  If you don't like either of these options, you can define
159  CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
160  else. And if you are sure that your program using malloc has
161  no errors or vulnerabilities, you can define INSECURE to 1,
162  which might (or might not) provide a small performance improvement.
163 
164  Thread-safety: NOT thread-safe unless USE_LOCKS defined
165  When USE_LOCKS is defined, each public call to malloc, free,
166  etc is surrounded with either a pthread mutex or a win32
167  spinlock (depending on WIN32). This is not especially fast, and
168  can be a major bottleneck. It is designed only to provide
169  minimal protection in concurrent environments, and to provide a
170  basis for extensions. If you are using malloc in a concurrent
171  program, consider instead using ptmalloc, which is derived from
172  a version of this malloc. (See http://www.malloc.de).
173 
174  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
175  This malloc can use unix sbrk or any emulation (invoked using
176  the CALL_MORECORE macro) and/or mmap/munmap or any emulation
177  (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
178  memory. On most unix systems, it tends to work best if both
179  MORECORE and MMAP are enabled. On Win32, it uses emulations
180  based on VirtualAlloc. It also uses common C library functions
181  like memset.
182 
183  Compliance: I believe it is compliant with the Single Unix Specification
184  (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
185  others as well.
186 
187 * Overview of algorithms
188 
189  This is not the fastest, most space-conserving, most portable, or
190  most tunable malloc ever written. However it is among the fastest
191  while also being among the most space-conserving, portable and
192  tunable. Consistent balance across these factors results in a good
193  general-purpose allocator for malloc-intensive programs.
194 
195  In most ways, this malloc is a best-fit allocator. Generally, it
196  chooses the best-fitting existing chunk for a request, with ties
197  broken in approximately least-recently-used order. (This strategy
198  normally maintains low fragmentation.) However, for requests less
199  than 256bytes, it deviates from best-fit when there is not an
200  exactly fitting available chunk by preferring to use space adjacent
201  to that used for the previous small request, as well as by breaking
202  ties in approximately most-recently-used order. (These enhance
203  locality of series of small allocations.) And for very large requests
204  (>= 256Kb by default), it relies on system memory mapping
205  facilities, if supported. (This helps avoid carrying around and
206  possibly fragmenting memory used only for large chunks.)
207 
208  All operations (except malloc_stats and mallinfo) have execution
209  times that are bounded by a constant factor of the number of bits in
210  a size_t, not counting any clearing in calloc or copying in realloc,
211  or actions surrounding MORECORE and MMAP that have times
212  proportional to the number of non-contiguous regions returned by
213  system allocation routines, which is often just 1.
214 
215  The implementation is not very modular and seriously overuses
216  macros. Perhaps someday all C compilers will do as good a job
217  inlining modular code as can now be done by brute-force expansion,
218  but now, enough of them seem not to.
219 
220  Some compilers issue a lot of warnings about code that is
221  dead/unreachable only on some platforms, and also about intentional
222  uses of negation on unsigned types. All known cases of each can be
223  ignored.
224 
225  For a longer but out of date high-level description, see
226  http://gee.cs.oswego.edu/dl/html/malloc.html
227 
228 * MSPACES
229  If MSPACES is defined, then in addition to malloc, free, etc.,
230  this file also defines mspace_malloc, mspace_free, etc. These
231  are versions of malloc routines that take an "mspace" argument
232  obtained using create_mspace, to control all internal bookkeeping.
233  If ONLY_MSPACES is defined, only these versions are compiled.
234  So if you would like to use this allocator for only some allocations,
235  and your system malloc for others, you can compile with
236  ONLY_MSPACES and then do something like...
237  static mspace mymspace = create_mspace(0,0); // for example
238  #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
239 
240  (Note: If you only need one instance of an mspace, you can instead
241  use "USE_DL_PREFIX" to relabel the global malloc.)
242 
243  You can similarly create thread-local allocators by storing
244  mspaces as thread-locals. For example:
245  static __thread mspace tlms = 0;
246  void* tlmalloc(size_t bytes) {
247  if (tlms == 0) tlms = create_mspace(0, 0);
248  return mspace_malloc(tlms, bytes);
249  }
250  void tlfree(void* mem) { mspace_free(tlms, mem); }
251 
252  Unless FOOTERS is defined, each mspace is completely independent.
253  You cannot allocate from one and free to another (although
254  conformance is only weakly checked, so usage errors are not always
255  caught). If FOOTERS is defined, then each chunk carries around a tag
256  indicating its originating mspace, and frees are directed to their
257  originating spaces.
258 
259  ------------------------- Compile-time options ---------------------------
260 
261 Be careful in setting #define values for numerical constants of type
262 size_t. On some systems, literal values are not automatically extended
263 to size_t precision unless they are explicitly casted.
264 
265 WIN32 default: defined if _WIN32 defined
266  Defining WIN32 sets up defaults for MS environment and compilers.
267  Otherwise defaults are for unix.
268 
269 MALLOC_ALIGNMENT default: (size_t)8
270  Controls the minimum alignment for malloc'ed chunks. It must be a
271  power of two and at least 8, even on machines for which smaller
272  alignments would suffice. It may be defined as larger than this
273  though. Note however that code and data structures are optimized for
274  the case of 8-byte alignment.
275 
276 MSPACES default: 0 (false)
277  If true, compile in support for independent allocation spaces.
278  This is only supported if HAVE_MMAP is true.
279 
280 ONLY_MSPACES default: 0 (false)
281  If true, only compile in mspace versions, not regular versions.
282 
283 USE_LOCKS default: 0 (false)
284  Causes each call to each public routine to be surrounded with
285  pthread or WIN32 mutex lock/unlock. (If set true, this can be
286  overridden on a per-mspace basis for mspace versions.)
287 
288 FOOTERS default: 0
289  If true, provide extra checking and dispatching by placing
290  information in the footers of allocated chunks. This adds
291  space and time overhead.
292 
293 INSECURE default: 0
294  If true, omit checks for usage errors and heap space overwrites.
295 
296 USE_DL_PREFIX default: NOT defined
297  Causes compiler to prefix all public routines with the string 'dl'.
298  This can be useful when you only want to use this malloc in one part
299  of a program, using your regular system malloc elsewhere.
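  For example, a sketch assuming this file is compiled with USE_DL_PREFIX
  defined and linked alongside the system C library:
    void* p = dlmalloc(128);  // served by this allocator
    void* q = malloc(128);    // served by the system malloc
    dlfree(p);
    free(q);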
300 
301 ABORT default: defined as abort()
302  Defines how to abort on failed checks. On most systems, a failed
303  check cannot die with an "assert" or even print an informative
304  message, because the underlying print routines in turn call malloc,
305  which will fail again. Generally, the best policy is to simply call
306  abort(). It's not very useful to do more than this because many
307  errors due to overwriting will show up as address faults (null, odd
308  addresses etc) rather than malloc-triggered checks, so will also
309  abort. Also, most compilers know that abort() does not return, so
310  can better optimize code conditionally calling it.
311 
312 PROCEED_ON_ERROR default: defined as 0 (false)
313  Controls whether detected bad addresses cause them to be bypassed
314  rather than aborting. If set, detected bad arguments to free and
315  realloc are ignored. And all bookkeeping information is zeroed out
316  upon a detected overwrite of freed heap space, thus losing the
317  ability to ever return it from malloc again, but enabling the
318  application to proceed. If PROCEED_ON_ERROR is defined, the
319  static variable malloc_corruption_error_count is compiled in
320  and can be examined to see if errors have occurred. This option
321  generates slower code than the default abort policy.
322 
323 DEBUG default: NOT defined
324  The DEBUG setting is mainly intended for people trying to modify
325  this code or diagnose problems when porting to new platforms.
326  However, it may also be able to better isolate user errors than just
327  using runtime checks. The assertions in the check routines spell
328  out in more detail the assumptions and invariants underlying the
329  algorithms. The checking is fairly extensive, and will slow down
330  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
331  set will attempt to check every non-mmapped allocated and free chunk
332  in the course of computing the summaries.
333 
334 ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
335  Debugging assertion failures can be nearly impossible if your
336  version of the assert macro causes malloc to be called, which will
337  lead to a cascade of further failures, blowing the runtime stack.
338  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
339  which will usually make debugging easier.
340 
341 MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
342  The action to take before "return 0" when malloc fails to be able to
343  return memory because there is none available.
344 
345 HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
346  True if this system supports sbrk or an emulation of it.
347 
348 MORECORE default: sbrk
349  The name of the sbrk-style system routine to call to obtain more
350  memory. See below for guidance on writing custom MORECORE
351  functions. The type of the argument to sbrk/MORECORE varies across
352  systems. It cannot be size_t, because it supports negative
353  arguments, so it is normally the signed type of the same width as
354  size_t (sometimes declared as "intptr_t"). It doesn't much matter
355  though. Internally, we only call it with arguments less than half
356  the max value of a size_t, which should work across all reasonable
357  possibilities, although sometimes generating compiler warnings. See
358  near the end of this file for guidelines for creating a custom
359  version of MORECORE.
360 
361 MORECORE_CONTIGUOUS default: 1 (true)
362  If true, take advantage of fact that consecutive calls to MORECORE
363  with positive arguments always return contiguous increasing
364  addresses. This is true of unix sbrk. It does not hurt too much to
365  set it true anyway, since malloc copes with non-contiguities.
366  Setting it false when definitely non-contiguous saves time
367  and possibly wasted space it would take to discover this though.
368 
369 MORECORE_CANNOT_TRIM default: NOT defined
370  True if MORECORE cannot release space back to the system when given
371  negative arguments. This is generally necessary only if you are
372  using a hand-crafted MORECORE function that cannot handle negative
373  arguments.
374 
375 HAVE_MMAP default: 1 (true)
376  True if this system supports mmap or an emulation of it. If so, and
377  HAVE_MORECORE is not true, MMAP is used for all system
378  allocation. If set and HAVE_MORECORE is true as well, MMAP is
379  primarily used to directly allocate very large blocks. It is also
380  used as a backup strategy in cases where MORECORE fails to provide
381  space from system. Note: A single call to MUNMAP is assumed to be
382  able to unmap memory that may have been allocated using multiple calls
383  to MMAP, so long as they are adjacent.
384 
385 HAVE_MREMAP default: 1 on linux, else 0
386  If true realloc() uses mremap() to re-allocate large blocks and
387  extend or shrink allocation spaces.
388 
389 MMAP_CLEARS default: 1 on unix
390  True if mmap clears memory so calloc doesn't need to. This is true
391  for standard unix mmap using /dev/zero.
392 
393 USE_BUILTIN_FFS default: 0 (i.e., not used)
394  Causes malloc to use the builtin ffs() function to compute indices.
395  Some compilers may recognize and intrinsify ffs to be faster than the
396  supplied C version. Also, the case of x86 using gcc is special-cased
397  to an asm instruction, so is already as fast as it can be, and so
398  this setting has no effect. (On most x86s, the asm version is only
399  slightly faster than the C version.)
400 
401 malloc_getpagesize default: derive from system includes, or 4096.
402  The system page size. To the extent possible, this malloc manages
403  memory from the system in page-size units. This may be (and
404  usually is) a function rather than a constant. This is ignored
405  if WIN32, where page size is determined using getSystemInfo during
406  initialization.
407 
408 USE_DEV_RANDOM default: 0 (i.e., not used)
409  Causes malloc to use /dev/random to initialize secure magic seed for
410  stamping footers. Otherwise, the current time is used.
411 
412 NO_MALLINFO default: 0
413  If defined, don't compile "mallinfo". This can be a simple way
414  of dealing with mismatches between system declarations and
415  those in this file.
416 
417 MALLINFO_FIELD_TYPE default: size_t
418  The type of the fields in the mallinfo struct. This was originally
419  defined as "int" in SVID etc, but is more usefully defined as
420  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
421 
422 REALLOC_ZERO_BYTES_FREES default: not defined
423  This should be set if a call to realloc with zero bytes should
424  be the same as a call to free. Some people think it should. Otherwise,
425  since this malloc returns a unique pointer for malloc(0), realloc(p, 0)
426  does so as well.
427 
428 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
429 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
430 LACKS_STDLIB_H default: NOT defined unless on WIN32
431  Define these if your system does not have these header files.
432  You might need to manually insert some of the declarations they provide.
433 
434 DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
435  system_info.dwAllocationGranularity in WIN32,
436  otherwise 64K.
437  Also settable using mallopt(M_GRANULARITY, x)
438  The unit for allocating and deallocating memory from the system. On
439  most systems with contiguous MORECORE, there is no reason to
440  make this more than a page. However, systems with MMAP tend to
441  either require or encourage larger granularities. You can increase
442  this value to keep system allocation functions from being called so
443  often, especially if they are slow. The value must be at least one
444  page and must be a power of two. Setting to 0 causes initialization
445  to either page size or win32 region size. (Note: In previous
446  versions of malloc, the equivalent of this option was called
447  "TOP_PAD")
448 
449 DEFAULT_TRIM_THRESHOLD default: 2MB
450  Also settable using mallopt(M_TRIM_THRESHOLD, x)
451  The maximum amount of unused top-most memory to keep before
452  releasing via malloc_trim in free(). Automatic trimming is mainly
453  useful in long-lived programs using contiguous MORECORE. Because
454  trimming via sbrk can be slow on some systems, and can sometimes be
455  wasteful (in cases where programs immediately afterward allocate
456  more large chunks) the value should be high enough so that your
457  overall system performance would improve by releasing this much
458  memory. As a rough guide, you might set to a value close to the
459  average size of a process (program) running on your system.
460  Releasing this much memory would allow such a process to run in
461  memory. Generally, it is worth tuning trim thresholds when a
462  program undergoes phases where several large chunks are allocated
463  and released in ways that can reuse each other's storage, perhaps
464  mixed with phases where there are no such chunks at all. The trim
465  value must be greater than page size to have any useful effect. To
466  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
467  some people use of mallocing a huge space and then freeing it at
468  program startup, in an attempt to reserve system memory, doesn't
469  have the intended effect under automatic trimming, since that memory
470  will immediately be returned to the system.
471 
472 DEFAULT_MMAP_THRESHOLD default: 256K
473  Also settable using mallopt(M_MMAP_THRESHOLD, x)
474  The request size threshold for using MMAP to directly service a
475  request. Requests of at least this size that cannot be allocated
476  using already-existing space will be serviced via mmap. (If enough
477  normal freed space already exists it is used instead.) Using mmap
478  segregates relatively large chunks of memory so that they can be
479  individually obtained and released from the host system. A request
480  serviced through mmap is never reused by any other request (at least
481  not directly; the system may just so happen to remap successive
482  requests to the same locations). Segregating space in this way has
483  the benefits that: Mmapped space can always be individually released
484  back to the system, which helps keep the system level memory demands
485  of a long-lived program low. Also, mapped memory doesn't become
486  `locked' between other chunks, as can happen with normally allocated
487  chunks, which means that even trimming via malloc_trim would not
488  release them. However, it has the disadvantage that the space
489  cannot be reclaimed, consolidated, and then used to service later
490  requests, as happens with normal chunks. The advantages of mmap
491  nearly always outweigh disadvantages for "large" chunks, but the
492  value of "large" may vary across systems. The default is an
493  empirically derived value that works well in most systems. You can
494  disable mmap by setting to MAX_SIZE_T.
495 
496 */
497 
498 #ifndef WIN32
499 #ifdef _WIN32
500 #define WIN32 1
501 #endif /* _WIN32 */
502 #endif /* WIN32 */
503 #ifdef WIN32
504 #define WIN32_LEAN_AND_MEAN
505 #include <windows.h>
506 #define HAVE_MMAP 1
507 #define HAVE_MORECORE 0
508 #define LACKS_UNISTD_H
509 #define LACKS_SYS_PARAM_H
510 #define LACKS_SYS_MMAN_H
511 #define LACKS_STRING_H
512 #define LACKS_STRINGS_H
513 #define LACKS_SYS_TYPES_H
514 #define LACKS_ERRNO_H
515 #define LACKS_FCNTL_H
516 #define MALLOC_FAILURE_ACTION
517 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
518 #endif /* WIN32 */
519 
520 #if defined(DARWIN) || defined(_DARWIN)
521 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
522 #ifndef HAVE_MORECORE
523 #define HAVE_MORECORE 0
524 #define HAVE_MMAP 1
525 #endif /* HAVE_MORECORE */
526 #endif /* DARWIN */
527 
528 #ifndef LACKS_SYS_TYPES_H
529 #include <sys/types.h> /* For size_t */
530 #endif /* LACKS_SYS_TYPES_H */
531 
532 /* The maximum possible size_t value has all bits set */
533 #define MAX_SIZE_T (~(size_t)0)
534 
535 #ifndef ONLY_MSPACES
536 #define ONLY_MSPACES 0
537 #endif /* ONLY_MSPACES */
538 #ifndef MSPACES
539 #if ONLY_MSPACES
540 #define MSPACES 1
541 #else /* ONLY_MSPACES */
542 #define MSPACES 0
543 #endif /* ONLY_MSPACES */
544 #endif /* MSPACES */
545 #ifndef MALLOC_ALIGNMENT
546 #define MALLOC_ALIGNMENT ((size_t)8U)
547 #endif /* MALLOC_ALIGNMENT */
548 #ifndef FOOTERS
549 #define FOOTERS 0
550 #endif /* FOOTERS */
551 #ifndef ABORT
552 #define ABORT abort()
553 #endif /* ABORT */
554 #ifndef ABORT_ON_ASSERT_FAILURE
555 #define ABORT_ON_ASSERT_FAILURE 1
556 #endif /* ABORT_ON_ASSERT_FAILURE */
557 #ifndef PROCEED_ON_ERROR
558 #define PROCEED_ON_ERROR 0
559 #endif /* PROCEED_ON_ERROR */
560 #ifndef USE_LOCKS
561 #define USE_LOCKS 0
562 #endif /* USE_LOCKS */
563 #ifndef INSECURE
564 #define INSECURE 0
565 #endif /* INSECURE */
566 #ifndef HAVE_MMAP
567 #define HAVE_MMAP 1
568 #endif /* HAVE_MMAP */
569 #ifndef MMAP_CLEARS
570 #define MMAP_CLEARS 1
571 #endif /* MMAP_CLEARS */
572 #ifndef HAVE_MREMAP
573 #ifdef linux
574 #define HAVE_MREMAP 1
575 #else /* linux */
576 #define HAVE_MREMAP 0
577 #endif /* linux */
578 #endif /* HAVE_MREMAP */
579 #ifndef MALLOC_FAILURE_ACTION
580 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
581 #endif /* MALLOC_FAILURE_ACTION */
582 #ifndef HAVE_MORECORE
583 #if ONLY_MSPACES
584 #define HAVE_MORECORE 0
585 #else /* ONLY_MSPACES */
586 #define HAVE_MORECORE 1
587 #endif /* ONLY_MSPACES */
588 #endif /* HAVE_MORECORE */
589 #if !HAVE_MORECORE
590 #define MORECORE_CONTIGUOUS 0
591 #else /* !HAVE_MORECORE */
592 #ifndef MORECORE
593 #define MORECORE sbrk
594 #endif /* MORECORE */
595 #ifndef MORECORE_CONTIGUOUS
596 #define MORECORE_CONTIGUOUS 1
597 #endif /* MORECORE_CONTIGUOUS */
598 #endif /* HAVE_MORECORE */
599 #ifndef DEFAULT_GRANULARITY
600 #if MORECORE_CONTIGUOUS
601 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
602 #else /* MORECORE_CONTIGUOUS */
603 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
604 #endif /* MORECORE_CONTIGUOUS */
605 #endif /* DEFAULT_GRANULARITY */
606 #ifndef DEFAULT_TRIM_THRESHOLD
607 #ifndef MORECORE_CANNOT_TRIM
608 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
609 #else /* MORECORE_CANNOT_TRIM */
610 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
611 #endif /* MORECORE_CANNOT_TRIM */
612 #endif /* DEFAULT_TRIM_THRESHOLD */
613 #ifndef DEFAULT_MMAP_THRESHOLD
614 #if HAVE_MMAP
615 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
616 #else /* HAVE_MMAP */
617 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
618 #endif /* HAVE_MMAP */
619 #endif /* DEFAULT_MMAP_THRESHOLD */
620 #ifndef USE_BUILTIN_FFS
621 #define USE_BUILTIN_FFS 0
622 #endif /* USE_BUILTIN_FFS */
623 #ifndef USE_DEV_RANDOM
624 #define USE_DEV_RANDOM 0
625 #endif /* USE_DEV_RANDOM */
626 #ifndef NO_MALLINFO
627 #define NO_MALLINFO 0
628 #endif /* NO_MALLINFO */
629 #ifndef MALLINFO_FIELD_TYPE
630 #define MALLINFO_FIELD_TYPE size_t
631 #endif /* MALLINFO_FIELD_TYPE */
632 
633 #define memset SDL_memset
634 #define memcpy SDL_memcpy
635 #define malloc SDL_malloc
636 #define calloc SDL_calloc
637 #define realloc SDL_realloc
638 #define free SDL_free
639 
640 /*
641  mallopt tuning options. SVID/XPG defines four standard parameter
642  numbers for mallopt, normally defined in malloc.h. None of these
643  are used in this malloc, so setting them has no effect. But this
644  malloc does support the following options.
645 */
646 
647 #define M_TRIM_THRESHOLD (-1)
648 #define M_GRANULARITY (-2)
649 #define M_MMAP_THRESHOLD (-3)
650 
651 /* ------------------------ Mallinfo declarations ------------------------ */
652 
653 #if !NO_MALLINFO
654 /*
655  This version of malloc supports the standard SVID/XPG mallinfo
656  routine that returns a struct containing usage properties and
657  statistics. It should work on any system that has a
658  /usr/include/malloc.h defining struct mallinfo. The main
659  declaration needed is the mallinfo struct that is returned (by-copy)
660  by mallinfo(). The mallinfo struct contains a bunch of fields that
661  are not even meaningful in this version of malloc. These fields are
662  instead filled by mallinfo() with other numbers that might be of
663  interest.
664 
665  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
666  /usr/include/malloc.h file that includes a declaration of struct
667  mallinfo. If so, it is included; else a compliant version is
668  declared below. These must be precisely the same for mallinfo() to
669  work. The original SVID version of this struct, defined on most
670  systems with mallinfo, declares all fields as ints. But some others
671  define as unsigned long. If your system defines the fields using a
672  type of different width than listed here, you MUST #include your
673  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
674 */
675 
676 /* #define HAVE_USR_INCLUDE_MALLOC_H */
677 
678 #ifdef HAVE_USR_INCLUDE_MALLOC_H
679 #include "/usr/include/malloc.h"
680 #else /* HAVE_USR_INCLUDE_MALLOC_H */
681 
682 struct mallinfo
683 {
684  MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
685  MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
686  MALLINFO_FIELD_TYPE smblks; /* always 0 */
687  MALLINFO_FIELD_TYPE hblks; /* always 0 */
688  MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
689  MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
690  MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
691  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
692  MALLINFO_FIELD_TYPE fordblks; /* total free space */
693  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
694 };
695 
696 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
697 #endif /* NO_MALLINFO */
698 
699 #ifdef __cplusplus
700 extern "C"
701 {
702 #endif /* __cplusplus */
703 
704 #if !ONLY_MSPACES
705 
706 /* ------------------- Declarations of public routines ------------------- */
707 
708 #ifndef USE_DL_PREFIX
709 #define dlcalloc calloc
710 #define dlfree free
711 #define dlmalloc malloc
712 #define dlmemalign memalign
713 #define dlrealloc realloc
714 #define dlvalloc valloc
715 #define dlpvalloc pvalloc
716 #define dlmallinfo mallinfo
717 #define dlmallopt mallopt
718 #define dlmalloc_trim malloc_trim
719 #define dlmalloc_stats malloc_stats
720 #define dlmalloc_usable_size malloc_usable_size
721 #define dlmalloc_footprint malloc_footprint
722 #define dlmalloc_max_footprint malloc_max_footprint
723 #define dlindependent_calloc independent_calloc
724 #define dlindependent_comalloc independent_comalloc
725 #endif /* USE_DL_PREFIX */
726 
727 
728 /*
729  malloc(size_t n)
730  Returns a pointer to a newly allocated chunk of at least n bytes, or
731  null if no space is available, in which case errno is set to ENOMEM
732  on ANSI C systems.
733 
734  If n is zero, malloc returns a minimum-sized chunk. (The minimum
735  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
736  systems.) Note that size_t is an unsigned type, so calls with
737  arguments that would be negative if signed are interpreted as
738  requests for huge amounts of space, which will often fail. The
739  maximum supported value of n differs across systems, but is in all
740  cases less than the maximum representable value of a size_t.
741 */
742  void *dlmalloc(size_t);
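/*
  A brief usage sketch of the contract described above: check for a null
  return, and note that malloc(0) still yields a valid, freeable pointer
  to a minimum-sized chunk.

    char *p = (char *) malloc(100);
    if (p == 0) {
        // allocation failed; on ANSI C systems errno has been set to ENOMEM
    } else {
        free(p);
    }
*/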
743 
744 /*
745  free(void* p)
746  Releases the chunk of memory pointed to by p, that had been previously
747  allocated using malloc or a related routine such as realloc.
748  It has no effect if p is null. If p was not malloced or already
749  freed, free(p) will by default cause the current program to abort.
750 */
751  void dlfree(void *);
752 
753 /*
754  calloc(size_t n_elements, size_t element_size);
755  Returns a pointer to n_elements * element_size bytes, with all locations
756  set to zero.
757 */
758  void *dlcalloc(size_t, size_t);
759 
760 /*
761  realloc(void* p, size_t n)
762  Returns a pointer to a chunk of size n that contains the same data
763  as does chunk p up to the minimum of (n, p's size) bytes, or null
764  if no space is available.
765 
766  The returned pointer may or may not be the same as p. The algorithm
767  prefers extending p in most cases when possible, otherwise it
768  employs the equivalent of a malloc-copy-free sequence.
769 
770  If p is null, realloc is equivalent to malloc.
771 
772  If space is not available, realloc returns null, errno is set (if on
773  ANSI) and p is NOT freed.
774 
775  If n is for fewer bytes than already held by p, the newly unused
776  space is lopped off and freed if possible. realloc with a size
777  argument of zero (re)allocates a minimum-sized chunk.
778 
779  The old unix realloc convention of allowing the last-free'd chunk
780  to be used as an argument to realloc is not supported.
781 */
782 
783  void *dlrealloc(void *, size_t);
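/*
  A usage sketch of the behavior described above: keep the old pointer until
  realloc succeeds, because on failure the original block is NOT freed.

    char *buf = (char *) malloc(64);
    if (buf != 0) {
        char *bigger = (char *) realloc(buf, 128);
        if (bigger != 0)
            buf = bigger;   // grown (and possibly moved)
        free(buf);          // on failure, buf still points to the 64-byte block
    }
*/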
784 
785 /*
786  memalign(size_t alignment, size_t n);
787  Returns a pointer to a newly allocated chunk of n bytes, aligned
788  in accord with the alignment argument.
789 
790  The alignment argument should be a power of two. If the argument is
791  not a power of two, the nearest greater power is used.
792  8-byte alignment is guaranteed by normal malloc calls, so don't
793  bother calling memalign with an argument of 8 or less.
794 
795  Overreliance on memalign is a sure way to fragment space.
796 */
797  void *dlmemalign(size_t, size_t);
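/*
  A brief sketch (the 64-byte alignment and sizes are illustrative only):
  request alignment stricter than the default 8 bytes; the alignment must be
  a power of two, and in this allocator the result is released with free().

    double *vec = (double *) memalign(64, 1024 * sizeof(double));
    if (vec != 0)
        free(vec);
*/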
798 
799 /*
800  valloc(size_t n);
801  Equivalent to memalign(pagesize, n), where pagesize is the page
802  size of the system. If the pagesize is unknown, 4096 is used.
803 */
804  void *dlvalloc(size_t);
805 
806 /*
807  mallopt(int parameter_number, int parameter_value)
808  Sets tunable parameters. The format is to provide a
809  (parameter-number, parameter-value) pair. mallopt then sets the
810  corresponding parameter to the argument value if it can (i.e., so
811  long as the value is meaningful), and returns 1 if successful else
812  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
813  normally defined in malloc.h. None of these are used in this malloc,
814  so setting them has no effect. But this malloc also supports other
815  options in mallopt. See below for details. Briefly, supported
816  parameters are as follows (listed defaults are for "typical"
817  configurations).
818 
819  Symbol             param #  default        allowed param values
820  M_TRIM_THRESHOLD   -1       2*1024*1024    any   (MAX_SIZE_T disables)
821  M_GRANULARITY      -2       page size      any power of 2 >= page size
822  M_MMAP_THRESHOLD   -3       256*1024       any   (or 0 if no MMAP support)
823 */
824  int dlmallopt(int, int);
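/*
  A usage sketch (values are illustrative, chosen within the allowed ranges
  listed above):

    mallopt(M_TRIM_THRESHOLD, 1024 * 1024);   // trim when >= 1MB of top space is unused
    mallopt(M_GRANULARITY,    64 * 1024);     // any power of 2 >= page size
    mallopt(M_MMAP_THRESHOLD, 512 * 1024);    // mmap requests of 512K and larger
*/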
825 
826 /*
827  malloc_footprint();
828  Returns the number of bytes obtained from the system. The total
829  number of bytes allocated by malloc, realloc etc., is less than this
830  value. Unlike mallinfo, this function returns only a precomputed
831  result, so can be called frequently to monitor memory consumption.
832  Even if locks are otherwise defined, this function does not use them,
833  so results might not be up to date.
834 */
835  size_t dlmalloc_footprint(void);
836 
837 /*
838  malloc_max_footprint();
839  Returns the maximum number of bytes obtained from the system. This
840  value will be greater than current footprint if deallocated space
841  has been reclaimed by the system. The peak number of bytes allocated
842  by malloc, realloc etc., is less than this value. Unlike mallinfo,
843  this function returns only a precomputed result, so can be called
844  frequently to monitor memory consumption. Even if locks are
845  otherwise defined, this function does not use them, so results might
846  not be up to date.
847 */
848  size_t dlmalloc_max_footprint(void);
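/*
  A monitoring sketch: both footprint calls return precomputed values, so they
  are cheap enough to log periodically.

    printf("footprint: %lu bytes (peak %lu)\n",
           (unsigned long) malloc_footprint(),
           (unsigned long) malloc_max_footprint());
*/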
849 
850 #if !NO_MALLINFO
851 /*
852  mallinfo()
853  Returns (by copy) a struct containing various summary statistics:
854 
855  arena: current total non-mmapped bytes allocated from system
856  ordblks: the number of free chunks
857  smblks: always zero.
858  hblks: current number of mmapped regions
859  hblkhd: total bytes held in mmapped regions
860  usmblks: the maximum total allocated space. This will be greater
861  than current total if trimming has occurred.
862  fsmblks: always zero
863  uordblks: current total allocated space (normal or mmapped)
864  fordblks: total free space
865  keepcost: the maximum number of bytes that could ideally be released
866  back to system via malloc_trim. ("ideally" means that
867  it ignores page restrictions etc.)
868 
869  Because these fields are ints, but internal bookkeeping may
870  be kept as longs, the reported values may wrap around zero and
871  thus be inaccurate.
872 */
873  struct mallinfo dlmallinfo(void);
874 #endif /* NO_MALLINFO */
875 
876 /*
877  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
878 
879  independent_calloc is similar to calloc, but instead of returning a
880  single cleared space, it returns an array of pointers to n_elements
881  independent elements that can hold contents of size elem_size, each
882  of which starts out cleared, and can be independently freed,
883  realloc'ed etc. The elements are guaranteed to be adjacently
884  allocated (this is not guaranteed to occur with multiple callocs or
885  mallocs), which may also improve cache locality in some
886  applications.
887 
888  The "chunks" argument is optional (i.e., may be null, which is
889  probably the most typical usage). If it is null, the returned array
890  is itself dynamically allocated and should also be freed when it is
891  no longer needed. Otherwise, the chunks array must be of at least
892  n_elements in length. It is filled in with the pointers to the
893  chunks.
894 
895  In either case, independent_calloc returns this pointer array, or
896  null if the allocation failed. If n_elements is zero and "chunks"
897  is null, it returns a chunk representing an array with zero elements
898  (which should be freed if not wanted).
899 
900  Each element must be individually freed when it is no longer
901  needed. If you'd like to instead be able to free all at once, you
902  should instead use regular calloc and assign pointers into this
903  space to represent elements. (In this case though, you cannot
904  independently free elements.)
905 
906  independent_calloc simplifies and speeds up implementations of many
907  kinds of pools. It may also be useful when constructing large data
908  structures that initially have a fixed number of fixed-sized nodes,
909  but the number is not known at compile time, and some of the nodes
910  may later need to be freed. For example:
911 
912  struct Node { int item; struct Node* next; };
913 
914  struct Node* build_list() {
915  struct Node** pool;
916  int n = read_number_of_nodes_needed();
917  if (n <= 0) return 0;
918  pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
919  if (pool == 0) die();
920  // organize into a linked list...
921  struct Node* first = pool[0];
922  for (int i = 0; i < n-1; ++i)
923  pool[i]->next = pool[i+1];
924  free(pool); // Can now free the array (or not, if it is needed later)
925  return first;
926  }
927 */
928  void **dlindependent_calloc(size_t, size_t, void **);
929 
930 /*
931  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
932 
933  independent_comalloc allocates, all at once, a set of n_elements
934  chunks with sizes indicated in the "sizes" array. It returns
935  an array of pointers to these elements, each of which can be
936  independently freed, realloc'ed etc. The elements are guaranteed to
937  be adjacently allocated (this is not guaranteed to occur with
938  multiple callocs or mallocs), which may also improve cache locality
939  in some applications.
940 
941  The "chunks" argument is optional (i.e., may be null). If it is null
942  the returned array is itself dynamically allocated and should also
943  be freed when it is no longer needed. Otherwise, the chunks array
944  must be of at least n_elements in length. It is filled in with the
945  pointers to the chunks.
946 
947  In either case, independent_comalloc returns this pointer array, or
948  null if the allocation failed. If n_elements is zero and chunks is
949  null, it returns a chunk representing an array with zero elements
950  (which should be freed if not wanted).
951 
952  Each element must be individually freed when it is no longer
953  needed. If you'd like to instead be able to free all at once, you
954  should instead use a single regular malloc, and assign pointers at
955  particular offsets in the aggregate space. (In this case though, you
956  cannot independently free elements.)
957 
958  independent_comalloc differs from independent_calloc in that each
959  element may have a different size, and also that it does not
960  automatically clear elements.
961 
962  independent_comalloc can be used to speed up allocation in cases
963  where several structs or objects must always be allocated at the
964  same time. For example:
965 
966  struct Head { ... }
967  struct Foot { ... }
968 
969  void send_message(char* msg) {
970  int msglen = strlen(msg);
971  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
972  void* chunks[3];
973  if (independent_comalloc(3, sizes, chunks) == 0)
974  die();
975  struct Head* head = (struct Head*)(chunks[0]);
976  char* body = (char*)(chunks[1]);
977  struct Foot* foot = (struct Foot*)(chunks[2]);
978  // ...
979  }
980 
981  In general though, independent_comalloc is worth using only for
982  larger values of n_elements. For small values, you probably won't
983  detect enough difference from series of malloc calls to bother.
984 
985  Overuse of independent_comalloc can increase overall memory usage,
986  since it cannot reuse existing noncontiguous small chunks that
987  might be available for some of the elements.
988 */
989  void **dlindependent_comalloc(size_t, size_t *, void **);
990 
991 
992 /*
993  pvalloc(size_t n);
994  Equivalent to valloc(minimum-page-that-holds(n)), that is,
995  round up n to nearest pagesize.
996  */
997  void *dlpvalloc(size_t);
998 
999 /*
1000  malloc_trim(size_t pad);
1001 
1002  If possible, gives memory back to the system (via negative arguments
1003  to sbrk) if there is unused memory at the `high' end of the malloc
1004  pool or in unused MMAP segments. You can call this after freeing
1005  large blocks of memory to potentially reduce the system-level memory
1006  requirements of a program. However, it cannot guarantee to reduce
1007  memory. Under some allocation patterns, some large free blocks of
1008  memory will be locked between two used chunks, so they cannot be
1009  given back to the system.
1010 
1011  The `pad' argument to malloc_trim represents the amount of free
1012  trailing space to leave untrimmed. If this argument is zero, only
1013  the minimum amount of memory to maintain internal data structures
1014  will be left. Non-zero arguments can be supplied to maintain enough
1015  trailing space to service future expected allocations without having
1016  to re-obtain memory from the system.
1017 
1018  Malloc_trim returns 1 if it actually released any memory, else 0.
1019 */
1020  int dlmalloc_trim(size_t);
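/*
  A sketch of typical use (the freed block and the 64K pad are illustrative):
  after releasing a large working set, ask the allocator to return unused
  trailing memory to the system while keeping some slack for future requests.

    free(large_scratch_block);                // a hypothetical earlier large allocation
    int released = malloc_trim(64 * 1024);    // 1 if memory was actually returned, else 0
    (void) released;
*/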
1021 
1022 /*
1023  malloc_usable_size(void* p);
1024 
1025  Returns the number of bytes you can actually use in
1026  an allocated chunk, which may be more than you requested (although
1027  often not) due to alignment and minimum size constraints.
1028  You can use this many bytes without worrying about
1029  overwriting other allocated objects. This is not a particularly great
1030  programming practice. malloc_usable_size can be more useful in
1031  debugging and assertions, for example:
1032 
1033  p = malloc(n);
1034  assert(malloc_usable_size(p) >= 256);
1035 */
1036  size_t dlmalloc_usable_size(void *);
1037 
1038 /*
1039  malloc_stats();
1040  Prints on stderr the amount of space obtained from the system (both
1041  via sbrk and mmap), the maximum amount (which may be more than
1042  current if malloc_trim and/or munmap got called), and the current
1043  number of bytes allocated via malloc (or realloc, etc) but not yet
1044  freed. Note that this is the number of bytes allocated, not the
1045  number requested. It will be larger than the number requested
1046  because of alignment and bookkeeping overhead. Because it includes
1047  alignment wastage as being in use, this figure may be greater than
1048  zero even when no user-level chunks are allocated.
1049 
1050  The reported current and maximum system memory can be inaccurate if
1051  a program makes other calls to system memory allocation functions
1052  (normally sbrk) outside of malloc.
1053 
1054  malloc_stats prints only the most commonly interesting statistics.
1055  More information can be obtained by calling mallinfo.
1056 */
1057  void dlmalloc_stats(void);
1058 
1059 #endif /* ONLY_MSPACES */
1060 
1061 #if MSPACES
1062 
1063 /*
1064  mspace is an opaque type representing an independent
1065  region of space that supports mspace_malloc, etc.
1066 */
1067  typedef void *mspace;
1068 
1069 /*
1070  create_mspace creates and returns a new independent space with the
1071  given initial capacity, or, if 0, the default granularity size. It
1072  returns null if there is no system memory available to create the
1073  space. If argument locked is non-zero, the space uses a separate
1074  lock to control access. The capacity of the space will grow
1075  dynamically as needed to service mspace_malloc requests. You can
1076  control the sizes of incremental increases of this space by
1077  compiling with a different DEFAULT_GRANULARITY or dynamically
1078  setting with mallopt(M_GRANULARITY, value).
1079 */
1080  mspace create_mspace(size_t capacity, int locked);
1081 
1082 /*
1083  destroy_mspace destroys the given space, and attempts to return all
1084  of its memory back to the system, returning the total number of
1085  bytes freed. After destruction, the results of access to all memory
1086  used by the space become undefined.
1087 */
1088  size_t destroy_mspace(mspace msp);
1089 
1090 /*
1091  create_mspace_with_base uses the memory supplied as the initial base
1092  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1093  space is used for bookkeeping, so the capacity must be at least this
1094  large. (Otherwise 0 is returned.) When this initial space is
1095  exhausted, additional memory will be obtained from the system.
1096  Destroying this space will deallocate all additionally allocated
1097  space (if possible) but not the initial base.
1098 */
1099  mspace create_mspace_with_base(void *base, size_t capacity, int locked);
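/*
  A sketch (the 1MB arena is illustrative): build an mspace inside a static
  buffer; part of the buffer is consumed by bookkeeping as noted above.

    static char arena[1024 * 1024];
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    if (ms != 0) {
        void *p = mspace_malloc(ms, 256);
        mspace_free(ms, p);
        destroy_mspace(ms);   // releases extra system memory, but not the arena itself
    }
*/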
1100 
1101 /*
1102  mspace_malloc behaves as malloc, but operates within
1103  the given space.
1104 */
1105  void *mspace_malloc(mspace msp, size_t bytes);
1106 
1107 /*
1108  mspace_free behaves as free, but operates within
1109  the given space.
1110 
1111  If compiled with FOOTERS==1, mspace_free is not actually needed.
1112  free may be called instead of mspace_free because freed chunks from
1113  any space are handled by their originating spaces.
1114 */
1115  void mspace_free(mspace msp, void *mem);
1116 
1117 /*
1118  mspace_realloc behaves as realloc, but operates within
1119  the given space.
1120 
1121  If compiled with FOOTERS==1, mspace_realloc is not actually
1122  needed. realloc may be called instead of mspace_realloc because
1123  realloced chunks from any space are handled by their originating
1124  spaces.
1125 */
1126  void *mspace_realloc(mspace msp, void *mem, size_t newsize);
1127 
1128 /*
1129  mspace_calloc behaves as calloc, but operates within
1130  the given space.
1131 */
1132  void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1133 
1134 /*
1135  mspace_memalign behaves as memalign, but operates within
1136  the given space.
1137 */
1138  void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1139 
1140 /*
1141  mspace_independent_calloc behaves as independent_calloc, but
1142  operates within the given space.
1143 */
1144  void **mspace_independent_calloc(mspace msp, size_t n_elements,
1145  size_t elem_size, void *chunks[]);
1146 
1147 /*
1148  mspace_independent_comalloc behaves as independent_comalloc, but
1149  operates within the given space.
1150 */
1151  void **mspace_independent_comalloc(mspace msp, size_t n_elements,
1152  size_t sizes[], void *chunks[]);
1153 
1154 /*
1155  mspace_footprint() returns the number of bytes obtained from the
1156  system for this space.
1157 */
1158  size_t mspace_footprint(mspace msp);
1159 
1160 /*
1161  mspace_max_footprint() returns the peak number of bytes obtained from the
1162  system for this space.
1163 */
1164  size_t mspace_max_footprint(mspace msp);
1165 
1166 
1167 #if !NO_MALLINFO
1168 /*
1169  mspace_mallinfo behaves as mallinfo, but reports properties of
1170  the given space.
1171 */
1172  struct mallinfo mspace_mallinfo(mspace msp);
1173 #endif /* NO_MALLINFO */
1174 
1175 /*
1176  mspace_malloc_stats behaves as malloc_stats, but reports
1177  properties of the given space.
1178 */
1179  void mspace_malloc_stats(mspace msp);
1180 
1181 /*
1182  mspace_trim behaves as malloc_trim, but
1183  operates within the given space.
1184 */
1185  int mspace_trim(mspace msp, size_t pad);
1186 
1187 /*
1188  An alias for mallopt.
1189 */
1190  int mspace_mallopt(int, int);
1191 
1192 #endif /* MSPACES */
1193 
1194 #ifdef __cplusplus
1195 }; /* end of extern "C" */
1196 #endif /* __cplusplus */
1197 
1198 /*
1199  ========================================================================
1200  To make a fully customizable malloc.h header file, cut everything
1201  above this line, put into file malloc.h, edit to suit, and #include it
1202  on the next line, as well as in programs that use this malloc.
1203  ========================================================================
1204 */
1205 
1206 /* #include "malloc.h" */
1207 
1208 /*------------------------------ internal #includes ---------------------- */
1209 
1210 #ifdef _MSC_VER
1211 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1212 #endif /* _MSC_VER */
1213 
1214 #ifndef LACKS_STDIO_H
1215 #include <stdio.h> /* for printing in malloc_stats */
1216 #endif
1217 
1218 #ifndef LACKS_ERRNO_H
1219 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
1220 #endif /* LACKS_ERRNO_H */
1221 #if FOOTERS
1222 #include <time.h> /* for magic initialization */
1223 #endif /* FOOTERS */
1224 #ifndef LACKS_STDLIB_H
1225 #include <stdlib.h> /* for abort() */
1226 #endif /* LACKS_STDLIB_H */
1227 #ifdef DEBUG
1228 #if ABORT_ON_ASSERT_FAILURE
1229 #define assert(x) if(!(x)) ABORT
1230 #else /* ABORT_ON_ASSERT_FAILURE */
1231 #include <assert.h>
1232 #endif /* ABORT_ON_ASSERT_FAILURE */
1233 #else /* DEBUG */
1234 #define assert(x)
1235 #endif /* DEBUG */
1236 #ifndef LACKS_STRING_H
1237 #include <string.h> /* for memset etc */
1238 #endif /* LACKS_STRING_H */
1239 #if USE_BUILTIN_FFS
1240 #ifndef LACKS_STRINGS_H
1241 #include <strings.h> /* for ffs */
1242 #endif /* LACKS_STRINGS_H */
1243 #endif /* USE_BUILTIN_FFS */
1244 #if HAVE_MMAP
1245 #ifndef LACKS_SYS_MMAN_H
1246 #include <sys/mman.h> /* for mmap */
1247 #endif /* LACKS_SYS_MMAN_H */
1248 #ifndef LACKS_FCNTL_H
1249 #include <fcntl.h>
1250 #endif /* LACKS_FCNTL_H */
1251 #endif /* HAVE_MMAP */
1252 #if HAVE_MORECORE
1253 #ifndef LACKS_UNISTD_H
1254 #include <unistd.h> /* for sbrk */
1255 #else /* LACKS_UNISTD_H */
1256 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1257 extern void *sbrk(ptrdiff_t);
1258 #endif /* FreeBSD etc */
1259 #endif /* LACKS_UNISTD_H */
1260 #endif /* HAVE_MORECORE */
1261 
1262 #ifndef WIN32
1263 #ifndef malloc_getpagesize
1264 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1265 # ifndef _SC_PAGE_SIZE
1266 # define _SC_PAGE_SIZE _SC_PAGESIZE
1267 # endif
1268 # endif
1269 # ifdef _SC_PAGE_SIZE
1270 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1271 # else
1272 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1273 extern size_t getpagesize();
1274 # define malloc_getpagesize getpagesize()
1275 # else
1276 # ifdef WIN32 /* use supplied emulation of getpagesize */
1277 # define malloc_getpagesize getpagesize()
1278 # else
1279 # ifndef LACKS_SYS_PARAM_H
1280 # include <sys/param.h>
1281 # endif
1282 # ifdef EXEC_PAGESIZE
1283 # define malloc_getpagesize EXEC_PAGESIZE
1284 # else
1285 # ifdef NBPG
1286 # ifndef CLSIZE
1287 # define malloc_getpagesize NBPG
1288 # else
1289 # define malloc_getpagesize (NBPG * CLSIZE)
1290 # endif
1291 # else
1292 # ifdef NBPC
1293 # define malloc_getpagesize NBPC
1294 # else
1295 # ifdef PAGESIZE
1296 # define malloc_getpagesize PAGESIZE
1297 # else /* just guess */
1298 # define malloc_getpagesize ((size_t)4096U)
1299 # endif
1300 # endif
1301 # endif
1302 # endif
1303 # endif
1304 # endif
1305 # endif
1306 #endif
1307 #endif
1308 
1309 /* ------------------- size_t and alignment properties -------------------- */
1310 
1311 /* The byte and bit size of a size_t */
1312 #define SIZE_T_SIZE (sizeof(size_t))
1313 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1314 
1315 /* Some constants coerced to size_t */
1316 /* Annoying but necessary to avoid errors on some platforms */
1317 #define SIZE_T_ZERO ((size_t)0)
1318 #define SIZE_T_ONE ((size_t)1)
1319 #define SIZE_T_TWO ((size_t)2)
1320 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1321 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1322 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1323 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1324 
1325 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
1326 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1327 
1328 /* True if address a has acceptable alignment */
1329 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1330 
1331 /* the number of bytes to offset an address to align it */
1332 #define align_offset(A)\
1333  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1334  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
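/*
  Worked example with the default MALLOC_ALIGNMENT of 8 (CHUNK_ALIGN_MASK == 7):
  for A == 0x1003, (A & 7) == 3, so align_offset(A) == (8 - 3) & 7 == 5, and
  A + 5 == 0x1008 is 8-byte aligned. Addresses that are already aligned get
  an offset of 0.
*/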
1335 
1336 /* -------------------------- MMAP preliminaries ------------------------- */
1337 
1338 /*
1339  If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
1340  checks to fail so compiler optimizer can delete code rather than
1341  using so many "#if"s.
1342 */
1343 
1344 
1345 /* MORECORE and MMAP must return MFAIL on failure */
1346 #define MFAIL ((void*)(MAX_SIZE_T))
1347 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1348 
1349 #if !HAVE_MMAP
1350 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
1351 #define USE_MMAP_BIT (SIZE_T_ZERO)
1352 #define CALL_MMAP(s) MFAIL
1353 #define CALL_MUNMAP(a, s) (-1)
1354 #define DIRECT_MMAP(s) MFAIL
1355 
1356 #else /* HAVE_MMAP */
1357 #define IS_MMAPPED_BIT (SIZE_T_ONE)
1358 #define USE_MMAP_BIT (SIZE_T_ONE)
1359 
1360 #ifndef WIN32
1361 #define CALL_MUNMAP(a, s) munmap((a), (s))
1362 #define MMAP_PROT (PROT_READ|PROT_WRITE)
1363 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1364 #define MAP_ANONYMOUS MAP_ANON
1365 #endif /* MAP_ANON */
1366 #ifdef MAP_ANONYMOUS
1367 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1368 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1369 #else /* MAP_ANONYMOUS */
1370 /*
1371  Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1372  is unlikely to be needed, but is supplied just in case.
1373 */
1374 #define MMAP_FLAGS (MAP_PRIVATE)
1375 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1376 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1377  (dev_zero_fd = open("/dev/zero", O_RDWR), \
1378  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1379  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1380 #endif /* MAP_ANONYMOUS */
1381 
1382 #define DIRECT_MMAP(s) CALL_MMAP(s)
1383 #else /* WIN32 */
1384 
1385 /* Win32 MMAP via VirtualAlloc */
1386 static void *
1387 win32mmap(size_t size)
1388 {
1389  void *ptr =
1390  VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1391  return (ptr != 0) ? ptr : MFAIL;
1392 }
1393 
1394 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1395 static void *
1396 win32direct_mmap(size_t size)
1397 {
1398  void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1399  PAGE_READWRITE);
1400  return (ptr != 0) ? ptr : MFAIL;
1401 }
1402 
1403 /* This function supports releasing coalesced segments */
1404 static int
1405 win32munmap(void *ptr, size_t size)
1406 {
1407  MEMORY_BASIC_INFORMATION minfo;
1408  char *cptr = ptr;
1409  while (size) {
1410  if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1411  return -1;
1412  if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1413  minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1414  return -1;
1415  if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1416  return -1;
1417  cptr += minfo.RegionSize;
1418  size -= minfo.RegionSize;
1419  }
1420  return 0;
1421 }
1422 
1423 #define CALL_MMAP(s) win32mmap(s)
1424 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
1425 #define DIRECT_MMAP(s) win32direct_mmap(s)
1426 #endif /* WIN32 */
1427 #endif /* HAVE_MMAP */
1428 
1429 #if HAVE_MMAP && HAVE_MREMAP
1430 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1431 #else /* HAVE_MMAP && HAVE_MREMAP */
1432 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1433 #endif /* HAVE_MMAP && HAVE_MREMAP */
1434 
1435 #if HAVE_MORECORE
1436 #define CALL_MORECORE(S) MORECORE(S)
1437 #else /* HAVE_MORECORE */
1438 #define CALL_MORECORE(S) MFAIL
1439 #endif /* HAVE_MORECORE */
1440 
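/*
  Illustrative sketch (not used by the allocator): every request for system
  memory in this file goes through the CALL_* wrappers above and is tested
  against MFAIL/CMFAIL rather than NULL, so configurations without mmap or
  MORECORE still compile down to simple always-fail paths.  example_get_pages
  is a made-up name for illustration only.
*/
static char *example_get_pages(size_t nbytes)
{
    char *base = (char *) CALL_MMAP(nbytes);   /* expands to MFAIL when !HAVE_MMAP */
    return (base != CMFAIL) ? base : 0;        /* normalize failure to a null pointer */
}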
1441 /* mstate bit set if contiguous morecore disabled or failed */
1442 #define USE_NONCONTIGUOUS_BIT (4U)
1443 
1444 /* segment bit set in create_mspace_with_base */
1445 #define EXTERN_BIT (8U)
1446 
1447 
1448 /* --------------------------- Lock preliminaries ------------------------ */
1449 
1450 #if USE_LOCKS
1451 
1452 /*
1453  When locks are defined, there are up to two global locks:
1454 
1455  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
1456  MORECORE. In many cases sys_alloc requires two calls, that should
1457  not be interleaved with calls by other threads. This does not
1458  protect against direct calls to MORECORE by other threads not
1459 using this lock, so there is still code to cope as best we can with
1460 interference.
1461 
1462  * magic_init_mutex ensures that mparams.magic and other
1463  unique mparams values are initialized only once.
1464 */
1465 
1466 #ifndef WIN32
1467 /* By default use posix locks */
1468 #include <pthread.h>
1469 #define MLOCK_T pthread_mutex_t
1470 #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
1471 #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
1472 #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
1473 
1474 #if HAVE_MORECORE
1475 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
1476 #endif /* HAVE_MORECORE */
1477 
1478 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
1479 
1480 #else /* WIN32 */
1481 /*
1482  Because lock-protected regions have bounded times, and there
1483  are no recursive lock calls, we can use simple spinlocks.
1484 */
1485 
1486 #define MLOCK_T long
1487 static int
1488 win32_acquire_lock(MLOCK_T * sl)
1489 {
1490  for (;;) {
1491 #ifdef InterlockedCompareExchangePointer
1492  if (!InterlockedCompareExchange(sl, 1, 0))
1493  return 0;
1494 #else /* Use older void* version */
1495  if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
1496  return 0;
1497 #endif /* InterlockedCompareExchangePointer */
1498  Sleep(0);
1499  }
1500 }
1501 
1502 static void
1503 win32_release_lock(MLOCK_T * sl)
1504 {
1505  InterlockedExchange(sl, 0);
1506 }
1507 
1508 #define INITIAL_LOCK(l) *(l)=0
1509 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1510 #define RELEASE_LOCK(l) win32_release_lock(l)
1511 #if HAVE_MORECORE
1512 static MLOCK_T morecore_mutex;
1513 #endif /* HAVE_MORECORE */
1514 static MLOCK_T magic_init_mutex;
1515 #endif /* WIN32 */
1516 
1517 #define USE_LOCK_BIT (2U)
1518 #else /* USE_LOCKS */
1519 #define USE_LOCK_BIT (0U)
1520 #define INITIAL_LOCK(l)
1521 #endif /* USE_LOCKS */
1522 
1523 #if USE_LOCKS && HAVE_MORECORE
1524 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1525 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1526 #else /* USE_LOCKS && HAVE_MORECORE */
1527 #define ACQUIRE_MORECORE_LOCK()
1528 #define RELEASE_MORECORE_LOCK()
1529 #endif /* USE_LOCKS && HAVE_MORECORE */
1530 
1531 #if USE_LOCKS
1532 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1533 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1534 #else /* USE_LOCKS */
1535 #define ACQUIRE_MAGIC_INIT_LOCK()
1536 #define RELEASE_MAGIC_INIT_LOCK()
1537 #endif /* USE_LOCKS */
1538 
1539 
1540 /* ----------------------- Chunk representations ------------------------ */
1541 
1542 /*
1543  (The following includes lightly edited explanations by Colin Plumb.)
1544 
1545  The malloc_chunk declaration below is misleading (but accurate and
1546  necessary). It declares a "view" into memory allowing access to
1547  necessary fields at known offsets from a given base.
1548 
1549  Chunks of memory are maintained using a `boundary tag' method as
1550  originally described by Knuth. (See the paper by Paul Wilson
1551  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
1552  techniques.) Sizes of free chunks are stored both in the front of
1553  each chunk and at the end. This makes consolidating fragmented
1554  chunks into bigger chunks fast. The head fields also hold bits
1555  representing whether chunks are free or in use.
1556 
1557  Here are some pictures to make it clearer. They are "exploded" to
1558  show that the state of a chunk can be thought of as extending from
1559  the high 31 bits of the head field of its header through the
1560  prev_foot and PINUSE_BIT bit of the following chunk header.
1561 
1562  A chunk that's in use looks like:
1563 
1564  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1565  | Size of previous chunk (if P = 1) |
1566  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1567  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1568  | Size of this chunk 1| +-+
1569  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1570  | |
1571  +- -+
1572  | |
1573  +- -+
1574  | :
1575  +- size - sizeof(size_t) available payload bytes -+
1576  : |
1577  chunk-> +- -+
1578  | |
1579  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1580  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
1581  | Size of next chunk (may or may not be in use) | +-+
1582  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1583 
1584  And if it's free, it looks like this:
1585 
1586  chunk-> +- -+
1587  | User payload (must be in use, or we would have merged!) |
1588  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1589  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1590  | Size of this chunk 0| +-+
1591  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1592  | Next pointer |
1593  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1594  | Prev pointer |
1595  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1596  | :
1597  +- size - sizeof(struct chunk) unused bytes -+
1598  : |
1599  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1600  | Size of this chunk |
1601  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1602  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
1603  | Size of next chunk (must be in use, or we would have merged)| +-+
1604  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1605  | :
1606  +- User payload -+
1607  : |
1608  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1609  |0|
1610  +-+
1611  Note that since we always merge adjacent free chunks, the chunks
1612  adjacent to a free chunk must be in use.
1613 
1614  Given a pointer to a chunk (which can be derived trivially from the
1615  payload pointer) we can, in O(1) time, find out whether the adjacent
1616  chunks are free, and if so, unlink them from the lists that they
1617  are on and merge them with the current chunk.
1618 
1619  Chunks always begin on even word boundaries, so the mem portion
1620  (which is returned to the user) is also on an even word boundary, and
1621  thus at least double-word aligned.
1622 
1623  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
1624  chunk size (which is always a multiple of two words), is an in-use
1625  bit for the *previous* chunk. If that bit is *clear*, then the
1626  word before the current chunk size contains the previous chunk
1627  size, and can be used to find the front of the previous chunk.
1628  The very first chunk allocated always has this bit set, preventing
1629  access to non-existent (or non-owned) memory. If pinuse is set for
1630  any given chunk, then you CANNOT determine the size of the
1631  previous chunk, and might even get a memory addressing fault when
1632  trying to do so.
1633 
1634  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
1635  the chunk size redundantly records whether the current chunk is
1636  inuse. This redundancy enables usage checks within free and realloc,
1637  and reduces indirection when freeing and consolidating chunks.
1638 
1639  Each freshly allocated chunk must have both cinuse and pinuse set.
1640  That is, each allocated chunk borders either a previously allocated
1641  and still in-use chunk, or the base of its memory arena. This is
1642 ensured by making all allocations from the `lowest' part of any
1643  found chunk. Further, no free chunk physically borders another one,
1644  so each free chunk is known to be preceded and followed by either
1645  inuse chunks or the ends of memory.
1646 
1647  Note that the `foot' of the current chunk is actually represented
1648  as the prev_foot of the NEXT chunk. This makes it easier to
1649  deal with alignments etc but can be very confusing when trying
1650  to extend or adapt this code.
1651 
1652  The exceptions to all this are
1653 
1654  1. The special chunk `top' is the top-most available chunk (i.e.,
1655  the one bordering the end of available memory). It is treated
1656  specially. Top is never included in any bin, is used only if
1657  no other chunk is available, and is released back to the
1658  system if it is very large (see M_TRIM_THRESHOLD). In effect,
1659  the top chunk is treated as larger (and thus less well
1660  fitting) than any other available chunk. The top chunk
1661  doesn't update its trailing size field since there is no next
1662  contiguous chunk that would have to index off it. However,
1663  space is still allocated for it (TOP_FOOT_SIZE) to enable
1664  separation or merging when space is extended.
1665 
1666 2. Chunks allocated via mmap, which have the lowest-order bit
1667  (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
1668  PINUSE_BIT in their head fields. Because they are allocated
1669  one-by-one, each must carry its own prev_foot field, which is
1670  also used to hold the offset this chunk has within its mmapped
1671  region, which is needed to preserve alignment. Each mmapped
1672  chunk is trailed by the first two fields of a fake next-chunk
1673  for sake of usage checks.
1674 
1675 */
1676 
1677 struct malloc_chunk
1678 {
1679  size_t prev_foot; /* Size of previous chunk (if free). */
1680  size_t head; /* Size and inuse bits. */
1681  struct malloc_chunk *fd; /* double links -- used only if free. */
1682  struct malloc_chunk *bk;
1683 };
1684 
1685 typedef struct malloc_chunk mchunk;
1686 typedef struct malloc_chunk *mchunkptr;
1687 typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
1688 typedef size_t bindex_t; /* Described below */
1689 typedef unsigned int binmap_t; /* Described below */
1690 typedef unsigned int flag_t; /* The type of various bit flag sets */
1691 
1692 /* ------------------- Chunks sizes and alignments ----------------------- */
1693 
1694 #define MCHUNK_SIZE (sizeof(mchunk))
1695 
1696 #if FOOTERS
1697 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1698 #else /* FOOTERS */
1699 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
1700 #endif /* FOOTERS */
1701 
1702 /* MMapped chunks need a second word of overhead ... */
1703 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1704 /* ... and additional padding for fake next-chunk at foot */
1705 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1706 
1707 /* The smallest size we can malloc is an aligned minimal chunk */
1708 #define MIN_CHUNK_SIZE\
1709  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1710 
1711 /* conversion from malloc headers to user pointers, and back */
1712 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1713 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1714 /* chunk associated with aligned address A */
1715 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
1716 
1717 /* Bounds on request (not chunk) sizes. */
1718 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1719 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1720 
1721 /* pad request bytes into a usable size */
1722 #define pad_request(req) \
1723  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1724 
1725 /* pad request, checking for minimum (but not maximum) */
1726 #define request2size(req) \
1727  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
1728 
1729 
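/*
  Illustrative sketch: request2size() above, written out as a function.  The
  sizes in the comments assume a 32-bit build (SIZE_T_SIZE == 4), an 8-byte
  MALLOC_ALIGNMENT and FOOTERS disabled; other configurations give different
  numbers.  The function name is ours, for illustration only.
*/
static size_t example_request2size(size_t req)
{
    if (req < MIN_REQUEST)        /* tiny requests all share the minimum chunk */
        return MIN_CHUNK_SIZE;    /* e.g. malloc(1)  -> 16-byte chunk */
    return pad_request(req);      /* e.g. malloc(13) -> 24-byte chunk */
}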
1730 /* ------------------ Operations on head and foot fields ----------------- */
1731 
1732 /*
1733  The head field of a chunk is or'ed with PINUSE_BIT when previous
1734  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
1735  use. If the chunk was obtained with mmap, the prev_foot field has
1736 IS_MMAPPED_BIT set, and the remaining bits of that field hold the offset
1737 of the chunk's base within its mmapped region.
1738 */
1739 
1740 #define PINUSE_BIT (SIZE_T_ONE)
1741 #define CINUSE_BIT (SIZE_T_TWO)
1742 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
1743 
1744 /* Head value for fenceposts */
1745 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
1746 
1747 /* extraction of fields from head words */
1748 #define cinuse(p) ((p)->head & CINUSE_BIT)
1749 #define pinuse(p) ((p)->head & PINUSE_BIT)
1750 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
1751 
1752 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1753 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
1754 
1755 /* Treat space at ptr +/- offset as a chunk */
1756 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1757 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
1758 
1759 /* Ptr to next or previous physical malloc_chunk. */
1760 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
1761 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
1762 
1763 /* extract next chunk's pinuse bit */
1764 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
1765 
1766 /* Get/set size at footer */
1767 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
1768 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
1769 
1770 /* Set size, pinuse bit, and foot */
1771 #define set_size_and_pinuse_of_free_chunk(p, s)\
1772  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
1773 
1774 /* Set size, pinuse bit, foot, and clear next pinuse */
1775 #define set_free_with_pinuse(p, s, n)\
1776  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
1777 
1778 #define is_mmapped(p)\
1779  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
1780 
1781 /* Get the internal overhead associated with chunk p */
1782 #define overhead_for(p)\
1783  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
1784 
1785 /* Return true if malloced space is not necessarily cleared */
1786 #if MMAP_CLEARS
1787 #define calloc_must_clear(p) (!is_mmapped(p))
1788 #else /* MMAP_CLEARS */
1789 #define calloc_must_clear(p) (1)
1790 #endif /* MMAP_CLEARS */
1791 
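/*
  Illustrative sketch (not used by the allocator): the boundary-tag macros
  above give O(1) access to a free neighbor.  prev_chunk() is valid only
  when the P bit is clear, exactly as the chunk diagrams earlier explain.
  The function name is ours, for illustration only.
*/
static mchunkptr example_free_predecessor(mchunkptr p)
{
    if (!pinuse(p))               /* prev_foot holds the previous chunk's size */
        return prev_chunk(p);     /* ((char*)p) - p->prev_foot */
    return 0;                     /* previous chunk in use: its size is not recorded */
}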
1792 /* ---------------------- Overlaid data structures ----------------------- */
1793 
1794 /*
1795  When chunks are not in use, they are treated as nodes of either
1796  lists or trees.
1797 
1798  "Small" chunks are stored in circular doubly-linked lists, and look
1799  like this:
1800 
1801  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1802  | Size of previous chunk |
1803  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1804  `head:' | Size of chunk, in bytes |P|
1805  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1806  | Forward pointer to next chunk in list |
1807  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1808  | Back pointer to previous chunk in list |
1809  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1810  | Unused space (may be 0 bytes long) .
1811  . .
1812  . |
1813 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1814  `foot:' | Size of chunk, in bytes |
1815  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1816 
1817  Larger chunks are kept in a form of bitwise digital trees (aka
1818  tries) keyed on chunksizes. Because malloc_tree_chunks are only for
1819  free chunks greater than 256 bytes, their size doesn't impose any
1820  constraints on user chunk sizes. Each node looks like:
1821 
1822  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1823  | Size of previous chunk |
1824  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1825  `head:' | Size of chunk, in bytes |P|
1826  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1827  | Forward pointer to next chunk of same size |
1828  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1829  | Back pointer to previous chunk of same size |
1830  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1831  | Pointer to left child (child[0]) |
1832  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1833  | Pointer to right child (child[1]) |
1834  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1835  | Pointer to parent |
1836  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1837  | bin index of this chunk |
1838  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1839  | Unused space .
1840  . |
1841 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1842  `foot:' | Size of chunk, in bytes |
1843  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1844 
1845  Each tree holding treenodes is a tree of unique chunk sizes. Chunks
1846  of the same size are arranged in a circularly-linked list, with only
1847  the oldest chunk (the next to be used, in our FIFO ordering)
1848  actually in the tree. (Tree members are distinguished by a non-null
1849 parent pointer.) If a chunk with the same size as an existing node
1850  is inserted, it is linked off the existing node using pointers that
1851  work in the same way as fd/bk pointers of small chunks.
1852 
1853  Each tree contains a power of 2 sized range of chunk sizes (the
1854 smallest is 0x100 <= x < 0x180), which is divided in half at each
1855  tree level, with the chunks in the smaller half of the range (0x100
1856 <= x < 0x140 for the top node) in the left subtree and the larger
1857  half (0x140 <= x < 0x180) in the right subtree. This is, of course,
1858  done by inspecting individual bits.
1859 
1860  Using these rules, each node's left subtree contains all smaller
1861  sizes than its right subtree. However, the node at the root of each
1862  subtree has no particular ordering relationship to either. (The
1863  dividing line between the subtree sizes is based on trie relation.)
1864  If we remove the last chunk of a given size from the interior of the
1865  tree, we need to replace it with a leaf node. The tree ordering
1866  rules permit a node to be replaced by any leaf below it.
1867 
1868  The smallest chunk in a tree (a common operation in a best-fit
1869  allocator) can be found by walking a path to the leftmost leaf in
1870  the tree. Unlike a usual binary tree, where we follow left child
1871  pointers until we reach a null, here we follow the right child
1872  pointer any time the left one is null, until we reach a leaf with
1873  both child pointers null. The smallest chunk in the tree will be
1874  somewhere along that path.
1875 
1876  The worst case number of steps to add, find, or remove a node is
1877  bounded by the number of bits differentiating chunks within
1878  bins. Under current bin calculations, this ranges from 6 up to 21
1879  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1880  is of course much better.
1881 */
1882 
1883 struct malloc_tree_chunk
1884 {
1885  /* The first four fields must be compatible with malloc_chunk */
1886  size_t prev_foot;
1887  size_t head;
1888  struct malloc_tree_chunk *fd;
1889  struct malloc_tree_chunk *bk;
1890 
1891  struct malloc_tree_chunk *child[2];
1892  struct malloc_tree_chunk *parent;
1893  bindex_t index;
1894 };
1895 
1896 typedef struct malloc_tree_chunk tchunk;
1897 typedef struct malloc_tree_chunk *tchunkptr;
1898 typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
1899 
1900 /* A little helper macro for trees */
1901 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
1902 
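/*
  Illustrative sketch of the "leftmost path" walk described above: follow
  child[0] when present, otherwise child[1], keeping the smallest size seen.
  The allocator performs this search inline in its best-fit paths later in
  this file; this standalone function exists only to illustrate the idea.
*/
static tchunkptr example_smallest_in_tree(tchunkptr t)
{
    tchunkptr best = t;
    while ((t = leftmost_child(t)) != 0) {
        if (chunksize(t) < chunksize(best))
            best = t;             /* the smallest chunk lies somewhere on this path */
    }
    return best;
}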
1903 /* ----------------------------- Segments -------------------------------- */
1904 
1905 /*
1906  Each malloc space may include non-contiguous segments, held in a
1907  list headed by an embedded malloc_segment record representing the
1908  top-most space. Segments also include flags holding properties of
1909  the space. Large chunks that are directly allocated by mmap are not
1910  included in this list. They are instead independently created and
1911  destroyed without otherwise keeping track of them.
1912 
1913  Segment management mainly comes into play for spaces allocated by
1914  MMAP. Any call to MMAP might or might not return memory that is
1915  adjacent to an existing segment. MORECORE normally contiguously
1916  extends the current space, so this space is almost always adjacent,
1917  which is simpler and faster to deal with. (This is why MORECORE is
1918  used preferentially to MMAP when both are available -- see
1919  sys_alloc.) When allocating using MMAP, we don't use any of the
1920  hinting mechanisms (inconsistently) supported in various
1921  implementations of unix mmap, or distinguish reserving from
1922  committing memory. Instead, we just ask for space, and exploit
1923  contiguity when we get it. It is probably possible to do
1924  better than this on some systems, but no general scheme seems
1925  to be significantly better.
1926 
1927  Management entails a simpler variant of the consolidation scheme
1928  used for chunks to reduce fragmentation -- new adjacent memory is
1929  normally prepended or appended to an existing segment. However,
1930  there are limitations compared to chunk consolidation that mostly
1931  reflect the fact that segment processing is relatively infrequent
1932  (occurring only when getting memory from system) and that we
1933  don't expect to have huge numbers of segments:
1934 
1935  * Segments are not indexed, so traversal requires linear scans. (It
1936  would be possible to index these, but is not worth the extra
1937  overhead and complexity for most programs on most platforms.)
1938  * New segments are only appended to old ones when holding top-most
1939  memory; if they cannot be prepended to others, they are held in
1940  different segments.
1941 
1942  Except for the top-most segment of an mstate, each segment record
1943  is kept at the tail of its segment. Segments are added by pushing
1944  segment records onto the list headed by &mstate.seg for the
1945  containing mstate.
1946 
1947  Segment flags control allocation/merge/deallocation policies:
1948  * If EXTERN_BIT set, then we did not allocate this segment,
1949  and so should not try to deallocate or merge with others.
1950  (This currently holds only for the initial segment passed
1951  into create_mspace_with_base.)
1952  * If IS_MMAPPED_BIT set, the segment may be merged with
1953  other surrounding mmapped segments and trimmed/de-allocated
1954  using munmap.
1955  * If neither bit is set, then the segment was obtained using
1956  MORECORE so can be merged with surrounding MORECORE'd segments
1957  and deallocated/trimmed using MORECORE with negative arguments.
1958 */
1959 
1960 struct malloc_segment
1961 {
1962  char *base; /* base address */
1963  size_t size; /* allocated size */
1964  struct malloc_segment *next; /* ptr to next segment */
1965  flag_t sflags; /* mmap and extern flag */
1966 };
1967 
1968 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
1969 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1970 
1971 typedef struct malloc_segment msegment;
1972 typedef struct malloc_segment *msegmentptr;
1973 
1974 /* ---------------------------- malloc_state ----------------------------- */
1975 
1976 /*
1977  A malloc_state holds all of the bookkeeping for a space.
1978  The main fields are:
1979 
1980  Top
1981  The topmost chunk of the currently active segment. Its size is
1982  cached in topsize. The actual size of topmost space is
1983  topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1984  fenceposts and segment records if necessary when getting more
1985  space from the system. The size at which to autotrim top is
1986  cached from mparams in trim_check, except that it is disabled if
1987  an autotrim fails.
1988 
1989  Designated victim (dv)
1990  This is the preferred chunk for servicing small requests that
1991  don't have exact fits. It is normally the chunk split off most
1992  recently to service another small request. Its size is cached in
1993  dvsize. The link fields of this chunk are not maintained since it
1994  is not kept in a bin.
1995 
1996  SmallBins
1997  An array of bin headers for free chunks. These bins hold chunks
1998  with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1999  chunks of all the same size, spaced 8 bytes apart. To simplify
2000  use in double-linked lists, each bin header acts as a malloc_chunk
2001  pointing to the real first node, if it exists (else pointing to
2002  itself). This avoids special-casing for headers. But to avoid
2003  waste, we allocate only the fd/bk pointers of bins, and then use
2004  repositioning tricks to treat these as the fields of a chunk.
2005 
2006  TreeBins
2007  Treebins are pointers to the roots of trees holding a range of
2008  sizes. There are 2 equally spaced treebins for each power of two
2009 from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2010  larger.
2011 
2012  Bin maps
2013  There is one bit map for small bins ("smallmap") and one for
2014 treebins ("treemap"). Each bin sets its bit when non-empty, and
2015  clears the bit when empty. Bit operations are then used to avoid
2016  bin-by-bin searching -- nearly all "search" is done without ever
2017  looking at bins that won't be selected. The bit maps
2018 conservatively use 32 bits per map word, even on 64-bit systems.
2019  For a good description of some of the bit-based techniques used
2020  here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2021  supplement at http://hackersdelight.org/). Many of these are
2022  intended to reduce the branchiness of paths through malloc etc, as
2023  well as to reduce the number of memory locations read or written.
2024 
2025  Segments
2026  A list of segments headed by an embedded malloc_segment record
2027  representing the initial space.
2028 
2029  Address check support
2030  The least_addr field is the least address ever obtained from
2031  MORECORE or MMAP. Attempted frees and reallocs of any address less
2032  than this are trapped (unless INSECURE is defined).
2033 
2034  Magic tag
2035 A cross-check field that should always hold the same value as mparams.magic.
2036 
2037  Flags
2038  Bits recording whether to use MMAP, locks, or contiguous MORECORE
2039 
2040  Statistics
2041  Each space keeps track of current and maximum system memory
2042  obtained via MORECORE or MMAP.
2043 
2044  Locking
2045  If USE_LOCKS is defined, the "mutex" lock is acquired and released
2046  around every public call using this mspace.
2047 */
2048 
2049 /* Bin types, widths and sizes */
2050 #define NSMALLBINS (32U)
2051 #define NTREEBINS (32U)
2052 #define SMALLBIN_SHIFT (3U)
2053 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2054 #define TREEBIN_SHIFT (8U)
2055 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2056 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2057 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2058 
2059 struct malloc_state
2060 {
2061  binmap_t smallmap;
2062  binmap_t treemap;
2063  size_t dvsize;
2064  size_t topsize;
2065  char *least_addr;
2066  mchunkptr dv;
2067  mchunkptr top;
2068  size_t trim_check;
2069  size_t magic;
2070  mchunkptr smallbins[(NSMALLBINS + 1) * 2];
2071  tbinptr treebins[NTREEBINS];
2072  size_t footprint;
2073  size_t max_footprint;
2074  flag_t mflags;
2075 #if USE_LOCKS
2076  MLOCK_T mutex; /* locate lock among fields that rarely change */
2077 #endif /* USE_LOCKS */
2078  msegment seg;
2079 };
2080 
2081 typedef struct malloc_state *mstate;
2082 
2083 /* ------------- Global malloc_state and malloc_params ------------------- */
2084 
2085 /*
2086  malloc_params holds global properties, including those that can be
2087  dynamically set using mallopt. There is a single instance, mparams,
2088  initialized in init_mparams.
2089 */
2090 
2091 struct malloc_params
2092 {
2093  size_t magic;
2094  size_t page_size;
2095  size_t granularity;
2096  size_t mmap_threshold;
2097  size_t trim_threshold;
2098  flag_t default_mflags;
2099 };
2100 
2101 static struct malloc_params mparams;
2102 
2103 /* The global malloc_state used for all non-"mspace" calls */
2104 static struct malloc_state _gm_;
2105 #define gm (&_gm_)
2106 #define is_global(M) ((M) == &_gm_)
2107 #define is_initialized(M) ((M)->top != 0)
2108 
2109 /* -------------------------- system alloc setup ------------------------- */
2110 
2111 /* Operations on mflags */
2112 
2113 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2114 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2115 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2116 
2117 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2118 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2119 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2120 
2121 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2122 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2123 
2124 #define set_lock(M,L)\
2125  ((M)->mflags = (L)?\
2126  ((M)->mflags | USE_LOCK_BIT) :\
2127  ((M)->mflags & ~USE_LOCK_BIT))
2128 
2129 /* page-align a size */
2130 #define page_align(S)\
2131  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
2132 
2133 /* granularity-align a size */
2134 #define granularity_align(S)\
2135  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
2136 
2137 #define is_page_aligned(S)\
2138  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2139 #define is_granularity_aligned(S)\
2140  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2141 
2142 /* True if segment S holds address A */
2143 #define segment_holds(S, A)\
2144  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2145 
2146 /* Return segment holding given address */
2147 static msegmentptr
2148 segment_holding(mstate m, char *addr)
2149 {
2150  msegmentptr sp = &m->seg;
2151  for (;;) {
2152  if (addr >= sp->base && addr < sp->base + sp->size)
2153  return sp;
2154  if ((sp = sp->next) == 0)
2155  return 0;
2156  }
2157 }
2158 
2159 /* Return true if segment contains a segment link */
2160 static int
2161 has_segment_link(mstate m, msegmentptr ss)
2162 {
2163  msegmentptr sp = &m->seg;
2164  for (;;) {
2165  if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2166  return 1;
2167  if ((sp = sp->next) == 0)
2168  return 0;
2169  }
2170 }
2171 
2172 #ifndef MORECORE_CANNOT_TRIM
2173 #define should_trim(M,s) ((s) > (M)->trim_check)
2174 #else /* MORECORE_CANNOT_TRIM */
2175 #define should_trim(M,s) (0)
2176 #endif /* MORECORE_CANNOT_TRIM */
2177 
2178 /*
2179  TOP_FOOT_SIZE is padding at the end of a segment, including space
2180  that may be needed to place segment records and fenceposts when new
2181  noncontiguous segments are added.
2182 */
2183 #define TOP_FOOT_SIZE\
2184  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2185 
2186 
2187 /* ------------------------------- Hooks -------------------------------- */
2188 
2189 /*
2190  PREACTION should be defined to return 0 on success, and nonzero on
2191  failure. If you are not using locking, you can redefine these to do
2192  anything you like.
2193 */
2194 
2195 #if USE_LOCKS
2196 
2197 /* Ensure locks are initialized */
2198 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2199 
2200 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2201 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2202 #else /* USE_LOCKS */
2203 
2204 #ifndef PREACTION
2205 #define PREACTION(M) (0)
2206 #endif /* PREACTION */
2207 
2208 #ifndef POSTACTION
2209 #define POSTACTION(M)
2210 #endif /* POSTACTION */
2211 
2212 #endif /* USE_LOCKS */
2213 
2214 /*
2215  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2216  USAGE_ERROR_ACTION is triggered on detected bad frees and
2217  reallocs. The argument p is an address that might have triggered the
2218  fault. It is ignored by the two predefined actions, but might be
2219  useful in custom actions that try to help diagnose errors.
2220 */
2221 
2222 #if PROCEED_ON_ERROR
2223 
2224 /* A count of the number of corruption errors causing resets */
2225 int malloc_corruption_error_count;
2226 
2227 /* default corruption action */
2228 static void reset_on_error(mstate m);
2229 
2230 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2231 #define USAGE_ERROR_ACTION(m, p)
2232 
2233 #else /* PROCEED_ON_ERROR */
2234 
2235 #ifndef CORRUPTION_ERROR_ACTION
2236 #define CORRUPTION_ERROR_ACTION(m) ABORT
2237 #endif /* CORRUPTION_ERROR_ACTION */
2238 
2239 #ifndef USAGE_ERROR_ACTION
2240 #define USAGE_ERROR_ACTION(m,p) ABORT
2241 #endif /* USAGE_ERROR_ACTION */
2242 
2243 #endif /* PROCEED_ON_ERROR */
2244 
2245 /* -------------------------- Debugging setup ---------------------------- */
2246 
2247 #if ! DEBUG
2248 
2249 #define check_free_chunk(M,P)
2250 #define check_inuse_chunk(M,P)
2251 #define check_malloced_chunk(M,P,N)
2252 #define check_mmapped_chunk(M,P)
2253 #define check_malloc_state(M)
2254 #define check_top_chunk(M,P)
2255 
2256 #else /* DEBUG */
2257 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
2258 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2259 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2260 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2261 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2262 #define check_malloc_state(M) do_check_malloc_state(M)
2263 
2264 static void do_check_any_chunk(mstate m, mchunkptr p);
2265 static void do_check_top_chunk(mstate m, mchunkptr p);
2266 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2267 static void do_check_inuse_chunk(mstate m, mchunkptr p);
2268 static void do_check_free_chunk(mstate m, mchunkptr p);
2269 static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
2270 static void do_check_tree(mstate m, tchunkptr t);
2271 static void do_check_treebin(mstate m, bindex_t i);
2272 static void do_check_smallbin(mstate m, bindex_t i);
2273 static void do_check_malloc_state(mstate m);
2274 static int bin_find(mstate m, mchunkptr x);
2275 static size_t traverse_and_check(mstate m);
2276 #endif /* DEBUG */
2277 
2278 /* ---------------------------- Indexing Bins ---------------------------- */
2279 
2280 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2281 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
2282 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2283 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2284 
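/*
  Illustrative sketch: with SMALLBIN_SHIFT == 3 the smallbins are spaced
  8 bytes apart, so a padded chunk size of 40 maps to bin index 5, and
  small_index2size(5) recovers 40.  The function name is ours, for
  illustration only.
*/
static bindex_t example_smallbin_index(size_t padded_size)
{
    return is_small(padded_size) ? small_index(padded_size)
                                 : (bindex_t) NSMALLBINS;   /* "not small" marker */
}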
2285 /* addressing by index. See above about smallbin repositioning */
2286 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2287 #define treebin_at(M,i) (&((M)->treebins[i]))
2288 
2289 /* assign tree index for size S to variable I */
2290 #if defined(__GNUC__) && defined(i386)
2291 #define compute_tree_index(S, I)\
2292 {\
2293  size_t X = S >> TREEBIN_SHIFT;\
2294  if (X == 0)\
2295  I = 0;\
2296  else if (X > 0xFFFF)\
2297  I = NTREEBINS-1;\
2298  else {\
2299  unsigned int K;\
2300  __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
2301  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2302  }\
2303 }
2304 #else /* GNUC */
2305 #define compute_tree_index(S, I)\
2306 {\
2307  size_t X = S >> TREEBIN_SHIFT;\
2308  if (X == 0)\
2309  I = 0;\
2310  else if (X > 0xFFFF)\
2311  I = NTREEBINS-1;\
2312  else {\
2313  unsigned int Y = (unsigned int)X;\
2314  unsigned int N = ((Y - 0x100) >> 16) & 8;\
2315  unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2316  N += K;\
2317  N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2318  K = 14 - N + ((Y <<= K) >> 15);\
2319  I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2320  }\
2321 }
2322 #endif /* GNUC */
2323 
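/*
  Illustrative sketch: spot-checking the index math above.  Sizes 256..383
  land in treebin 0, 384..511 in treebin 1, 512..767 in treebin 2, and so
  on, with each pair of bins covering the next power-of-two range.  The
  function name is ours; the allocator uses the macro directly.
*/
static bindex_t example_treebin_for(size_t nb)
{
    bindex_t idx;
    compute_tree_index(nb, idx);   /* e.g. 256 -> 0, 384 -> 1, 512 -> 2 */
    return idx;
}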
2324 /* Bit representing maximum resolved size in a treebin at i */
2325 #define bit_for_tree_index(i) \
2326  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2327 
2328 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
2329 #define leftshift_for_tree_index(i) \
2330  ((i == NTREEBINS-1)? 0 : \
2331  ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2332 
2333 /* The size of the smallest chunk held in bin with index i */
2334 #define minsize_for_tree_index(i) \
2335  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2336  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2337 
2338 
2339 /* ------------------------ Operations on bin maps ----------------------- */
2340 
2341 /* bit corresponding to given index */
2342 #define idx2bit(i) ((binmap_t)(1) << (i))
2343 
2344 /* Mark/Clear bits with given index */
2345 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2346 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2347 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2348 
2349 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2350 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2351 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2352 
2353 /* index corresponding to given bit */
2354 
2355 #if defined(__GNUC__) && defined(i386)
2356 #define compute_bit2idx(X, I)\
2357 {\
2358  unsigned int J;\
2359  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2360  I = (bindex_t)J;\
2361 }
2362 
2363 #else /* GNUC */
2364 #if USE_BUILTIN_FFS
2365 #define compute_bit2idx(X, I) I = ffs(X)-1
2366 
2367 #else /* USE_BUILTIN_FFS */
2368 #define compute_bit2idx(X, I)\
2369 {\
2370  unsigned int Y = X - 1;\
2371  unsigned int K = Y >> (16-4) & 16;\
2372  unsigned int N = K; Y >>= K;\
2373  N += K = Y >> (8-3) & 8; Y >>= K;\
2374  N += K = Y >> (4-2) & 4; Y >>= K;\
2375  N += K = Y >> (2-1) & 2; Y >>= K;\
2376  N += K = Y >> (1-0) & 1; Y >>= K;\
2377  I = (bindex_t)(N + Y);\
2378 }
2379 #endif /* USE_BUILTIN_FFS */
2380 #endif /* GNUC */
2381 
2382 /* isolate the least set bit of a bitmap */
2383 #define least_bit(x) ((x) & -(x))
2384 
2385 /* mask with all bits to left of least bit of x on */
2386 #define left_bits(x) ((x<<1) | -(x<<1))
2387 
2388 /* mask with all bits to left of or equal to least bit of x on */
2389 #define same_or_left_bits(x) ((x) | -(x))
2390 
2391 
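/*
  Illustrative sketch: how the bitmap helpers above replace a loop over
  bins.  Given a bin map and a starting index, two mask operations plus
  compute_bit2idx locate the first non-empty bin at or above that index.
  Behavior is undefined here if no such bin exists; the allocator always
  checks the map first.  The function name is ours, for illustration only.
*/
static bindex_t example_first_marked_bin(binmap_t map, bindex_t i)
{
    binmap_t eligible = map & same_or_left_bits(idx2bit(i)); /* keep bins >= i */
    binmap_t bit = least_bit(eligible);                      /* lowest of those */
    bindex_t idx;
    compute_bit2idx(bit, idx);                               /* bit -> bin index */
    return idx;
}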
2392 /* ----------------------- Runtime Check Support ------------------------- */
2393 
2394 /*
2395  For security, the main invariant is that malloc/free/etc never
2396  writes to a static address other than malloc_state, unless static
2397  malloc_state itself has been corrupted, which cannot occur via
2398  malloc (because of these checks). In essence this means that we
2399  believe all pointers, sizes, maps etc held in malloc_state, but
2400  check all of those linked or offsetted from other embedded data
2401  structures. These checks are interspersed with main code in a way
2402  that tends to minimize their run-time cost.
2403 
2404  When FOOTERS is defined, in addition to range checking, we also
2405 verify footer fields of inuse chunks, which can be used to guarantee
2406  that the mstate controlling malloc/free is intact. This is a
2407  streamlined version of the approach described by William Robertson
2408  et al in "Run-time Detection of Heap-based Overflows" LISA'03
2409  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2410  of an inuse chunk holds the xor of its mstate and a random seed,
2411  that is checked upon calls to free() and realloc(). This is
2412 (probabilistically) unguessable from outside the program, but can be
2413  computed by any code successfully malloc'ing any chunk, so does not
2414  itself provide protection against code that has already broken
2415  security through some other means. Unlike Robertson et al, we
2416  always dynamically check addresses of all offset chunks (previous,
2417  next, etc). This turns out to be cheaper than relying on hashes.
2418 */
2419 
2420 #if !INSECURE
2421 /* Check if address a is at least as high as any from MORECORE or MMAP */
2422 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2423 /* Check if address of next chunk n is higher than base chunk p */
2424 #define ok_next(p, n) ((char*)(p) < (char*)(n))
2425 /* Check if p has its cinuse bit on */
2426 #define ok_cinuse(p) cinuse(p)
2427 /* Check if p has its pinuse bit on */
2428 #define ok_pinuse(p) pinuse(p)
2429 
2430 #else /* !INSECURE */
2431 #define ok_address(M, a) (1)
2432 #define ok_next(b, n) (1)
2433 #define ok_cinuse(p) (1)
2434 #define ok_pinuse(p) (1)
2435 #endif /* !INSECURE */
2436 
2437 #if (FOOTERS && !INSECURE)
2438 /* Check if (alleged) mstate m has expected magic field */
2439 #define ok_magic(M) ((M)->magic == mparams.magic)
2440 #else /* (FOOTERS && !INSECURE) */
2441 #define ok_magic(M) (1)
2442 #endif /* (FOOTERS && !INSECURE) */
2443 
2444 
2445 /* In gcc, use __builtin_expect to minimize impact of checks */
2446 #if !INSECURE
2447 #if defined(__GNUC__) && __GNUC__ >= 3
2448 #define RTCHECK(e) __builtin_expect(e, 1)
2449 #else /* GNUC */
2450 #define RTCHECK(e) (e)
2451 #endif /* GNUC */
2452 #else /* !INSECURE */
2453 #define RTCHECK(e) (1)
2454 #endif /* !INSECURE */
2455 
2456 /* macros to set up inuse chunks with or without footers */
2457 
2458 #if !FOOTERS
2459 
2460 #define mark_inuse_foot(M,p,s)
2461 
2462 /* Set cinuse bit and pinuse bit of next chunk */
2463 #define set_inuse(M,p,s)\
2464  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2465  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2466 
2467 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
2468 #define set_inuse_and_pinuse(M,p,s)\
2469  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2470  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2471 
2472 /* Set size, cinuse and pinuse bit of this chunk */
2473 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2474  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2475 
2476 #else /* FOOTERS */
2477 
2478 /* Set foot of inuse chunk to be xor of mstate and seed */
2479 #define mark_inuse_foot(M,p,s)\
2480  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2481 
2482 #define get_mstate_for(p)\
2483  ((mstate)(((mchunkptr)((char*)(p) +\
2484  (chunksize(p))))->prev_foot ^ mparams.magic))
2485 
2486 #define set_inuse(M,p,s)\
2487  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2488  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2489  mark_inuse_foot(M,p,s))
2490 
2491 #define set_inuse_and_pinuse(M,p,s)\
2492  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2493  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2494  mark_inuse_foot(M,p,s))
2495 
2496 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2497  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2498  mark_inuse_foot(M, p, s))
2499 
2500 #endif /* !FOOTERS */
2501 
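/*
  Illustrative sketch of the footer cross-check set up above: when FOOTERS
  is on, the owning mstate can be recovered from an inuse chunk's footer
  and verified against mparams.magic before the chunk is trusted.  The
  function name is ours; free() and realloc() perform the equivalent check
  inline.
*/
#if FOOTERS
static int example_footer_ok(void *mem)
{
    mchunkptr p = mem2chunk(mem);     /* user pointer -> chunk header */
    mstate fm = get_mstate_for(p);    /* footer xor magic -> alleged mstate */
    return ok_magic(fm);              /* nonzero if the magic tag matches */
}
#endif /* FOOTERS */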
2502 /* ---------------------------- setting mparams -------------------------- */
2503 
2504 /* Initialize mparams */
2505 static int
2506 init_mparams(void)
2507 {
2508  if (mparams.page_size == 0) {
2509  size_t s;
2510 
2511  mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2512  mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2513 #if MORECORE_CONTIGUOUS
2514  mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
2515 #else /* MORECORE_CONTIGUOUS */
2516  mparams.default_mflags =
2517  USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
2518 #endif /* MORECORE_CONTIGUOUS */
2519 
2520 #if (FOOTERS && !INSECURE)
2521  {
2522 #if USE_DEV_RANDOM
2523  int fd;
2524  unsigned char buf[sizeof(size_t)];
2525  /* Try to use /dev/urandom, else fall back on using time */
2526  if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2527  read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2528  s = *((size_t *) buf);
2529  close(fd);
2530  } else
2531 #endif /* USE_DEV_RANDOM */
2532  s = (size_t) (time(0) ^ (size_t) 0x55555555U);
2533 
2534  s |= (size_t) 8U; /* ensure nonzero */
2535  s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2536 
2537  }
2538 #else /* (FOOTERS && !INSECURE) */
2539  s = (size_t) 0x58585858U;
2540 #endif /* (FOOTERS && !INSECURE) */
2541  ACQUIRE_MAGIC_INIT_LOCK();
2542  if (mparams.magic == 0) {
2543  mparams.magic = s;
2544  /* Set up lock for main malloc area */
2545  INITIAL_LOCK(&gm->mutex);
2546  gm->mflags = mparams.default_mflags;
2547  }
2548  RELEASE_MAGIC_INIT_LOCK();
2549 
2550 #ifndef WIN32
2551  mparams.page_size = malloc_getpagesize;
2552  mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
2553  DEFAULT_GRANULARITY : mparams.page_size);
2554 #else /* WIN32 */
2555  {
2556  SYSTEM_INFO system_info;
2557  GetSystemInfo(&system_info);
2558  mparams.page_size = system_info.dwPageSize;
2559  mparams.granularity = system_info.dwAllocationGranularity;
2560  }
2561 #endif /* WIN32 */
2562 
2563  /* Sanity-check configuration:
2564  size_t must be unsigned and as wide as pointer type.
2565  ints must be at least 4 bytes.
2566  alignment must be at least 8.
2567  Alignment, min chunk size, and page size must all be powers of 2.
2568  */
2569  if ((sizeof(size_t) != sizeof(char *)) ||
2570  (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2571  (sizeof(int) < 4) ||
2572  (MALLOC_ALIGNMENT < (size_t) 8U) ||
2573  ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2574  ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2575  ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
2576  || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2577  ABORT;
2578  }
2579  return 0;
2580 }
2581 
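/*
  Illustrative sketch (assumed shape, not an actual entry point): public
  calls bracket their work with PREACTION/POSTACTION so that, when locking
  is enabled, mparams is initialized on first use and the mspace lock is
  held for the whole operation.  example_locked_topsize is a made-up name.
*/
static size_t example_locked_topsize(mstate m)
{
    size_t result = 0;
    if (!PREACTION(m)) {      /* init params + acquire m->mutex when locks are on */
        result = m->topsize;  /* ... work on the malloc_state ... */
        POSTACTION(m);        /* release the lock */
    }
    return result;
}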
2582 /* support for mallopt */
2583 static int
2584 change_mparam(int param_number, int value)
2585 {
2586  size_t val = (size_t) value;
2587  init_mparams();
2588  switch (param_number) {
2589  case M_TRIM_THRESHOLD:
2590  mparams.trim_threshold = val;
2591  return 1;
2592  case M_GRANULARITY:
2593  if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
2594  mparams.granularity = val;
2595  return 1;
2596  } else
2597  return 0;
2598  case M_MMAP_THRESHOLD:
2599  mparams.mmap_threshold = val;
2600  return 1;
2601  default:
2602  return 0;
2603  }
2604 }
2605 
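/*
  Illustrative sketch: change_mparam() provides the mallopt support noted
  above.  Raising the mmap threshold to 1 MB, for example, looks like the
  call below; the M_* parameter numbers are defined earlier in this file.
  example_tune is a made-up name, for illustration only.
*/
static void example_tune(void)
{
    int ok = change_mparam(M_MMAP_THRESHOLD, 1024 * 1024); /* returns 1 on success */
    (void) ok;
}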
2606 #if DEBUG
2607 /* ------------------------- Debugging Support --------------------------- */
2608 
2609 /* Check properties of any chunk, whether free, inuse, mmapped etc */
2610 static void
2611 do_check_any_chunk(mstate m, mchunkptr p)
2612 {
2613  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2614  assert(ok_address(m, p));
2615 }
2616 
2617 /* Check properties of top chunk */
2618 static void
2619 do_check_top_chunk(mstate m, mchunkptr p)
2620 {
2621  msegmentptr sp = segment_holding(m, (char *) p);
2622  size_t sz = chunksize(p);
2623  assert(sp != 0);
2624  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2625  assert(ok_address(m, p));
2626  assert(sz == m->topsize);
2627  assert(sz > 0);
2628  assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2629  assert(pinuse(p));
2630  assert(!next_pinuse(p));
2631 }
2632 
2633 /* Check properties of (inuse) mmapped chunks */
2634 static void
2635 do_check_mmapped_chunk(mstate m, mchunkptr p)
2636 {
2637  size_t sz = chunksize(p);
2638  size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2639  assert(is_mmapped(p));
2640  assert(use_mmap(m));
2641  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2642  assert(ok_address(m, p));
2643  assert(!is_small(sz));
2644  assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2645  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2646  assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
2647 }
2648 
2649 /* Check properties of inuse chunks */
2650 static void
2651 do_check_inuse_chunk(mstate m, mchunkptr p)
2652 {
2653  do_check_any_chunk(m, p);
2654  assert(cinuse(p));
2655  assert(next_pinuse(p));
2656  /* If not pinuse and not mmapped, previous chunk has OK offset */
2657  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2658  if (is_mmapped(p))
2659  do_check_mmapped_chunk(m, p);
2660 }
2661 
2662 /* Check properties of free chunks */
2663 static void
2664 do_check_free_chunk(mstate m, mchunkptr p)
2665 {
2666  size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2667  mchunkptr next = chunk_plus_offset(p, sz);
2668  do_check_any_chunk(m, p);
2669  assert(!cinuse(p));
2670  assert(!next_pinuse(p));
2671  assert(!is_mmapped(p));
2672  if (p != m->dv && p != m->top) {
2673  if (sz >= MIN_CHUNK_SIZE) {
2674  assert((sz & CHUNK_ALIGN_MASK) == 0);
2675  assert(is_aligned(chunk2mem(p)));
2676  assert(next->prev_foot == sz);
2677  assert(pinuse(p));
2678  assert(next == m->top || cinuse(next));
2679  assert(p->fd->bk == p);
2680  assert(p->bk->fd == p);
2681  } else /* markers are always of size SIZE_T_SIZE */
2682  assert(sz == SIZE_T_SIZE);
2683  }
2684 }
2685 
2686 /* Check properties of malloced chunks at the point they are malloced */
2687 static void
2688 do_check_malloced_chunk(mstate m, void *mem, size_t s)
2689 {
2690  if (mem != 0) {
2691  mchunkptr p = mem2chunk(mem);
2692  size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2693  do_check_inuse_chunk(m, p);
2694  assert((sz & CHUNK_ALIGN_MASK) == 0);
2695  assert(sz >= MIN_CHUNK_SIZE);
2696  assert(sz >= s);
2697  /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2698  assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2699  }
2700 }
2701 
2702 /* Check a tree and its subtrees. */
2703 static void
2704 do_check_tree(mstate m, tchunkptr t)
2705 {
2706  tchunkptr head = 0;
2707  tchunkptr u = t;
2708  bindex_t tindex = t->index;
2709  size_t tsize = chunksize(t);
2710  bindex_t idx;
2711  compute_tree_index(tsize, idx);
2712  assert(tindex == idx);
2713  assert(tsize >= MIN_LARGE_SIZE);
2714  assert(tsize >= minsize_for_tree_index(idx));
2715  assert((idx == NTREEBINS - 1)
2716  || (tsize < minsize_for_tree_index((idx + 1))));
2717 
2718  do { /* traverse through chain of same-sized nodes */
2719  do_check_any_chunk(m, ((mchunkptr) u));
2720  assert(u->index == tindex);
2721  assert(chunksize(u) == tsize);
2722  assert(!cinuse(u));
2723  assert(!next_pinuse(u));
2724  assert(u->fd->bk == u);
2725  assert(u->bk->fd == u);
2726  if (u->parent == 0) {
2727  assert(u->child[0] == 0);
2728  assert(u->child[1] == 0);
2729  } else {
2730  assert(head == 0); /* only one node on chain has parent */
2731  head = u;
2732  assert(u->parent != u);
2733  assert(u->parent->child[0] == u ||
2734  u->parent->child[1] == u ||
2735  *((tbinptr *) (u->parent)) == u);
2736  if (u->child[0] != 0) {
2737  assert(u->child[0]->parent == u);
2738  assert(u->child[0] != u);
2739  do_check_tree(m, u->child[0]);
2740  }
2741  if (u->child[1] != 0) {
2742  assert(u->child[1]->parent == u);
2743  assert(u->child[1] != u);
2744  do_check_tree(m, u->child[1]);
2745  }
2746  if (u->child[0] != 0 && u->child[1] != 0) {
2747  assert(chunksize(u->child[0]) < chunksize(u->child[1]));
2748  }
2749  }
2750  u = u->fd;
2751  } while (u != t);
2752  assert(head != 0);
2753 }
2754 
2755 /* Check all the chunks in a treebin. */
2756 static void
2757 do_check_treebin(mstate m, bindex_t i)
2758 {
2759  tbinptr *tb = treebin_at(m, i);
2760  tchunkptr t = *tb;
2761  int empty = (m->treemap & (1U << i)) == 0;
2762  if (t == 0)
2763  assert(empty);
2764  if (!empty)
2765  do_check_tree(m, t);
2766 }
2767 
2768 /* Check all the chunks in a smallbin. */
2769 static void
2770 do_check_smallbin(mstate m, bindex_t i)
2771 {
2772  sbinptr b = smallbin_at(m, i);
2773  mchunkptr p = b->bk;
2774  unsigned int empty = (m->smallmap & (1U << i)) == 0;
2775  if (p == b)
2776  assert(empty);
2777  if (!empty) {
2778  for (; p != b; p = p->bk) {
2779  size_t size = chunksize(p);
2780  mchunkptr q;
2781  /* each chunk claims to be free */
2782  do_check_free_chunk(m, p);
2783  /* chunk belongs in bin */
2784  assert(small_index(size) == i);
2785  assert(p->bk == b || chunksize(p->bk) == chunksize(p));
2786  /* chunk is followed by an inuse chunk */
2787  q = next_chunk(p);
2788  if (q->head != FENCEPOST_HEAD)
2789  do_check_inuse_chunk(m, q);
2790  }
2791  }
2792 }
2793 
2794 /* Find x in a bin. Used in other check functions. */
2795 static int
2796 bin_find(mstate m, mchunkptr x)
2797 {
2798  size_t size = chunksize(x);
2799  if (is_small(size)) {
2800  bindex_t sidx = small_index(size);
2801  sbinptr b = smallbin_at(m, sidx);
2802  if (smallmap_is_marked(m, sidx)) {
2803  mchunkptr p = b;
2804  do {
2805  if (p == x)
2806  return 1;
2807  } while ((p = p->fd) != b);
2808  }
2809  } else {
2810  bindex_t tidx;
2811  compute_tree_index(size, tidx);
2812  if (treemap_is_marked(m, tidx)) {
2813  tchunkptr t = *treebin_at(m, tidx);
2814  size_t sizebits = size << leftshift_for_tree_index(tidx);
2815  while (t != 0 && chunksize(t) != size) {
2816  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2817  sizebits <<= 1;
2818  }
2819  if (t != 0) {
2820  tchunkptr u = t;
2821  do {
2822  if (u == (tchunkptr) x)
2823  return 1;
2824  } while ((u = u->fd) != t);
2825  }
2826  }
2827  }
2828  return 0;
2829 }
2830 
2831 /* Traverse each chunk and check it; return total */
2832 static size_t
2833 traverse_and_check(mstate m)
2834 {
2835  size_t sum = 0;
2836  if (is_initialized(m)) {
2837  msegmentptr s = &m->seg;
2838  sum += m->topsize + TOP_FOOT_SIZE;
2839  while (s != 0) {
2840  mchunkptr q = align_as_chunk(s->base);
2841  mchunkptr lastq = 0;
2842  assert(pinuse(q));
2843  while (segment_holds(s, q) &&
2844  q != m->top && q->head != FENCEPOST_HEAD) {
2845  sum += chunksize(q);
2846  if (cinuse(q)) {
2847  assert(!bin_find(m, q));
2848  do_check_inuse_chunk(m, q);
2849  } else {
2850  assert(q == m->dv || bin_find(m, q));
2851  assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
2852  do_check_free_chunk(m, q);
2853  }
2854  lastq = q;
2855  q = next_chunk(q);
2856  }
2857  s = s->next;
2858  }
2859  }
2860  return sum;
2861 }
2862 
2863 /* Check all properties of malloc_state. */
2864 static void
2865 do_check_malloc_state(mstate m)
2866 {
2867  bindex_t i;
2868  size_t total;
2869  /* check bins */
2870  for (i = 0; i < NSMALLBINS; ++i)
2871  do_check_smallbin(m, i);
2872  for (i = 0; i < NTREEBINS; ++i)
2873  do_check_treebin(m, i);
2874 
2875  if (m->dvsize != 0) { /* check dv chunk */
2876  do_check_any_chunk(m, m->dv);
2877  assert(m->dvsize == chunksize(m->dv));
2878  assert(m->dvsize >= MIN_CHUNK_SIZE);
2879  assert(bin_find(m, m->dv) == 0);
2880  }
2881 
2882  if (m->top != 0) { /* check top chunk */
2883  do_check_top_chunk(m, m->top);
2884  assert(m->topsize == chunksize(m->top));
2885  assert(m->topsize > 0);
2886  assert(bin_find(m, m->top) == 0);
2887  }
2888 
2889  total = traverse_and_check(m);
2890  assert(total <= m->footprint);
2891  assert(m->footprint <= m->max_footprint);
2892 }
2893 #endif /* DEBUG */
2894 
2895 /* ----------------------------- statistics ------------------------------ */
2896 
2897 #if !NO_MALLINFO
2898 static struct mallinfo
2899 internal_mallinfo(mstate m)
2900 {
2901  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2902  if (!PREACTION(m)) {
2903  check_malloc_state(m);
2904  if (is_initialized(m)) {
2905  size_t nfree = SIZE_T_ONE; /* top always free */
2906  size_t mfree = m->topsize + TOP_FOOT_SIZE;
2907  size_t sum = mfree;
2908  msegmentptr s = &m->seg;
2909  while (s != 0) {
2910  mchunkptr q = align_as_chunk(s->base);
2911  while (segment_holds(s, q) &&
2912  q != m->top && q->head != FENCEPOST_HEAD) {
2913  size_t sz = chunksize(q);
2914  sum += sz;
2915  if (!cinuse(q)) {
2916  mfree += sz;
2917  ++nfree;
2918  }
2919  q = next_chunk(q);
2920  }
2921  s = s->next;
2922  }
2923 
2924  nm.arena = sum;
2925  nm.ordblks = nfree;
2926  nm.hblkhd = m->footprint - sum;
2927  nm.usmblks = m->max_footprint;
2928  nm.uordblks = m->footprint - mfree;
2929  nm.fordblks = mfree;
2930  nm.keepcost = m->topsize;
2931  }
2932 
2933  POSTACTION(m);
2934  }
2935  return nm;
2936 }
2937 #endif /* !NO_MALLINFO */
2938 
2939 static void
2940 internal_malloc_stats(mstate m)
2941 {
2942  if (!PREACTION(m)) {
2943  size_t maxfp = 0;
2944  size_t fp = 0;
2945  size_t used = 0;
2946  check_malloc_state(m);
2947  if (is_initialized(m)) {
2948  msegmentptr s = &m->seg;
2949  maxfp = m->max_footprint;
2950  fp = m->footprint;
2951  used = fp - (m->topsize + TOP_FOOT_SIZE);
2952 
2953  while (s != 0) {
2954  mchunkptr q = align_as_chunk(s->base);
2955  while (segment_holds(s, q) &&
2956  q != m->top && q->head != FENCEPOST_HEAD) {
2957  if (!cinuse(q))
2958  used -= chunksize(q);
2959  q = next_chunk(q);
2960  }
2961  s = s->next;
2962  }
2963  }
2964 #ifndef LACKS_STDIO_H
2965  fprintf(stderr, "max system bytes = %10lu\n",
2966  (unsigned long) (maxfp));
2967  fprintf(stderr, "system bytes = %10lu\n", (unsigned long) (fp));
2968  fprintf(stderr, "in use bytes = %10lu\n", (unsigned long) (used));
2969 #endif
2970 
2971  POSTACTION(m);
2972  }
2973 }
2974 
2975 /* ----------------------- Operations on smallbins ----------------------- */
2976 
2977 /*
2978  Various forms of linking and unlinking are defined as macros. Even
2979  the ones for trees, which are very long but have very short typical
2980  paths. This is ugly but reduces reliance on inlining support of
2981  compilers.
2982 */
2983 
2984 /* Link a free chunk into a smallbin */
2985 #define insert_small_chunk(M, P, S) {\
2986  bindex_t I = small_index(S);\
2987  mchunkptr B = smallbin_at(M, I);\
2988  mchunkptr F = B;\
2989  assert(S >= MIN_CHUNK_SIZE);\
2990  if (!smallmap_is_marked(M, I))\
2991  mark_smallmap(M, I);\
2992  else if (RTCHECK(ok_address(M, B->fd)))\
2993  F = B->fd;\
2994  else {\
2995  CORRUPTION_ERROR_ACTION(M);\
2996  }\
2997  B->fd = P;\
2998  F->bk = P;\
2999  P->fd = F;\
3000  P->bk = B;\
3001 }
3002 
3003 /* Unlink a chunk from a smallbin */
3004 #define unlink_small_chunk(M, P, S) {\
3005  mchunkptr F = P->fd;\
3006  mchunkptr B = P->bk;\
3007  bindex_t I = small_index(S);\
3008  assert(P != B);\
3009  assert(P != F);\
3010  assert(chunksize(P) == small_index2size(I));\
3011  if (F == B)\
3012  clear_smallmap(M, I);\
3013  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3014  (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3015  F->bk = B;\
3016  B->fd = F;\
3017  }\
3018  else {\
3019  CORRUPTION_ERROR_ACTION(M);\
3020  }\
3021 }
3022 
3023 /* Unlink the first chunk from a smallbin */
3024 #define unlink_first_small_chunk(M, B, P, I) {\
3025  mchunkptr F = P->fd;\
3026  assert(P != B);\
3027  assert(P != F);\
3028  assert(chunksize(P) == small_index2size(I));\
3029  if (B == F)\
3030  clear_smallmap(M, I);\
3031  else if (RTCHECK(ok_address(M, F))) {\
3032  B->fd = F;\
3033  F->bk = B;\
3034  }\
3035  else {\
3036  CORRUPTION_ERROR_ACTION(M);\
3037  }\
3038 }
3039 
3040 /* Replace dv node, binning the old one */
3041 /* Used only when dvsize known to be small */
3042 #define replace_dv(M, P, S) {\
3043  size_t DVS = M->dvsize;\
3044  if (DVS != 0) {\
3045  mchunkptr DV = M->dv;\
3046  assert(is_small(DVS));\
3047  insert_small_chunk(M, DV, DVS);\
3048  }\
3049  M->dvsize = S;\
3050  M->dv = P;\
3051 }
3052 
3053 /* ------------------------- Operations on trees ------------------------- */
3054 
3055 /* Insert chunk into tree */
3056 #define insert_large_chunk(M, X, S) {\
3057  tbinptr* H;\
3058  bindex_t I;\
3059  compute_tree_index(S, I);\
3060  H = treebin_at(M, I);\
3061  X->index = I;\
3062  X->child[0] = X->child[1] = 0;\
3063  if (!treemap_is_marked(M, I)) {\
3064  mark_treemap(M, I);\
3065  *H = X;\
3066  X->parent = (tchunkptr)H;\
3067  X->fd = X->bk = X;\
3068  }\
3069  else {\
3070  tchunkptr T = *H;\
3071  size_t K = S << leftshift_for_tree_index(I);\
3072  for (;;) {\
3073  if (chunksize(T) != S) {\
3074  tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3075  K <<= 1;\
3076  if (*C != 0)\
3077  T = *C;\
3078  else if (RTCHECK(ok_address(M, C))) {\
3079  *C = X;\
3080  X->parent = T;\
3081  X->fd = X->bk = X;\
3082  break;\
3083  }\
3084  else {\
3085  CORRUPTION_ERROR_ACTION(M);\
3086  break;\
3087  }\
3088  }\
3089  else {\
3090  tchunkptr F = T->fd;\
3091  if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3092  T->fd = F->bk = X;\
3093  X->fd = F;\
3094  X->bk = T;\
3095  X->parent = 0;\
3096  break;\
3097  }\
3098  else {\
3099  CORRUPTION_ERROR_ACTION(M);\
3100  break;\
3101  }\
3102  }\
3103  }\
3104  }\
3105 }
3106 
3107 /*
3108  Unlink steps:
3109 
3110  1. If x is a chained node, unlink it from its same-sized fd/bk links
3111  and choose its bk node as its replacement.
3112  2. If x was the last node of its size, but not a leaf node, it must
3113  be replaced with a leaf node (not merely one with an open left or
3114  right), to make sure that lefts and rights of descendants
3115  correspond properly to bit masks. We use the rightmost descendant
3116  of x. We could use any other leaf, but this is easy to locate and
3117  tends to counteract removal of leftmosts elsewhere, and so keeps
3118  paths shorter than minimally guaranteed. This doesn't loop much
3119  because on average a node in a tree is near the bottom.
3120  3. If x is the base of a chain (i.e., has parent links) relink
3121  x's parent and children to x's replacement (or null if none).
3122 */
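/*
  Illustrative sketch (not part of dlmalloc): steps 1 and 2 above written in
  function form, with the RTCHECK address checks omitted. The helper name is
  hypothetical; the real work, including step 3, is done by the
  unlink_large_chunk macro below.
*/
static tchunkptr
sketch_unlink_replacement(tchunkptr x)
{
    tchunkptr r = 0;
    if (x->bk != x) {                   /* step 1: x sits on a same-sized chain */
        r = x->bk;                      /* its bk neighbor becomes the replacement */
        x->fd->bk = r;
        r->fd = x->fd;
    } else {                            /* step 2: x was the only node of its size */
        tchunkptr *rp;
        if ((r = *(rp = &x->child[1])) != 0 ||
            (r = *(rp = &x->child[0])) != 0) {
            tchunkptr *cp;              /* walk down to the rightmost leaf */
            while ((*(cp = &r->child[1])) != 0 ||
                   (*(cp = &r->child[0])) != 0)
                r = *(rp = cp);
            *rp = 0;                    /* detach that leaf from its old parent slot */
        }
    }
    return r;     /* step 3 (splicing r into x's parent and children) follows below */
}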
3123 
3124 #define unlink_large_chunk(M, X) {\
3125  tchunkptr XP = X->parent;\
3126  tchunkptr R;\
3127  if (X->bk != X) {\
3128  tchunkptr F = X->fd;\
3129  R = X->bk;\
3130  if (RTCHECK(ok_address(M, F))) {\
3131  F->bk = R;\
3132  R->fd = F;\
3133  }\
3134  else {\
3135  CORRUPTION_ERROR_ACTION(M);\
3136  }\
3137  }\
3138  else {\
3139  tchunkptr* RP;\
3140  if (((R = *(RP = &(X->child[1]))) != 0) ||\
3141  ((R = *(RP = &(X->child[0]))) != 0)) {\
3142  tchunkptr* CP;\
3143  while ((*(CP = &(R->child[1])) != 0) ||\
3144  (*(CP = &(R->child[0])) != 0)) {\
3145  R = *(RP = CP);\
3146  }\
3147  if (RTCHECK(ok_address(M, RP)))\
3148  *RP = 0;\
3149  else {\
3150  CORRUPTION_ERROR_ACTION(M);\
3151  }\
3152  }\
3153  }\
3154  if (XP != 0) {\
3155  tbinptr* H = treebin_at(M, X->index);\
3156  if (X == *H) {\
3157  if ((*H = R) == 0) \
3158  clear_treemap(M, X->index);\
3159  }\
3160  else if (RTCHECK(ok_address(M, XP))) {\
3161  if (XP->child[0] == X) \
3162  XP->child[0] = R;\
3163  else \
3164  XP->child[1] = R;\
3165  }\
3166  else\
3167  CORRUPTION_ERROR_ACTION(M);\
3168  if (R != 0) {\
3169  if (RTCHECK(ok_address(M, R))) {\
3170  tchunkptr C0, C1;\
3171  R->parent = XP;\
3172  if ((C0 = X->child[0]) != 0) {\
3173  if (RTCHECK(ok_address(M, C0))) {\
3174  R->child[0] = C0;\
3175  C0->parent = R;\
3176  }\
3177  else\
3178  CORRUPTION_ERROR_ACTION(M);\
3179  }\
3180  if ((C1 = X->child[1]) != 0) {\
3181  if (RTCHECK(ok_address(M, C1))) {\
3182  R->child[1] = C1;\
3183  C1->parent = R;\
3184  }\
3185  else\
3186  CORRUPTION_ERROR_ACTION(M);\
3187  }\
3188  }\
3189  else\
3190  CORRUPTION_ERROR_ACTION(M);\
3191  }\
3192  }\
3193 }
3194 
3195 /* Relays to large vs small bin operations */
3196 
3197 #define insert_chunk(M, P, S)\
3198  if (is_small(S)) insert_small_chunk(M, P, S)\
3199  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3200 
3201 #define unlink_chunk(M, P, S)\
3202  if (is_small(S)) unlink_small_chunk(M, P, S)\
3203  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3204 
3205 
3206 /* Relays to internal calls to malloc/free from realloc, memalign etc */
3207 
3208 #if ONLY_MSPACES
3209 #define internal_malloc(m, b) mspace_malloc(m, b)
3210 #define internal_free(m, mem) mspace_free(m,mem);
3211 #else /* ONLY_MSPACES */
3212 #if MSPACES
3213 #define internal_malloc(m, b)\
3214  (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3215 #define internal_free(m, mem)\
3216  if (m == gm) dlfree(mem); else mspace_free(m,mem);
3217 #else /* MSPACES */
3218 #define internal_malloc(m, b) dlmalloc(b)
3219 #define internal_free(m, mem) dlfree(mem)
3220 #endif /* MSPACES */
3221 #endif /* ONLY_MSPACES */
3222 
3223 /* ----------------------- Direct-mmapping chunks ----------------------- */
3224 
3225 /*
3226  Directly mmapped chunks are set up with an offset to the start of
3227  the mmapped region stored in the prev_foot field of the chunk. This
3228  allows reconstruction of the required argument to MUNMAP when freed,
3229  and also allows adjustment of the returned chunk to meet alignment
3230  requirements (especially in memalign). There is also enough space
3231  allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3232  the PINUSE bit so frees can be checked.
3233 */
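/*
  Sketch of the bookkeeping described above (illustrative only; the helper
  name is hypothetical). Given a directly mmapped chunk p, the arguments for
  MUNMAP are reconstructed from the offset kept in prev_foot, exactly as the
  free paths later in this file do it.
*/
static void
sketch_unmap_direct_chunk(mstate m, mchunkptr p)
{
    size_t offset = p->prev_foot & ~IS_MMAPPED_BIT;        /* distance back to the mmap base */
    size_t psize = chunksize(p) + offset + MMAP_FOOT_PAD;  /* size of the whole mapping */
    if (CALL_MUNMAP((char *) p - offset, psize) == 0)
        m->footprint -= psize;
}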
3234 
3235 /* Malloc using mmap */
3236 static void *
3237 mmap_alloc(mstate m, size_t nb)
3238 {
3239  size_t mmsize =
3240  granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3241  if (mmsize > nb) { /* Check for wrap around 0 */
3242  char *mm = (char *) (DIRECT_MMAP(mmsize));
3243  if (mm != CMFAIL) {
3244  size_t offset = align_offset(chunk2mem(mm));
3245  size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3246  mchunkptr p = (mchunkptr) (mm + offset);
3247  p->prev_foot = offset | IS_MMAPPED_BIT;
3248  (p)->head = (psize | CINUSE_BIT);
3249  mark_inuse_foot(m, p, psize);
3250  chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3251  chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
3252 
3253  if (mm < m->least_addr)
3254  m->least_addr = mm;
3255  if ((m->footprint += mmsize) > m->max_footprint)
3256  m->max_footprint = m->footprint;
3257  assert(is_aligned(chunk2mem(p)));
3258  check_mmapped_chunk(m, p);
3259  return chunk2mem(p);
3260  }
3261  }
3262  return 0;
3263 }
3264 
3265 /* Realloc using mmap */
3266 static mchunkptr
3267 mmap_resize(mstate m, mchunkptr oldp, size_t nb)
3268 {
3269  size_t oldsize = chunksize(oldp);
3270  if (is_small(nb)) /* Can't shrink mmap regions below small size */
3271  return 0;
3272  /* Keep old chunk if big enough but not too big */
3273  if (oldsize >= nb + SIZE_T_SIZE &&
3274  (oldsize - nb) <= (mparams.granularity << 1))
3275  return oldp;
3276  else {
3277  size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3278  size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3279  size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
3280  CHUNK_ALIGN_MASK);
3281  char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
3282  oldmmsize, newmmsize, 1);
3283  if (cp != CMFAIL) {
3284  mchunkptr newp = (mchunkptr) (cp + offset);
3285  size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3286  newp->head = (psize | CINUSE_BIT);
3287  mark_inuse_foot(m, newp, psize);
3288  chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3289  chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
3290 
3291  if (cp < m->least_addr)
3292  m->least_addr = cp;
3293  if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3294  m->max_footprint = m->footprint;
3295  check_mmapped_chunk(m, newp);
3296  return newp;
3297  }
3298  }
3299  return 0;
3300 }
3301 
3302 /* -------------------------- mspace management -------------------------- */
3303 
3304 /* Initialize top chunk and its size */
3305 static void
3306 init_top(mstate m, mchunkptr p, size_t psize)
3307 {
3308  /* Ensure alignment */
3309  size_t offset = align_offset(chunk2mem(p));
3310  p = (mchunkptr) ((char *) p + offset);
3311  psize -= offset;
3312 
3313  m->top = p;
3314  m->topsize = psize;
3315  p->head = psize | PINUSE_BIT;
3316  /* set size of fake trailing chunk holding overhead space only once */
3317  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3318  m->trim_check = mparams.trim_threshold; /* reset on each update */
3319 }
3320 
3321 /* Initialize bins for a new mstate that is otherwise zeroed out */
3322 static void
3323 init_bins(mstate m)
3324 {
3325  /* Establish circular links for smallbins */
3326  bindex_t i;
3327  for (i = 0; i < NSMALLBINS; ++i) {
3328  sbinptr bin = smallbin_at(m, i);
3329  bin->fd = bin->bk = bin;
3330  }
3331 }
3332 
3333 #if PROCEED_ON_ERROR
3334 
3335 /* default corruption action */
3336 static void
3337 reset_on_error(mstate m)
3338 {
3339  int i;
3340  ++malloc_corruption_error_count;
3341  /* Reinitialize fields to forget about all memory */
3342  m->smallbins = m->treebins = 0;
3343  m->dvsize = m->topsize = 0;
3344  m->seg.base = 0;
3345  m->seg.size = 0;
3346  m->seg.next = 0;
3347  m->top = m->dv = 0;
3348  for (i = 0; i < NTREEBINS; ++i)
3349  *treebin_at(m, i) = 0;
3350  init_bins(m);
3351 }
3352 #endif /* PROCEED_ON_ERROR */
3353 
3354 /* Allocate chunk and prepend remainder with chunk in successor base. */
3355 static void *
3356 prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
3357 {
3358  mchunkptr p = align_as_chunk(newbase);
3359  mchunkptr oldfirst = align_as_chunk(oldbase);
3360  size_t psize = (char *) oldfirst - (char *) p;
3361  mchunkptr q = chunk_plus_offset(p, nb);
3362  size_t qsize = psize - nb;
3363  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3364 
3365  assert((char *) oldfirst > (char *) q);
3366  assert(pinuse(oldfirst));
3367  assert(qsize >= MIN_CHUNK_SIZE);
3368 
3369  /* consolidate remainder with first chunk of old base */
3370  if (oldfirst == m->top) {
3371  size_t tsize = m->topsize += qsize;
3372  m->top = q;
3373  q->head = tsize | PINUSE_BIT;
3374  check_top_chunk(m, q);
3375  } else if (oldfirst == m->dv) {
3376  size_t dsize = m->dvsize += qsize;
3377  m->dv = q;
3378  set_size_and_pinuse_of_free_chunk(q, dsize);
3379  } else {
3380  if (!cinuse(oldfirst)) {
3381  size_t nsize = chunksize(oldfirst);
3382  unlink_chunk(m, oldfirst, nsize);
3383  oldfirst = chunk_plus_offset(oldfirst, nsize);
3384  qsize += nsize;
3385  }
3386  set_free_with_pinuse(q, qsize, oldfirst);
3387  insert_chunk(m, q, qsize);
3388  check_free_chunk(m, q);
3389  }
3390 
3391  check_malloced_chunk(m, chunk2mem(p), nb);
3392  return chunk2mem(p);
3393 }
3394 
3395 
3396 /* Add a segment to hold a new noncontiguous region */
3397 static void
3398 add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
3399 {
3400  /* Determine locations and sizes of segment, fenceposts, old top */
3401  char *old_top = (char *) m->top;
3402  msegmentptr oldsp = segment_holding(m, old_top);
3403  char *old_end = oldsp->base + oldsp->size;
3404  size_t ssize = pad_request(sizeof(struct malloc_segment));
3405  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3406  size_t offset = align_offset(chunk2mem(rawsp));
3407  char *asp = rawsp + offset;
3408  char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3409  mchunkptr sp = (mchunkptr) csp;
3410  msegmentptr ss = (msegmentptr) (chunk2mem(sp));
3411  mchunkptr tnext = chunk_plus_offset(sp, ssize);
3412  mchunkptr p = tnext;
3413  int nfences = 0;
3414 
3415  /* reset top to new space */
3416  init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3417 
3418  /* Set up segment record */
3419  assert(is_aligned(ss));
3420  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3421  *ss = m->seg; /* Push current record */
3422  m->seg.base = tbase;
3423  m->seg.size = tsize;
3424  m->seg.sflags = mmapped;
3425  m->seg.next = ss;
3426 
3427  /* Insert trailing fenceposts */
3428  for (;;) {
3429  mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3430  p->head = FENCEPOST_HEAD;
3431  ++nfences;
3432  if ((char *) (&(nextp->head)) < old_end)
3433  p = nextp;
3434  else
3435  break;
3436  }
3437  assert(nfences >= 2);
3438 
3439  /* Insert the rest of old top into a bin as an ordinary free chunk */
3440  if (csp != old_top) {
3441  mchunkptr q = (mchunkptr) old_top;
3442  size_t psize = csp - old_top;
3443  mchunkptr tn = chunk_plus_offset(q, psize);
3444  set_free_with_pinuse(q, psize, tn);
3445  insert_chunk(m, q, psize);
3446  }
3447 
3448  check_top_chunk(m, m->top);
3449 }
3450 
3451 /* -------------------------- System allocation -------------------------- */
3452 
3453 /* Get memory from system using MORECORE or MMAP */
3454 static void *
3455 sys_alloc(mstate m, size_t nb)
3456 {
3457  char *tbase = CMFAIL;
3458  size_t tsize = 0;
3459  flag_t mmap_flag = 0;
3460 
3461  init_mparams();
3462 
3463  /* Directly map large chunks */
3464  if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3465  void *mem = mmap_alloc(m, nb);
3466  if (mem != 0)
3467  return mem;
3468  }
3469 
3470  /*
3471  Try getting memory in any of three ways (in most-preferred to
3472  least-preferred order):
3473  1. A call to MORECORE that can normally contiguously extend memory.
3474  (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3475  main space is mmapped or a previous contiguous call failed)
3476  2. A call to MMAP new space (disabled if not HAVE_MMAP).
3477  Note that under the default settings, if MORECORE is unable to
3478  fulfill a request, and HAVE_MMAP is true, then mmap is
3479  used as a noncontiguous system allocator. This is a useful backup
3480  strategy for systems with holes in address spaces -- in this case
3481  sbrk cannot contiguously expand the heap, but mmap may be able to
3482  find space.
3483  3. A call to MORECORE that cannot usually contiguously extend memory.
3484  (disabled if not HAVE_MORECORE)
3485  */
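 /*
  Condensed sketch of that fallback order (illustrative only; the code below
  is authoritative and also handles alignment fix-ups, MORECORE locking,
  partial failures, and merging the new space with existing segments):

  if (MORECORE_CONTIGUOUS and contiguous calls not disabled)
  tbase = CALL_MORECORE(asize); -- strategy 1
  if (tbase == CMFAIL && HAVE_MMAP)
  tbase = CALL_MMAP(rsize); -- strategy 2
  if (tbase == CMFAIL && HAVE_MORECORE)
  tbase = CALL_MORECORE(asize); -- strategy 3
  */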
3486 
3487  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3488  char *br = CMFAIL;
3489  msegmentptr ss =
3490  (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
3491  size_t asize = 0;
3492  ACQUIRE_MORECORE_LOCK();
3493 
3494  if (ss == 0) { /* First time through or recovery */
3495  char *base = (char *) CALL_MORECORE(0);
3496  if (base != CMFAIL) {
3497  asize =
3498  granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3499  SIZE_T_ONE);
3500  /* Adjust to end on a page boundary */
3501  if (!is_page_aligned(base))
3502  asize += (page_align((size_t) base) - (size_t) base);
3503  /* Can't call MORECORE if size is negative when treated as signed */
3504  if (asize < HALF_MAX_SIZE_T &&
3505  (br = (char *) (CALL_MORECORE(asize))) == base) {
3506  tbase = base;
3507  tsize = asize;
3508  }
3509  }
3510  } else {
3511  /* Subtract out existing available top space from MORECORE request. */
3512  asize =
3513  granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
3514  MALLOC_ALIGNMENT + SIZE_T_ONE);
3515  /* Use mem here only if it did continuously extend old space */
3516  if (asize < HALF_MAX_SIZE_T &&
3517  (br =
3518  (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
3519  tbase = br;
3520  tsize = asize;
3521  }
3522  }
3523 
3524  if (tbase == CMFAIL) { /* Cope with partial failure */
3525  if (br != CMFAIL) { /* Try to use/extend the space we did get */
3526  if (asize < HALF_MAX_SIZE_T &&
3527  asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3528  size_t esize =
3529  granularity_align(nb + TOP_FOOT_SIZE +
3530  MALLOC_ALIGNMENT + SIZE_T_ONE -
3531  asize);
3532  if (esize < HALF_MAX_SIZE_T) {
3533  char *end = (char *) CALL_MORECORE(esize);
3534  if (end != CMFAIL)
3535  asize += esize;
3536  else { /* Can't use; try to release */
3537  end = (char *) CALL_MORECORE(-asize);
3538  br = CMFAIL;
3539  }
3540  }
3541  }
3542  }
3543  if (br != CMFAIL) { /* Use the space we did get */
3544  tbase = br;
3545  tsize = asize;
3546  } else
3547  disable_contiguous(m); /* Don't try contiguous path in the future */
3548  }
3549 
3550  RELEASE_MORECORE_LOCK();
3551  }
3552 
3553  if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
3554  size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
3555  size_t rsize = granularity_align(req);
3556  if (rsize > nb) { /* Fail if wraps around zero */
3557  char *mp = (char *) (CALL_MMAP(rsize));
3558  if (mp != CMFAIL) {
3559  tbase = mp;
3560  tsize = rsize;
3561  mmap_flag = IS_MMAPPED_BIT;
3562  }
3563  }
3564  }
3565 
3566  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3567  size_t asize =
3568  granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3569  SIZE_T_ONE);
3570  if (asize < HALF_MAX_SIZE_T) {
3571  char *br = CMFAIL;
3572  char *end = CMFAIL;
3573  ACQUIRE_MORECORE_LOCK();
3574  br = (char *) (CALL_MORECORE(asize));
3575  end = (char *) (CALL_MORECORE(0));
3576  RELEASE_MORECORE_LOCK();
3577  if (br != CMFAIL && end != CMFAIL && br < end) {
3578  size_t ssize = end - br;
3579  if (ssize > nb + TOP_FOOT_SIZE) {
3580  tbase = br;
3581  tsize = ssize;
3582  }
3583  }
3584  }
3585  }
3586 
3587  if (tbase != CMFAIL) {
3588 
3589  if ((m->footprint += tsize) > m->max_footprint)
3590  m->max_footprint = m->footprint;
3591 
3592  if (!is_initialized(m)) { /* first-time initialization */
3593  m->seg.base = m->least_addr = tbase;
3594  m->seg.size = tsize;
3595  m->seg.sflags = mmap_flag;
3596  m->magic = mparams.magic;
3597  init_bins(m);
3598  if (is_global(m))
3599  init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3600  else {
3601  /* Offset top by embedded malloc_state */
3602  mchunkptr mn = next_chunk(mem2chunk(m));
3603  init_top(m, mn,
3604  (size_t) ((tbase + tsize) - (char *) mn) -
3605  TOP_FOOT_SIZE);
3606  }
3607  }
3608 
3609  else {
3610  /* Try to merge with an existing segment */
3611  msegmentptr sp = &m->seg;
3612  while (sp != 0 && tbase != sp->base + sp->size)
3613  sp = sp->next;
3614  if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
3615  sp->size += tsize;
3616  init_top(m, m->top, m->topsize + tsize);
3617  } else {
3618  if (tbase < m->least_addr)
3619  m->least_addr = tbase;
3620  sp = &m->seg;
3621  while (sp != 0 && sp->base != tbase + tsize)
3622  sp = sp->next;
3623  if (sp != 0 &&
3624  !is_extern_segment(sp) &&
3625  (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3626  char *oldbase = sp->base;
3627  sp->base = tbase;
3628  sp->size += tsize;
3629  return prepend_alloc(m, tbase, oldbase, nb);
3630  } else
3631  add_segment(m, tbase, tsize, mmap_flag);
3632  }
3633  }
3634 
3635  if (nb < m->topsize) { /* Allocate from new or extended top space */
3636  size_t rsize = m->topsize -= nb;
3637  mchunkptr p = m->top;
3638  mchunkptr r = m->top = chunk_plus_offset(p, nb);
3639  r->head = rsize | PINUSE_BIT;
3640  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3641  check_top_chunk(m, m->top);
3642  check_malloced_chunk(m, chunk2mem(p), nb);
3643  return chunk2mem(p);
3644  }
3645  }
3646 
3647  MALLOC_FAILURE_ACTION;
3648  return 0;
3649 }
3650 
3651 /* ----------------------- system deallocation -------------------------- */
3652 
3653 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3654 static size_t
3655 release_unused_segments(mstate m)
3656 {
3657  size_t released = 0;
3658  msegmentptr pred = &m->seg;
3659  msegmentptr sp = pred->next;
3660  while (sp != 0) {
3661  char *base = sp->base;
3662  size_t size = sp->size;
3663  msegmentptr next = sp->next;
3664  if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3665  mchunkptr p = align_as_chunk(base);
3666  size_t psize = chunksize(p);
3667  /* Can unmap if first chunk holds entire segment and not pinned */
3668  if (!cinuse(p)
3669  && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
3670  tchunkptr tp = (tchunkptr) p;
3671  assert(segment_holds(sp, (char *) sp));
3672  if (p == m->dv) {
3673  m->dv = 0;
3674  m->dvsize = 0;
3675  } else {
3676  unlink_large_chunk(m, tp);
3677  }
3678  if (CALL_MUNMAP(base, size) == 0) {
3679  released += size;
3680  m->footprint -= size;
3681  /* unlink obsoleted record */
3682  sp = pred;
3683  sp->next = next;
3684  } else { /* back out if cannot unmap */
3685  insert_large_chunk(m, tp, psize);
3686  }
3687  }
3688  }
3689  pred = sp;
3690  sp = next;
3691  }
3692  return released;
3693 }
3694 
3695 static int
3696 sys_trim(mstate m, size_t pad)
3697 {
3698  size_t released = 0;
3699  if (pad < MAX_REQUEST && is_initialized(m)) {
3700  pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3701 
3702  if (m->topsize > pad) {
3703  /* Shrink top space in granularity-size units, keeping at least one */
3704  size_t unit = mparams.granularity;
3705  size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3706  SIZE_T_ONE) * unit;
3707  msegmentptr sp = segment_holding(m, (char *) m->top);
3708 
3709  if (!is_extern_segment(sp)) {
3710  if (is_mmapped_segment(sp)) {
3711  if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
3712  size_t newsize = sp->size - extra;
3713  /* Prefer mremap, fall back to munmap */
3714  if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
3715  MFAIL)
3716  || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3717  released = extra;
3718  }
3719  }
3720  } else if (HAVE_MORECORE) {
3721  if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3722  extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3723  ACQUIRE_MORECORE_LOCK();
3724  {
3725  /* Make sure end of memory is where we last set it. */
3726  char *old_br = (char *) (CALL_MORECORE(0));
3727  if (old_br == sp->base + sp->size) {
3728  char *rel_br = (char *) (CALL_MORECORE(-extra));
3729  char *new_br = (char *) (CALL_MORECORE(0));
3730  if (rel_br != CMFAIL && new_br < old_br)
3731  released = old_br - new_br;
3732  }
3733  }
3734  RELEASE_MORECORE_LOCK();
3735  }
3736  }
3737 
3738  if (released != 0) {
3739  sp->size -= released;
3740  m->footprint -= released;
3741  init_top(m, m->top, m->topsize - released);
3742  check_top_chunk(m, m->top);
3743  }
3744  }
3745 
3746  /* Unmap any unused mmapped segments */
3747  if (HAVE_MMAP)
3748  released += release_unused_segments(m);
3749 
3750  /* On failure, disable autotrim to avoid repeated failed future calls */
3751  if (released == 0)
3752  m->trim_check = MAX_SIZE_T;
3753  }
3754 
3755  return (released != 0) ? 1 : 0;
3756 }
3757 
3758 /* ---------------------------- malloc support --------------------------- */
3759 
3760 /* allocate a large request from the best fitting chunk in a treebin */
3761 static void *
3762 tmalloc_large(mstate m, size_t nb)
3763 {
3764  tchunkptr v = 0;
3765  size_t rsize = -nb; /* Unsigned negation */
3766  tchunkptr t;
3767  bindex_t idx;
3768  compute_tree_index(nb, idx);
3769 
3770  if ((t = *treebin_at(m, idx)) != 0) {
3771  /* Traverse tree for this bin looking for node with size == nb */
3772  size_t sizebits = nb << leftshift_for_tree_index(idx);
3773  tchunkptr rst = 0; /* The deepest untaken right subtree */
3774  for (;;) {
3775  tchunkptr rt;
3776  size_t trem = chunksize(t) - nb;
3777  if (trem < rsize) {
3778  v = t;
3779  if ((rsize = trem) == 0)
3780  break;
3781  }
3782  rt = t->child[1];
3783  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
3784  if (rt != 0 && rt != t)
3785  rst = rt;
3786  if (t == 0) {
3787  t = rst; /* set t to least subtree holding sizes > nb */
3788  break;
3789  }
3790  sizebits <<= 1;
3791  }
3792  }
3793 
3794  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3795  binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3796  if (leftbits != 0) {
3797  bindex_t i;
3798  binmap_t leastbit = least_bit(leftbits);
3799  compute_bit2idx(leastbit, i);
3800  t = *treebin_at(m, i);
3801  }
3802  }
3803 
3804  while (t != 0) { /* find smallest of tree or subtree */
3805  size_t trem = chunksize(t) - nb;
3806  if (trem < rsize) {
3807  rsize = trem;
3808  v = t;
3809  }
3810  t = leftmost_child(t);
3811  }
3812 
3813  /* If dv is a better fit, return 0 so malloc will use it */
3814  if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
3815  if (RTCHECK(ok_address(m, v))) { /* split */
3816  mchunkptr r = chunk_plus_offset(v, nb);
3817  assert(chunksize(v) == rsize + nb);
3818  if (RTCHECK(ok_next(v, r))) {
3819  unlink_large_chunk(m, v);
3820  if (rsize < MIN_CHUNK_SIZE)
3821  set_inuse_and_pinuse(m, v, (rsize + nb));
3822  else {
3823  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3824  set_size_and_pinuse_of_free_chunk(r, rsize);
3825  insert_chunk(m, r, rsize);
3826  }
3827  return chunk2mem(v);
3828  }
3829  }
3830  CORRUPTION_ERROR_ACTION(m);
3831  }
3832  return 0;
3833 }
3834 
3835 /* allocate a small request from the best fitting chunk in a treebin */
3836 static void *
3837 tmalloc_small(mstate m, size_t nb)
3838 {
3839  tchunkptr t, v;
3840  size_t rsize;
3841  bindex_t i;
3842  binmap_t leastbit = least_bit(m->treemap);
3843  compute_bit2idx(leastbit, i);
3844 
3845  v = t = *treebin_at(m, i);
3846  rsize = chunksize(t) - nb;
3847 
3848  while ((t = leftmost_child(t)) != 0) {
3849  size_t trem = chunksize(t) - nb;
3850  if (trem < rsize) {
3851  rsize = trem;
3852  v = t;
3853  }
3854  }
3855 
3856  if (RTCHECK(ok_address(m, v))) {
3857  mchunkptr r = chunk_plus_offset(v, nb);
3858  assert(chunksize(v) == rsize + nb);
3859  if (RTCHECK(ok_next(v, r))) {
3860  unlink_large_chunk(m, v);
3861  if (rsize < MIN_CHUNK_SIZE)
3862  set_inuse_and_pinuse(m, v, (rsize + nb));
3863  else {
3864  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3865  set_size_and_pinuse_of_free_chunk(r, rsize);
3866  replace_dv(m, r, rsize);
3867  }
3868  return chunk2mem(v);
3869  }
3870  }
3871 
3872  CORRUPTION_ERROR_ACTION(m);
3873  return 0;
3874 }
3875 
3876 /* --------------------------- realloc support --------------------------- */
3877 
3878 static void *
3879 internal_realloc(mstate m, void *oldmem, size_t bytes)
3880 {
3881  if (bytes >= MAX_REQUEST) {
3882  MALLOC_FAILURE_ACTION;
3883  return 0;
3884  }
3885  if (!PREACTION(m)) {
3886  mchunkptr oldp = mem2chunk(oldmem);
3887  size_t oldsize = chunksize(oldp);
3888  mchunkptr next = chunk_plus_offset(oldp, oldsize);
3889  mchunkptr newp = 0;
3890  void *extra = 0;
3891 
3892  /* Try to either shrink or extend into top. Else malloc-copy-free */
3893 
3894  if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
3895  ok_next(oldp, next) && ok_pinuse(next))) {
3896  size_t nb = request2size(bytes);
3897  if (is_mmapped(oldp))
3898  newp = mmap_resize(m, oldp, nb);
3899  else if (oldsize >= nb) { /* already big enough */
3900  size_t rsize = oldsize - nb;
3901  newp = oldp;
3902  if (rsize >= MIN_CHUNK_SIZE) {
3903  mchunkptr remainder = chunk_plus_offset(newp, nb);
3904  set_inuse(m, newp, nb);
3905  set_inuse(m, remainder, rsize);
3906  extra = chunk2mem(remainder);
3907  }
3908  } else if (next == m->top && oldsize + m->topsize > nb) {
3909  /* Expand into top */
3910  size_t newsize = oldsize + m->topsize;
3911  size_t newtopsize = newsize - nb;
3912  mchunkptr newtop = chunk_plus_offset(oldp, nb);
3913  set_inuse(m, oldp, nb);
3914  newtop->head = newtopsize | PINUSE_BIT;
3915  m->top = newtop;
3916  m->topsize = newtopsize;
3917  newp = oldp;
3918  }
3919  } else {
3920  USAGE_ERROR_ACTION(m, oldmem);
3921  POSTACTION(m);
3922  return 0;
3923  }
3924 
3925  POSTACTION(m);
3926 
3927  if (newp != 0) {
3928  if (extra != 0) {
3929  internal_free(m, extra);
3930  }
3931  check_inuse_chunk(m, newp);
3932  return chunk2mem(newp);
3933  } else {
3934  void *newmem = internal_malloc(m, bytes);
3935  if (newmem != 0) {
3936  size_t oc = oldsize - overhead_for(oldp);
3937  memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
3938  internal_free(m, oldmem);
3939  }
3940  return newmem;
3941  }
3942  }
3943  return 0;
3944 }
3945 
3946 /* --------------------------- memalign support -------------------------- */
3947 
3948 static void *
3949 internal_memalign(mstate m, size_t alignment, size_t bytes)
3950 {
3951  if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
3952  return internal_malloc(m, bytes);
3953  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3954  alignment = MIN_CHUNK_SIZE;
3955  if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
3956  size_t a = MALLOC_ALIGNMENT << 1;
3957  while (a < alignment)
3958  a <<= 1;
3959  alignment = a;
3960  }
3961 
3962  if (bytes >= MAX_REQUEST - alignment) {
3963  if (m != 0) { /* Test isn't needed but avoids compiler warning */
3964  MALLOC_FAILURE_ACTION;
3965  }
3966  } else {
3967  size_t nb = request2size(bytes);
3968  size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3969  char *mem = (char *) internal_malloc(m, req);
3970  if (mem != 0) {
3971  void *leader = 0;
3972  void *trailer = 0;
3973  mchunkptr p = mem2chunk(mem);
3974 
3975  if (PREACTION(m))
3976  return 0;
3977  if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */
3978  /*
3979  Find an aligned spot inside chunk. Since we need to give
3980  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3981  the first calculation places us at a spot with less than
3982  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3983  We've allocated enough total room so that this is always
3984  possible.
3985  */
3986  char *br = (char *) mem2chunk((size_t) (((size_t) (mem +
3987  alignment -
3988  SIZE_T_ONE))
3989  & -alignment));
3990  char *pos =
3991  ((size_t) (br - (char *) (p)) >=
3992  MIN_CHUNK_SIZE) ? br : br + alignment;
3993  mchunkptr newp = (mchunkptr) pos;
3994  size_t leadsize = pos - (char *) (p);
3995  size_t newsize = chunksize(p) - leadsize;
3996 
3997  if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3998  newp->prev_foot = p->prev_foot + leadsize;
3999  newp->head = (newsize | CINUSE_BIT);
4000  } else { /* Otherwise, give back leader, use the rest */
4001  set_inuse(m, newp, newsize);
4002  set_inuse(m, p, leadsize);
4003  leader = chunk2mem(p);
4004  }
4005  p = newp;
4006  }
4007 
4008  /* Give back spare room at the end */
4009  if (!is_mmapped(p)) {
4010  size_t size = chunksize(p);
4011  if (size > nb + MIN_CHUNK_SIZE) {
4012  size_t remainder_size = size - nb;
4013  mchunkptr remainder = chunk_plus_offset(p, nb);
4014  set_inuse(m, p, nb);
4015  set_inuse(m, remainder, remainder_size);
4016  trailer = chunk2mem(remainder);
4017  }
4018  }
4019 
4020  assert(chunksize(p) >= nb);
4021  assert((((size_t) (chunk2mem(p))) % alignment) == 0);
4022  check_inuse_chunk(m, p);
4023  POSTACTION(m);
4024  if (leader != 0) {
4025  internal_free(m, leader);
4026  }
4027  if (trailer != 0) {
4028  internal_free(m, trailer);
4029  }
4030  return chunk2mem(p);
4031  }
4032  }
4033  return 0;
4034 }
4035 
4036 /* ------------------------ comalloc/coalloc support --------------------- */
4037 
4038 static void **
4039 ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
4040 {
4041  /*
4042  This provides common support for independent_X routines, handling
4043  all of the combinations that can result.
4044 
4045  The opts arg has:
4046  bit 0 set if all elements are same size (using sizes[0])
4047  bit 1 set if elements should be zeroed
4048  */
4049 
4050  size_t element_size; /* chunksize of each element, if all same */
4051  size_t contents_size; /* total size of elements */
4052  size_t array_size; /* request size of pointer array */
4053  void *mem; /* malloced aggregate space */
4054  mchunkptr p; /* corresponding chunk */
4055  size_t remainder_size; /* remaining bytes while splitting */
4056  void **marray; /* either "chunks" or malloced ptr array */
4057  mchunkptr array_chunk; /* chunk for malloced ptr array */
4058  flag_t was_enabled; /* to disable mmap */
4059  size_t size;
4060  size_t i;
4061 
4062  /* compute array length, if needed */
4063  if (chunks != 0) {
4064  if (n_elements == 0)
4065  return chunks; /* nothing to do */
4066  marray = chunks;
4067  array_size = 0;
4068  } else {
4069  /* if empty req, must still return chunk representing empty array */
4070  if (n_elements == 0)
4071  return (void **) internal_malloc(m, 0);
4072  marray = 0;
4073  array_size = request2size(n_elements * (sizeof(void *)));
4074  }
4075 
4076  /* compute total element size */
4077  if (opts & 0x1) { /* all-same-size */
4078  element_size = request2size(*sizes);
4079  contents_size = n_elements * element_size;
4080  } else { /* add up all the sizes */
4081  element_size = 0;
4082  contents_size = 0;
4083  for (i = 0; i != n_elements; ++i)
4084  contents_size += request2size(sizes[i]);
4085  }
4086 
4087  size = contents_size + array_size;
4088 
4089  /*
4090  Allocate the aggregate chunk. First disable direct-mmapping so
4091  malloc won't use it, since we would not be able to later
4092  free/realloc space internal to a segregated mmap region.
4093  */
4094  was_enabled = use_mmap(m);
4095  disable_mmap(m);
4096  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4097  if (was_enabled)
4098  enable_mmap(m);
4099  if (mem == 0)
4100  return 0;
4101 
4102  if (PREACTION(m))
4103  return 0;
4104  p = mem2chunk(mem);
4105  remainder_size = chunksize(p);
4106 
4107  assert(!is_mmapped(p));
4108 
4109  if (opts & 0x2) { /* optionally clear the elements */
4110  memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4111  }
4112 
4113  /* If not provided, allocate the pointer array as final part of chunk */
4114  if (marray == 0) {
4115  size_t array_chunk_size;
4116  array_chunk = chunk_plus_offset(p, contents_size);
4117  array_chunk_size = remainder_size - contents_size;
4118  marray = (void **) (chunk2mem(array_chunk));
4119  set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4120  remainder_size = contents_size;
4121  }
4122 
4123  /* split out elements */
4124  for (i = 0;; ++i) {
4125  marray[i] = chunk2mem(p);
4126  if (i != n_elements - 1) {
4127  if (element_size != 0)
4128  size = element_size;
4129  else
4130  size = request2size(sizes[i]);
4131  remainder_size -= size;
4132  set_size_and_pinuse_of_inuse_chunk(m, p, size);
4133  p = chunk_plus_offset(p, size);
4134  } else { /* the final element absorbs any overallocation slop */
4135  set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4136  break;
4137  }
4138  }
4139 
4140 #if DEBUG
4141  if (marray != chunks) {
4142  /* final element must have exactly exhausted chunk */
4143  if (element_size != 0) {
4144  assert(remainder_size == element_size);
4145  } else {
4146  assert(remainder_size == request2size(sizes[i]));
4147  }
4148  check_inuse_chunk(m, mem2chunk(marray));
4149  }
4150  for (i = 0; i != n_elements; ++i)
4151  check_inuse_chunk(m, mem2chunk(marray[i]));
4152 
4153 #endif /* DEBUG */
4154 
4155  POSTACTION(m);
4156  return marray;
4157 }
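/*
  Usage sketch for the ialloc-based interfaces (illustrative only; the sizes
  are arbitrary, the sketch_ function is not part of this file, and the
  wrapper functions are defined further below). dlindependent_calloc passes
  opts 3 (bit 0: all same size, bit 1: zero the elements), while
  dlindependent_comalloc passes 0 (per-element sizes, not zeroed).
*/
static void
sketch_comalloc_usage(void)
{
    size_t sizes[3] = { 16, 100, 4096 };                 /* arbitrary element sizes */
    void **elems = dlindependent_comalloc(3, sizes, 0);  /* chunks == 0: ialloc allocates the array */
    if (elems != 0) {
        /* ... elems[0..2] are independently freeable blocks ... */
        dlfree(elems[0]);
        dlfree(elems[1]);
        dlfree(elems[2]);
        dlfree(elems);   /* the pointer array itself was set up as an ordinary inuse chunk */
    }
}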
4158 
4159 
4160 /* -------------------------- public routines ---------------------------- */
4161 
4162 #if !ONLY_MSPACES
4163 
4164 void *
4165 dlmalloc(size_t bytes)
4166 {
4167  /*
4168  Basic algorithm:
4169  If a small request (< 256 bytes minus per-chunk overhead):
4170  1. If one exists, use a remainderless chunk in associated smallbin.
4171  (Remainderless means that there are too few excess bytes to
4172  represent as a chunk.)
4173  2. If it is big enough, use the dv chunk, which is normally the
4174  chunk adjacent to the one used for the most recent small request.
4175  3. If one exists, split the smallest available chunk in a bin,
4176  saving remainder in dv.
4177  4. If it is big enough, use the top chunk.
4178  5. If available, get memory from system and use it
4179  Otherwise, for a large request:
4180  1. Find the smallest available binned chunk that fits, and use it
4181  if it is better fitting than dv chunk, splitting if necessary.
4182  2. If better fitting than any binned chunk, use the dv chunk.
4183  3. If it is big enough, use the top chunk.
4184  4. If request size >= mmap threshold, try to directly mmap this chunk.
4185  5. If available, get memory from system and use it
4186 
4187  The ugly goto's here ensure that postaction occurs along all paths.
4188  */
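 /*
  Usage sketch for this entry point and its companions defined below
  (illustrative only; sizes are arbitrary):

  char *buf = (char *) dlmalloc(100);
  buf = (char *) dlrealloc(buf, 200); -- grow in place or malloc-copy-free
  dlfree(buf);
  void *aligned = dlmemalign(64, 1000); -- 64-byte aligned block
  dlfree(aligned);
  */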
4189 
4190  if (!PREACTION(gm)) {
4191  void *mem;
4192  size_t nb;
4193  if (bytes <= MAX_SMALL_REQUEST) {
4194  bindex_t idx;
4195  binmap_t smallbits;
4196  nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4197  idx = small_index(nb);
4198  smallbits = gm->smallmap >> idx;
4199 
4200  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4201  mchunkptr b, p;
4202  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4203  b = smallbin_at(gm, idx);
4204  p = b->fd;
4205  assert(chunksize(p) == small_index2size(idx));
4206  unlink_first_small_chunk(gm, b, p, idx);
4207  set_inuse_and_pinuse(gm, p, small_index2size(idx));
4208  mem = chunk2mem(p);
4209  check_malloced_chunk(gm, mem, nb);
4210  goto postaction;
4211  }
4212 
4213  else if (nb > gm->dvsize) {
4214  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4215  mchunkptr b, p, r;
4216  size_t rsize;
4217  bindex_t i;
4218  binmap_t leftbits =
4219  (smallbits << idx) & left_bits(idx2bit(idx));
4220  binmap_t leastbit = least_bit(leftbits);
4221  compute_bit2idx(leastbit, i);
4222  b = smallbin_at(gm, i);
4223  p = b->fd;
4224  assert(chunksize(p) == small_index2size(i));
4225  unlink_first_small_chunk(gm, b, p, i);
4226  rsize = small_index2size(i) - nb;
4227  /* Fit here cannot be remainderless if 4byte sizes */
4228  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4229  set_inuse_and_pinuse(gm, p, small_index2size(i));
4230  else {
4231  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4232  r = chunk_plus_offset(p, nb);
4233  set_size_and_pinuse_of_free_chunk(r, rsize);
4234  replace_dv(gm, r, rsize);
4235  }
4236  mem = chunk2mem(p);
4237  check_malloced_chunk(gm, mem, nb);
4238  goto postaction;
4239  }
4240 
4241  else if (gm->treemap != 0
4242  && (mem = tmalloc_small(gm, nb)) != 0) {
4243  check_malloced_chunk(gm, mem, nb);
4244  goto postaction;
4245  }
4246  }
4247  } else if (bytes >= MAX_REQUEST)
4248  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4249  else {
4250  nb = pad_request(bytes);
4251  if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4252  check_malloced_chunk(gm, mem, nb);
4253  goto postaction;
4254  }
4255  }
4256 
4257  if (nb <= gm->dvsize) {
4258  size_t rsize = gm->dvsize - nb;
4259  mchunkptr p = gm->dv;
4260  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4261  mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4262  gm->dvsize = rsize;
4263  set_size_and_pinuse_of_free_chunk(r, rsize);
4264  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4265  } else { /* exhaust dv */
4266  size_t dvs = gm->dvsize;
4267  gm->dvsize = 0;
4268  gm->dv = 0;
4269  set_inuse_and_pinuse(gm, p, dvs);
4270  }
4271  mem = chunk2mem(p);
4272  check_malloced_chunk(gm, mem, nb);
4273  goto postaction;
4274  }
4275 
4276  else if (nb < gm->topsize) { /* Split top */
4277  size_t rsize = gm->topsize -= nb;
4278  mchunkptr p = gm->top;
4279  mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4280  r->head = rsize | PINUSE_BIT;
4281  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4282  mem = chunk2mem(p);
4283  check_top_chunk(gm, gm->top);
4284  check_malloced_chunk(gm, mem, nb);
4285  goto postaction;
4286  }
4287 
4288  mem = sys_alloc(gm, nb);
4289 
4290  postaction:
4291  POSTACTION(gm);
4292  return mem;
4293  }
4294 
4295  return 0;
4296 }
4297 
4298 void
4299 dlfree(void *mem)
4300 {
4301  /*
4302  Consolidate freed chunks with preceding or succeeding bordering
4303  free chunks, if they exist, and then place in a bin. Intermixed
4304  with special cases for top, dv, mmapped chunks, and usage errors.
4305  */
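 /*
  Sketch of the consolidation below (illustrative only). Backward merging
  works because a free predecessor stores its size in this chunk's
  prev_foot, and pinuse(p) says whether that predecessor is in use:

  if (!pinuse(p)) { p = chunk_minus_offset(p, p->prev_foot); psize += prevsize; unlink it; }
  if (!cinuse(next)) { merge next too (top, dv and mmapped chunks are special-cased); }
  insert_chunk(fm, p, psize);
  */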
4306 
4307  if (mem != 0) {
4308  mchunkptr p = mem2chunk(mem);
4309 #if FOOTERS
4310  mstate fm = get_mstate_for(p);
4311  if (!ok_magic(fm)) {
4312  USAGE_ERROR_ACTION(fm, p);
4313  return;
4314  }
4315 #else /* FOOTERS */
4316 #define fm gm
4317 #endif /* FOOTERS */
4318  if (!PREACTION(fm)) {
4319  check_inuse_chunk(fm, p);
4320  if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4321  size_t psize = chunksize(p);
4322  mchunkptr next = chunk_plus_offset(p, psize);
4323  if (!pinuse(p)) {
4324  size_t prevsize = p->prev_foot;
4325  if ((prevsize & IS_MMAPPED_BIT) != 0) {
4326  prevsize &= ~IS_MMAPPED_BIT;
4327  psize += prevsize + MMAP_FOOT_PAD;
4328  if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4329  fm->footprint -= psize;
4330  goto postaction;
4331  } else {
4332  mchunkptr prev = chunk_minus_offset(p, prevsize);
4333  psize += prevsize;
4334  p = prev;
4335  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4336  if (p != fm->dv) {
4337  unlink_chunk(fm, p, prevsize);
4338  } else if ((next->head & INUSE_BITS) ==
4339  INUSE_BITS) {
4340  fm->dvsize = psize;
4341  set_free_with_pinuse(p, psize, next);
4342  goto postaction;
4343  }
4344  } else
4345  goto erroraction;
4346  }
4347  }
4348 
4349  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4350  if (!cinuse(next)) { /* consolidate forward */
4351  if (next == fm->top) {
4352  size_t tsize = fm->topsize += psize;
4353  fm->top = p;
4354  p->head = tsize | PINUSE_BIT;
4355  if (p == fm->dv) {
4356  fm->dv = 0;
4357  fm->dvsize = 0;
4358  }
4359  if (should_trim(fm, tsize))
4360  sys_trim(fm, 0);
4361  goto postaction;
4362  } else if (next == fm->dv) {
4363  size_t dsize = fm->dvsize += psize;
4364  fm->dv = p;
4365  set_size_and_pinuse_of_free_chunk(p, dsize);
4366  goto postaction;
4367  } else {
4368  size_t nsize = chunksize(next);
4369  psize += nsize;
4370  unlink_chunk(fm, next, nsize);
4371  set_size_and_pinuse_of_free_chunk(p, psize);
4372  if (p == fm->dv) {
4373  fm->dvsize = psize;
4374  goto postaction;
4375  }
4376  }
4377  } else
4378  set_free_with_pinuse(p, psize, next);
4379  insert_chunk(fm, p, psize);
4380  check_free_chunk(fm, p);
4381  goto postaction;
4382  }
4383  }
4384  erroraction:
4385  USAGE_ERROR_ACTION(fm, p);
4386  postaction:
4387  POSTACTION(fm);
4388  }
4389  }
4390 #if !FOOTERS
4391 #undef fm
4392 #endif /* FOOTERS */
4393 }
4394 
4395 void *
4396 dlcalloc(size_t n_elements, size_t elem_size)
4397 {
4398  void *mem;
4399  size_t req = 0;
4400  if (n_elements != 0) {
4401  req = n_elements * elem_size;
4402  if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4403  (req / n_elements != elem_size))
4404  req = MAX_SIZE_T; /* force downstream failure on overflow */
4405  }
4406  mem = dlmalloc(req);
4407  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4408  memset(mem, 0, req);
4409  return mem;
4410 }
4411 
4412 void *
4413 dlrealloc(void *oldmem, size_t bytes)
4414 {
4415  if (oldmem == 0)
4416  return dlmalloc(bytes);
4417 #ifdef REALLOC_ZERO_BYTES_FREES
4418  if (bytes == 0) {
4419  dlfree(oldmem);
4420  return 0;
4421  }
4422 #endif /* REALLOC_ZERO_BYTES_FREES */
4423  else {
4424 #if ! FOOTERS
4425  mstate m = gm;
4426 #else /* FOOTERS */
4427  mstate m = get_mstate_for(mem2chunk(oldmem));
4428  if (!ok_magic(m)) {
4429  USAGE_ERROR_ACTION(m, oldmem);
4430  return 0;
4431  }
4432 #endif /* FOOTERS */
4433  return internal_realloc(m, oldmem, bytes);
4434  }
4435 }
4436 
4437 void *
4438 dlmemalign(size_t alignment, size_t bytes)
4439 {
4440  return internal_memalign(gm, alignment, bytes);
4441 }
4442 
4443 void **
4444 dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
4445 {
4446  size_t sz = elem_size; /* serves as 1-element array */
4447  return ialloc(gm, n_elements, &sz, 3, chunks);
4448 }
4449 
4450 void **
4451 dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
4452 {
4453  return ialloc(gm, n_elements, sizes, 0, chunks);
4454 }
4455 
4456 void *
4457 dlvalloc(size_t bytes)
4458 {
4459  size_t pagesz;
4460  init_mparams();
4461  pagesz = mparams.page_size;
4462  return dlmemalign(pagesz, bytes);
4463 }
4464 
4465 void *
4466 dlpvalloc(size_t bytes)
4467 {
4468  size_t pagesz;
4469  init_mparams();
4470  pagesz = mparams.page_size;
4471  return dlmemalign(pagesz,
4472  (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4473 }
4474 
4475 int
4476 dlmalloc_trim(size_t pad)
4477 {
4478  int result = 0;
4479  if (!PREACTION(gm)) {
4480  result = sys_trim(gm, pad);
4481  POSTACTION(gm);
4482  }
4483  return result;
4484 }
4485 
4486 size_t
4487 dlmalloc_footprint(void)
4488 {
4489  return gm->footprint;
4490 }
4491 
4492 size_t
4493 dlmalloc_max_footprint(void)
4494 {
4495  return gm->max_footprint;
4496 }
4497 
4498 #if !NO_MALLINFO
4499 struct mallinfo
4500 dlmallinfo(void)
4501 {
4502  return internal_mallinfo(gm);
4503 }
4504 #endif /* NO_MALLINFO */
4505 
4506 void
4507 dlmalloc_stats()
4508 {
4509  internal_malloc_stats(gm);
4510 }
4511 
4512 size_t
4513 dlmalloc_usable_size(void *mem)
4514 {
4515  if (mem != 0) {
4516  mchunkptr p = mem2chunk(mem);
4517  if (cinuse(p))
4518  return chunksize(p) - overhead_for(p);
4519  }
4520  return 0;
4521 }
4522 
4523 int
4524 dlmallopt(int param_number, int value)
4525 {
4526  return change_mparam(param_number, value);
4527 }
4528 
4529 #endif /* !ONLY_MSPACES */
4530 
4531 /* ----------------------------- user mspaces ---------------------------- */
4532 
4533 #if MSPACES
4534 
4535 static mstate
4536 init_user_mstate(char *tbase, size_t tsize)
4537 {
4538  size_t msize = pad_request(sizeof(struct malloc_state));
4539  mchunkptr mn;
4540  mchunkptr msp = align_as_chunk(tbase);
4541  mstate m = (mstate) (chunk2mem(msp));
4542  memset(m, 0, msize);
4543  INITIAL_LOCK(&m->mutex);
4544  msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4545  m->seg.base = m->least_addr = tbase;
4546  m->seg.size = m->footprint = m->max_footprint = tsize;
4547  m->magic = mparams.magic;
4548  m->mflags = mparams.default_mflags;
4549  disable_contiguous(m);
4550  init_bins(m);
4551  mn = next_chunk(mem2chunk(m));
4552  init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4553  check_top_chunk(m, m->top);
4554  return m;
4555 }
4556 
4557 mspace
4558 create_mspace(size_t capacity, int locked)
4559 {
4560  mstate m = 0;
4561  size_t msize = pad_request(sizeof(struct malloc_state));
4562  init_mparams(); /* Ensure pagesize etc initialized */
4563 
4564  if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4565  size_t rs = ((capacity == 0) ? mparams.granularity :
4566  (capacity + TOP_FOOT_SIZE + msize));
4567  size_t tsize = granularity_align(rs);
4568  char *tbase = (char *) (CALL_MMAP(tsize));
4569  if (tbase != CMFAIL) {
4570  m = init_user_mstate(tbase, tsize);
4571  m->seg.sflags = IS_MMAPPED_BIT;
4572  set_lock(m, locked);
4573  }
4574  }
4575  return (mspace) m;
4576 }
4577 
4578 mspace
4579 create_mspace_with_base(void *base, size_t capacity, int locked)
4580 {
4581  mstate m = 0;
4582  size_t msize = pad_request(sizeof(struct malloc_state));
4583  init_mparams(); /* Ensure pagesize etc initialized */
4584 
4585  if (capacity > msize + TOP_FOOT_SIZE &&
4586  capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4587  m = init_user_mstate((char *) base, capacity);
4588  m->seg.sflags = EXTERN_BIT;
4589  set_lock(m, locked);
4590  }
4591  return (mspace) m;
4592 }
4593 
4594 size_t
4595 destroy_mspace(mspace msp)
4596 {
4597  size_t freed = 0;
4598  mstate ms = (mstate) msp;
4599  if (ok_magic(ms)) {
4600  msegmentptr sp = &ms->seg;
4601  while (sp != 0) {
4602  char *base = sp->base;
4603  size_t size = sp->size;
4604  flag_t flag = sp->sflags;
4605  sp = sp->next;
4606  if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4607  CALL_MUNMAP(base, size) == 0)
4608  freed += size;
4609  }
4610  } else {
4611  USAGE_ERROR_ACTION(ms, ms);
4612  }
4613  return freed;
4614 }
4615 
4616 /*
4617  mspace versions of routines are near-clones of the global
4618  versions. This is not so nice but better than the alternatives.
4619 */
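/*
  Usage sketch for the mspace API in this section (illustrative only; the
  sketch_ function is not part of this file). A capacity of 0 asks
  create_mspace for the default granularity, and the second argument
  enables per-space locking.
*/
static void
sketch_mspace_usage(void)
{
    mspace msp = create_mspace(0, 0);      /* private heap, no locking */
    if (msp != 0) {
        void *p = mspace_malloc(msp, 128); /* defined just below */
        if (p != 0)
            mspace_free(msp, p);
        destroy_mspace(msp);               /* releases the whole space at once */
    }
}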
4620 
4621 
4622 void *
4623 mspace_malloc(mspace msp, size_t bytes)
4624 {
4625  mstate ms = (mstate) msp;
4626  if (!ok_magic(ms)) {
4627  USAGE_ERROR_ACTION(ms, ms);
4628  return 0;
4629  }
4630  if (!PREACTION(ms)) {
4631  void *mem;
4632  size_t nb;
4633  if (bytes <= MAX_SMALL_REQUEST) {
4634  bindex_t idx;
4635  binmap_t smallbits;
4636  nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4637  idx = small_index(nb);
4638  smallbits = ms->smallmap >> idx;
4639 
4640  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4641  mchunkptr b, p;
4642  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4643  b = smallbin_at(ms, idx);
4644  p = b->fd;
4645  assert(chunksize(p) == small_index2size(idx));
4646  unlink_first_small_chunk(ms, b, p, idx);
4647  set_inuse_and_pinuse(ms, p, small_index2size(idx));
4648  mem = chunk2mem(p);
4649  check_malloced_chunk(ms, mem, nb);
4650  goto postaction;
4651  }
4652 
4653  else if (nb > ms->dvsize) {
4654  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4655  mchunkptr b, p, r;
4656  size_t rsize;
4657  bindex_t i;
4658  binmap_t leftbits =
4659  (smallbits << idx) & left_bits(idx2bit(idx));
4660  binmap_t leastbit = least_bit(leftbits);
4661  compute_bit2idx(leastbit, i);
4662  b = smallbin_at(ms, i);
4663  p = b->fd;
4664  assert(chunksize(p) == small_index2size(i));
4665  unlink_first_small_chunk(ms, b, p, i);
4666  rsize = small_index2size(i) - nb;
4667  /* Fit here cannot be remainderless if 4byte sizes */
4668  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4669  set_inuse_and_pinuse(ms, p, small_index2size(i));
4670  else {
4671  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4672  r = chunk_plus_offset(p, nb);
4673  set_size_and_pinuse_of_free_chunk(r, rsize);
4674  replace_dv(ms, r, rsize);
4675  }
4676  mem = chunk2mem(p);
4677  check_malloced_chunk(ms, mem, nb);
4678  goto postaction;
4679  }
4680 
4681  else if (ms->treemap != 0
4682  && (mem = tmalloc_small(ms, nb)) != 0) {
4683  check_malloced_chunk(ms, mem, nb);
4684  goto postaction;
4685  }
4686  }
4687  } else if (bytes >= MAX_REQUEST)
4688  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4689  else {
4690  nb = pad_request(bytes);
4691  if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4692  check_malloced_chunk(ms, mem, nb);
4693  goto postaction;
4694  }
4695  }
4696 
4697  if (nb <= ms->dvsize) {
4698  size_t rsize = ms->dvsize - nb;
4699  mchunkptr p = ms->dv;
4700  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4701  mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4702  ms->dvsize = rsize;
4703  set_size_and_pinuse_of_free_chunk(r, rsize);
4704  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4705  } else { /* exhaust dv */
4706  size_t dvs = ms->dvsize;
4707  ms->dvsize = 0;
4708  ms->dv = 0;
4709  set_inuse_and_pinuse(ms, p, dvs);
4710  }
4711  mem = chunk2mem(p);
4712  check_malloced_chunk(ms, mem, nb);
4713  goto postaction;
4714  }
4715 
4716  else if (nb < ms->topsize) { /* Split top */
4717  size_t rsize = ms->topsize -= nb;
4718  mchunkptr p = ms->top;
4719  mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4720  r->head = rsize | PINUSE_BIT;
4721  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4722  mem = chunk2mem(p);
4723  check_top_chunk(ms, ms->top);
4724  check_malloced_chunk(ms, mem, nb);
4725  goto postaction;
4726  }
4727 
4728  mem = sys_alloc(ms, nb);
4729 
4730  postaction:
4731  POSTACTION(ms);
4732  return mem;
4733  }
4734 
4735  return 0;
4736 }
4737 
4738 void
4739 mspace_free(mspace msp, void *mem)
4740 {
4741  if (mem != 0) {
4742  mchunkptr p = mem2chunk(mem);
4743 #if FOOTERS
4744  mstate fm = get_mstate_for(p);
4745 #else /* FOOTERS */
4746  mstate fm = (mstate) msp;
4747 #endif /* FOOTERS */
4748  if (!ok_magic(fm)) {
4749  USAGE_ERROR_ACTION(fm, p);
4750  return;
4751  }
4752  if (!PREACTION(fm)) {
4753  check_inuse_chunk(fm, p);
4754  if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4755  size_t psize = chunksize(p);
4756  mchunkptr next = chunk_plus_offset(p, psize);
4757  if (!pinuse(p)) {
4758  size_t prevsize = p->prev_foot;
4759  if ((prevsize & IS_MMAPPED_BIT) != 0) {
4760  prevsize &= ~IS_MMAPPED_BIT;
4761  psize += prevsize + MMAP_FOOT_PAD;
4762  if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4763  fm->footprint -= psize;
4764  goto postaction;
4765  } else {
4766  mchunkptr prev = chunk_minus_offset(p, prevsize);
4767  psize += prevsize;
4768  p = prev;
4769  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4770  if (p != fm->dv) {
4771  unlink_chunk(fm, p, prevsize);
4772  } else if ((next->head & INUSE_BITS) ==
4773  INUSE_BITS) {
4774  fm->dvsize = psize;
4775  set_free_with_pinuse(p, psize, next);
4776  goto postaction;
4777  }
4778  } else
4779  goto erroraction;
4780  }
4781  }
4782 
4783  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4784  if (!cinuse(next)) { /* consolidate forward */
4785  if (next == fm->top) {
4786  size_t tsize = fm->topsize += psize;
4787  fm->top = p;
4788  p->head = tsize | PINUSE_BIT;
4789  if (p == fm->dv) {
4790  fm->dv = 0;
4791  fm->dvsize = 0;
4792  }
4793  if (should_trim(fm, tsize))
4794  sys_trim(fm, 0);
4795  goto postaction;
4796  } else if (next == fm->dv) {
4797  size_t dsize = fm->dvsize += psize;
4798  fm->dv = p;
4799  set_size_and_pinuse_of_free_chunk(p, dsize);
4800  goto postaction;
4801  } else {
4802  size_t nsize = chunksize(next);
4803  psize += nsize;
4804  unlink_chunk(fm, next, nsize);
4805  set_size_and_pinuse_of_free_chunk(p, psize);
4806  if (p == fm->dv) {
4807  fm->dvsize = psize;
4808  goto postaction;
4809  }
4810  }
4811  } else
4812  set_free_with_pinuse(p, psize, next);
4813  insert_chunk(fm, p, psize);
4814  check_free_chunk(fm, p);
4815  goto postaction;
4816  }
4817  }
4818  erroraction:
4819  USAGE_ERROR_ACTION(fm, p);
4820  postaction:
4821  POSTACTION(fm);
4822  }
4823  }
4824 }
4825 
4826 void *
4827 mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
4828 {
4829  void *mem;
4830  size_t req = 0;
4831  mstate ms = (mstate) msp;
4832  if (!ok_magic(ms)) {
4833  USAGE_ERROR_ACTION(ms, ms);
4834  return 0;
4835  }
4836  if (n_elements != 0) {
4837  req = n_elements * elem_size;
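 /* Cheap overflow screen: the division check below is only needed when one
    of the operands has bits above 0xffff, since two values below 2^16
    cannot overflow size_t when multiplied. */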
4838  if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4839  (req / n_elements != elem_size))
4840  req = MAX_SIZE_T; /* force downstream failure on overflow */
4841  }
4842  mem = internal_malloc(ms, req);
4843  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4844  memset(mem, 0, req);
4845  return mem;
4846 }
4847 
4848 void *
4849 mspace_realloc(mspace msp, void *oldmem, size_t bytes)
4850 {
4851  if (oldmem == 0)
4852  return mspace_malloc(msp, bytes);
4853 #ifdef REALLOC_ZERO_BYTES_FREES
4854  if (bytes == 0) {
4855  mspace_free(msp, oldmem);
4856  return 0;
4857  }
4858 #endif /* REALLOC_ZERO_BYTES_FREES */
4859  else {
4860 #if FOOTERS
4861  mchunkptr p = mem2chunk(oldmem);
4862  mstate ms = get_mstate_for(p);
4863 #else /* FOOTERS */
4864  mstate ms = (mstate) msp;
4865 #endif /* FOOTERS */
4866  if (!ok_magic(ms)) {
4867  USAGE_ERROR_ACTION(ms, ms);
4868  return 0;
4869  }
4870  return internal_realloc(ms, oldmem, bytes);
4871  }
4872 }
4873 
4874 void *
4875 mspace_memalign(mspace msp, size_t alignment, size_t bytes)
4876 {
4877  mstate ms = (mstate) msp;
4878  if (!ok_magic(ms)) {
4879  USAGE_ERROR_ACTION(ms, ms);
4880  return 0;
4881  }
4882  return internal_memalign(ms, alignment, bytes);
4883 }
4884 
4885 void **
4886 mspace_independent_calloc(mspace msp, size_t n_elements,
4887  size_t elem_size, void *chunks[])
4888 {
4889  size_t sz = elem_size; /* serves as 1-element array */
4890  mstate ms = (mstate) msp;
4891  if (!ok_magic(ms)) {
4892  USAGE_ERROR_ACTION(ms, ms);
4893  return 0;
4894  }
4895  return ialloc(ms, n_elements, &sz, 3, chunks);
4896 }
4897 
4898 void **
4899 mspace_independent_comalloc(mspace msp, size_t n_elements,
4900  size_t sizes[], void *chunks[])
4901 {
4902  mstate ms = (mstate) msp;
4903  if (!ok_magic(ms)) {
4904  USAGE_ERROR_ACTION(ms, ms);
4905  return 0;
4906  }
4907  return ialloc(ms, n_elements, sizes, 0, chunks);
4908 }
4909 
4910 int
4911 mspace_trim(mspace msp, size_t pad)
4912 {
4913  int result = 0;
4914  mstate ms = (mstate) msp;
4915  if (ok_magic(ms)) {
4916  if (!PREACTION(ms)) {
4917  result = sys_trim(ms, pad);
4918  POSTACTION(ms);
4919  }
4920  } else {
4921  USAGE_ERROR_ACTION(ms, ms);
4922  }
4923  return result;
4924 }
4925 
4926 void
4927 mspace_malloc_stats(mspace msp)
4928 {
4929  mstate ms = (mstate) msp;
4930  if (ok_magic(ms)) {
4931  internal_malloc_stats(ms);
4932  } else {
4933  USAGE_ERROR_ACTION(ms, ms);
4934  }
4935 }
4936 
4937 size_t
4938 mspace_footprint(mspace msp)
4939 {
4940  size_t result = 0;
4941  mstate ms = (mstate) msp;
4942  if (ok_magic(ms)) {
4943  result = ms->footprint;
4944  } else
4945  USAGE_ERROR_ACTION(ms, ms);
4946  return result;
4947 }
4948 
4949 
4950 size_t
4951 mspace_max_footprint(mspace msp)
4952 {
4953  size_t result = 0;
4954  mstate ms = (mstate) msp;
4955  if (ok_magic(ms)) {
4956  result = ms->max_footprint;
4957  } else
4958  USAGE_ERROR_ACTION(ms, ms);
4959  return result;
4960 }
4961 
4962 
4963 #if !NO_MALLINFO
4964 struct mallinfo
4965 mspace_mallinfo(mspace msp)
4966 {
4967  mstate ms = (mstate) msp;
4968  if (!ok_magic(ms)) {
4969  USAGE_ERROR_ACTION(ms, ms);
4970  }
4971  return internal_mallinfo(ms);
4972 }
4973 #endif /* NO_MALLINFO */
4974 
4975 int
4976 mspace_mallopt(int param_number, int value)
4977 {
4978  return change_mparam(param_number, value);
4979 }
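 /*
   A minimal usage sketch of the mspace entry points above (illustrative
   only, not part of the original sources). It assumes MSPACES is enabled
   and uses the create_mspace and destroy_mspace functions defined earlier
   in this file:

   mspace msp = create_mspace(0, 0);     // default capacity, no locking
   if (msp != 0) {
       void *block = mspace_malloc(msp, 128);
       mspace_free(msp, block);          // freeing a null pointer is a no-op
       destroy_mspace(msp);              // returns all mspace memory at once
   }
 */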
4980 
4981 #endif /* MSPACES */
4982 
4983 /* -------------------- Alternative MORECORE functions ------------------- */
4984 
4985 /*
4986  Guidelines for creating a custom version of MORECORE:
4987 
4988  * For best performance, MORECORE should allocate in multiples of pagesize.
4989  * MORECORE may allocate more memory than requested. (Or even less,
4990  but this will usually result in a malloc failure.)
4991  * MORECORE must not allocate memory when given argument zero, but
4992  instead return one past the end address of memory from previous
4993  nonzero call.
4994  * For best performance, consecutive calls to MORECORE with positive
4995  arguments should return increasing addresses, indicating that
4996  space has been contiguously extended.
4997  * Even though consecutive calls to MORECORE need not return contiguous
4998  addresses, it must be OK for malloc'ed chunks to span multiple
4999  regions in those cases where they do happen to be contiguous.
5000  * MORECORE need not handle negative arguments -- it may instead
5001  just return MFAIL when given negative arguments.
5002  Negative arguments are always multiples of pagesize. MORECORE
5003  must not misinterpret negative args as large positive unsigned
5004  args. You can suppress all such calls from even occurring by defining
5005  MORECORE_CANNOT_TRIM. (A second minimal example appears in the sketch after this comment block.)
5006 
5007  As an example alternative MORECORE, here is a custom allocator
5008  kindly contributed for pre-OSX macOS. It uses virtually but not
5009  necessarily physically contiguous non-paged memory (locked in,
5010  present and won't get swapped out). You can use it by uncommenting
5011  this section, adding some #includes, and setting up the appropriate
5012  defines above:
5013 
5014  #define MORECORE osMoreCore
5015 
5016  There is also a shutdown routine that should somehow be called for
5017  cleanup upon program exit.
5018 
5019  #define MAX_POOL_ENTRIES 100
5020  #define MINIMUM_MORECORE_SIZE (64 * 1024U)
5021  static int next_os_pool;
5022  void *our_os_pools[MAX_POOL_ENTRIES];
5023 
5024  void *osMoreCore(int size)
5025  {
5026  void *ptr = 0;
5027  static void *sbrk_top = 0;
5028 
5029  if (size > 0)
5030  {
5031  if (size < MINIMUM_MORECORE_SIZE)
5032  size = MINIMUM_MORECORE_SIZE;
5033  if (CurrentExecutionLevel() == kTaskLevel)
5034  ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5035  if (ptr == 0)
5036  {
5037  return (void *) MFAIL;
5038  }
5039  // save ptrs so they can be freed during cleanup
5040  our_os_pools[next_os_pool] = ptr;
5041  next_os_pool++;
5042  ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5043  sbrk_top = (char *) ptr + size;
5044  return ptr;
5045  }
5046  else if (size < 0)
5047  {
5048  // we don't currently support shrink behavior
5049  return (void *) MFAIL;
5050  }
5051  else
5052  {
5053  return sbrk_top;
5054  }
5055  }
5056 
5057  // cleanup any allocated memory pools
5058  // called as last thing before shutting down driver
5059 
5060  void osCleanupMem(void)
5061  {
5062  void **ptr;
5063 
5064  for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5065  if (*ptr)
5066  {
5067  PoolDeallocate(*ptr);
5068  *ptr = 0;
5069  }
5070  }
5071 
5072 */
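 /*
   A second, minimal sketch of an alternative MORECORE (illustrative only,
   not part of the original sources). It carves space out of a fixed static
   arena and follows the guidelines above: a zero argument returns one past
   the current end, negative arguments report MFAIL, and positive requests
   extend the arena contiguously. The names staticMoreCore and
   STATIC_ARENA_SIZE are placeholders; to use it you would also add
   "#define MORECORE staticMoreCore" (and likely MORECORE_CANNOT_TRIM) in
   the configuration section near the top of this file.

   #define STATIC_ARENA_SIZE (1024 * 1024)
   static char static_arena[STATIC_ARENA_SIZE];
   static size_t arena_used = 0;

   void *staticMoreCore(int size)
   {
       if (size == 0)                      // query: one past current end
           return static_arena + arena_used;
       else if (size < 0)                  // trimming not supported here
           return (void *) MFAIL;
       else if ((size_t) size > STATIC_ARENA_SIZE - arena_used)
           return (void *) MFAIL;          // arena exhausted
       else {
           void *ptr = static_arena + arena_used;
           arena_used += (size_t) size;    // contiguous extension
           return ptr;
       }
   }
 */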
5073 
5074 
5075 /* -----------------------------------------------------------------------
5076 History:
5077  V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
5078  * Add max_footprint functions
5079  * Ensure all appropriate literals are size_t
5080  * Fix conditional compilation problem for some #define settings
5081  * Avoid concatenating segments with the one provided
5082  in create_mspace_with_base
5083  * Rename some variables to avoid compiler shadowing warnings
5084  * Use explicit lock initialization.
5085  * Better handling of sbrk interference.
5086  * Simplify and fix segment insertion, trimming and mspace_destroy
5087  * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5088  * Thanks especially to Dennis Flanagan for help on these.
5089 
5090  V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
5091  * Fix memalign brace error.
5092 
5093  V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
5094  * Fix improper #endif nesting in C++
5095  * Add explicit casts needed for C++
5096 
5097  V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
5098  * Use trees for large bins
5099  * Support mspaces
5100  * Use segments to unify sbrk-based and mmap-based system allocation,
5101  removing need for emulation on most platforms without sbrk.
5102  * Default safety checks
5103  * Optional footer checks. Thanks to William Robertson for the idea.
5104  * Internal code refactoring
5105  * Incorporate suggestions and platform-specific changes.
5106  Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5107  Aaron Bachmann, Emery Berger, and others.
5108  * Speed up non-fastbin processing enough to remove fastbins.
5109  * Remove useless cfree() to avoid conflicts with other apps.
5110  * Remove internal memcpy, memset. Compilers handle builtins better.
5111  * Remove some options that no one ever used and rename others.
5112 
5113  V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5114  * Fix malloc_state bitmap array misdeclaration
5115 
5116  V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
5117  * Allow tuning of FIRST_SORTED_BIN_SIZE
5118  * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5119  * Better detection and support for non-contiguousness of MORECORE.
5120  Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5121  * Bypass most of malloc if no frees. Thanks to Emery Berger.
5122  * Fix freeing of old top non-contiguous chunk in sysmalloc.
5123  * Raised default trim and map thresholds to 256K.
5124  * Fix mmap-related #defines. Thanks to Lubos Lunak.
5125  * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5126  * Branch-free bin calculation
5127  * Default trim and mmap thresholds now 256K.
5128 
5129  V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5130  * Introduce independent_comalloc and independent_calloc.
5131  Thanks to Michael Pachos for motivation and help.
5132  * Make optional .h file available
5133  * Allow > 2GB requests on 32bit systems.
5134  * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5135  Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5136  and Anonymous.
5137  * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5138  helping test this.)
5139  * memalign: check alignment arg
5140  * realloc: don't try to shift chunks backwards, since this
5141  leads to more fragmentation in some programs and doesn't
5142  seem to help in any others.
5143  * Collect all cases in malloc requiring system memory into sysmalloc
5144  * Use mmap as backup to sbrk
5145  * Place all internal state in malloc_state
5146  * Introduce fastbins (although similar to 2.5.1)
5147  * Many minor tunings and cosmetic improvements
5148  * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5149  * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5150  Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5151  * Include errno.h to support default failure action.
5152 
5153  V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5154  * return null for negative arguments
5155  * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5156  * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5157  (e.g. WIN32 platforms)
5158  * Cleanup header file inclusion for WIN32 platforms
5159  * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5160  * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5161  memory allocation routines
5162  * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5163  * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5164  usage of 'assert' in non-WIN32 code
5165  * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5166  avoid infinite loop
5167  * Always call 'fREe()' rather than 'free()'
5168 
5169  V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5170  * Fixed ordering problem with boundary-stamping
5171 
5172  V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5173  * Added pvalloc, as recommended by H.J. Liu
5174  * Added 64bit pointer support mainly from Wolfram Gloger
5175  * Added anonymously donated WIN32 sbrk emulation
5176  * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5177  * malloc_extend_top: fix mask error that caused wastage after
5178  foreign sbrks
5179  * Add linux mremap support code from HJ Liu
5180 
5181  V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5182  * Integrated most documentation with the code.
5183  * Add support for mmap, with help from
5184  Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5185  * Use last_remainder in more cases.
5186  * Pack bins using idea from colin@nyx10.cs.du.edu
5187  * Use ordered bins instead of best-fit threshold
5188  * Eliminate block-local decls to simplify tracing and debugging.
5189  * Support another case of realloc via move into top
5190  * Fix error occurring when initial sbrk_base not word-aligned.
5191  * Rely on page size for units instead of SBRK_UNIT to
5192  avoid surprises about sbrk alignment conventions.
5193  * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5194  (raymond@es.ele.tue.nl) for the suggestion.
5195  * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5196  * More precautions for cases where other routines call sbrk,
5197  courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5198  * Added macros etc., allowing use in linux libc from
5199  H.J. Lu (hjl@gnu.ai.mit.edu)
5200  * Inverted this history list
5201 
5202  V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5203  * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5204  * Removed all preallocation code since under current scheme
5205  the work required to undo bad preallocations exceeds
5206  the work saved in good cases for most test programs.
5207  * No longer use return list or unconsolidated bins since
5208  no scheme using them consistently outperforms those that don't
5209  given above changes.
5210  * Use best fit for very large chunks to prevent some worst-cases.
5211  * Added some support for debugging
5212 
5213  V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5214  * Removed footers when chunks are in use. Thanks to
5215  Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5216 
5217  V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5218  * Added malloc_trim, with help from Wolfram Gloger
5219  (wmglo@Dent.MED.Uni-Muenchen.DE).
5220 
5221  V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5222 
5223  V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5224  * realloc: try to expand in both directions
5225  * malloc: swap order of clean-bin strategy;
5226  * realloc: only conditionally expand backwards
5227  * Try not to scavenge used bins
5228  * Use bin counts as a guide to preallocation
5229  * Occasionally bin return list chunks in first scan
5230  * Add a few optimizations from colin@nyx10.cs.du.edu
5231 
5232  V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5233  * faster bin computation & slightly different binning
5234  * merged all consolidations to one part of malloc proper
5235  (eliminating old malloc_find_space & malloc_clean_bin)
5236  * Scan 2 returns chunks (not just 1)
5237  * Propagate failure in realloc if malloc returns 0
5238  * Add stuff to allow compilation on non-ANSI compilers
5239  from kpv@research.att.com
5240 
5241  V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5242  * removed potential for odd address access in prev_chunk
5243  * removed dependency on getpagesize.h
5244  * misc cosmetics and a bit more internal documentation
5245  * anticosmetics: mangled names in macros to evade debugger strangeness
5246  * tested on sparc, hp-700, dec-mips, rs6000
5247  with gcc & native cc (hp, dec only) allowing
5248  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5249 
5250  Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5251  * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5252  structure of old version, but most details differ.)
5253 
5254 */
5255 
5256 #endif /* !HAVE_MALLOC */
5257 
5258 /* vi: set ts=4 sw=4 expandtab: */