author    Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit    5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree      10027f336435511475e392454359edea8e25895d /memory
parent    49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'memory')
-rw-r--r--memory/build/jemalloc_config.cpp164
-rw-r--r--memory/build/malloc_decls.h70
-rw-r--r--memory/build/moz.build55
-rw-r--r--memory/build/mozjemalloc_compat.c182
-rw-r--r--memory/build/mozmemory.h91
-rw-r--r--memory/build/mozmemory_wrap.c178
-rw-r--r--memory/build/mozmemory_wrap.h219
-rw-r--r--memory/build/replace_malloc.c546
-rw-r--r--memory/build/replace_malloc.h133
-rw-r--r--memory/build/replace_malloc_bridge.h202
-rw-r--r--memory/fallible/fallible.cpp11
-rw-r--r--memory/fallible/fallible.h68
-rw-r--r--memory/fallible/moz.build34
-rw-r--r--memory/gtest/TestJemalloc.cpp51
-rw-r--r--memory/gtest/moz.build11
-rw-r--r--memory/jemalloc/README.mozilla9
-rwxr-xr-xmemory/jemalloc/helper/git8
-rw-r--r--memory/jemalloc/moz.build81
-rw-r--r--memory/jemalloc/src/.appveyor.yml28
-rw-r--r--memory/jemalloc/src/.travis.yml29
-rw-r--r--memory/jemalloc/src/COPYING27
-rw-r--r--memory/jemalloc/src/ChangeLog981
-rw-r--r--memory/jemalloc/src/INSTALL414
-rw-r--r--memory/jemalloc/src/Makefile.in506
-rw-r--r--memory/jemalloc/src/README20
-rw-r--r--memory/jemalloc/src/VERSION1
-rwxr-xr-xmemory/jemalloc/src/autogen.sh17
-rw-r--r--memory/jemalloc/src/bin/jemalloc-config.in79
-rw-r--r--memory/jemalloc/src/bin/jemalloc.sh.in9
-rw-r--r--memory/jemalloc/src/bin/jeprof.in5611
-rwxr-xr-xmemory/jemalloc/src/build-aux/config.guess1420
-rwxr-xr-xmemory/jemalloc/src/build-aux/config.sub1797
-rwxr-xr-xmemory/jemalloc/src/build-aux/install-sh250
-rw-r--r--memory/jemalloc/src/config.stamp.in0
-rwxr-xr-xmemory/jemalloc/src/configure10773
-rw-r--r--memory/jemalloc/src/configure.ac1970
-rwxr-xr-xmemory/jemalloc/src/coverage.sh16
-rw-r--r--memory/jemalloc/src/doc/html.xsl.in5
-rw-r--r--memory/jemalloc/src/doc/jemalloc.xml.in2966
-rw-r--r--memory/jemalloc/src/doc/manpages.xsl.in4
-rw-r--r--memory/jemalloc/src/doc/stylesheet.xsl10
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/arena.h1516
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/assert.h45
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/atomic.h651
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/base.h25
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/bitmap.h274
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/chunk.h96
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/chunk_dss.h37
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/chunk_mmap.h21
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/ckh.h86
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/ctl.h118
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/extent.h239
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/hash.h357
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/huge.h35
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in1288
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_decls.h75
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_defs.h.in307
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_macros.h57
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/mb.h115
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/mutex.h147
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/nstime.h48
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/pages.h27
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/ph.h345
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/internal/private_namespace.sh5
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt626
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/internal/private_unnamespace.sh5
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/prng.h207
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/prof.h547
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/internal/public_namespace.sh6
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/internal/public_unnamespace.sh6
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/ql.h81
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/qr.h69
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/quarantine.h60
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/rb.h1003
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/rtree.h366
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/internal/size_classes.sh318
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/smoothstep.h246
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/internal/smoothstep.sh115
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/spin.h51
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/stats.h201
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/tcache.h469
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/ticker.h75
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/tsd.h787
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/util.h338
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/valgrind.h114
-rw-r--r--memory/jemalloc/src/include/jemalloc/internal/witness.h266
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/jemalloc.sh28
-rw-r--r--memory/jemalloc/src/include/jemalloc/jemalloc_defs.h.in45
-rw-r--r--memory/jemalloc/src/include/jemalloc/jemalloc_macros.h.in103
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/jemalloc_mangle.sh45
-rw-r--r--memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in66
-rwxr-xr-xmemory/jemalloc/src/include/jemalloc/jemalloc_rename.sh22
-rw-r--r--memory/jemalloc/src/include/jemalloc/jemalloc_typedefs.h.in57
-rw-r--r--memory/jemalloc/src/include/msvc_compat/C99/stdbool.h20
-rw-r--r--memory/jemalloc/src/include/msvc_compat/C99/stdint.h247
-rw-r--r--memory/jemalloc/src/include/msvc_compat/strings.h59
-rw-r--r--memory/jemalloc/src/include/msvc_compat/windows_extra.h6
-rw-r--r--memory/jemalloc/src/jemalloc.pc.in12
-rw-r--r--memory/jemalloc/src/msvc/ReadMe.txt24
-rw-r--r--memory/jemalloc/src/msvc/jemalloc_vc2015.sln63
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj402
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters272
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.cpp89
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.h3
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj327
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters26
-rw-r--r--memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads_main.cpp12
-rw-r--r--memory/jemalloc/src/src/arena.c3781
-rw-r--r--memory/jemalloc/src/src/atomic.c2
-rw-r--r--memory/jemalloc/src/src/base.c177
-rw-r--r--memory/jemalloc/src/src/bitmap.c111
-rw-r--r--memory/jemalloc/src/src/chunk.c783
-rw-r--r--memory/jemalloc/src/src/chunk_dss.c237
-rw-r--r--memory/jemalloc/src/src/chunk_mmap.c78
-rw-r--r--memory/jemalloc/src/src/ckh.c569
-rw-r--r--memory/jemalloc/src/src/ctl.c2254
-rw-r--r--memory/jemalloc/src/src/extent.c53
-rw-r--r--memory/jemalloc/src/src/hash.c2
-rw-r--r--memory/jemalloc/src/src/huge.c473
-rw-r--r--memory/jemalloc/src/src/jemalloc.c2897
-rw-r--r--memory/jemalloc/src/src/mb.c2
-rw-r--r--memory/jemalloc/src/src/mutex.c158
-rw-r--r--memory/jemalloc/src/src/nstime.c194
-rw-r--r--memory/jemalloc/src/src/pages.c273
-rw-r--r--memory/jemalloc/src/src/prng.c2
-rw-r--r--memory/jemalloc/src/src/prof.c2355
-rw-r--r--memory/jemalloc/src/src/quarantine.c183
-rw-r--r--memory/jemalloc/src/src/rtree.c132
-rw-r--r--memory/jemalloc/src/src/spin.c2
-rw-r--r--memory/jemalloc/src/src/stats.c1153
-rw-r--r--memory/jemalloc/src/src/tcache.c555
-rw-r--r--memory/jemalloc/src/src/ticker.c2
-rw-r--r--memory/jemalloc/src/src/tsd.c197
-rw-r--r--memory/jemalloc/src/src/util.c666
-rw-r--r--memory/jemalloc/src/src/valgrind.c34
-rw-r--r--memory/jemalloc/src/src/witness.c136
-rw-r--r--memory/jemalloc/src/src/zone.c330
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-alti.h186
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params.h132
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params11213.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params1279.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params132049.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params19937.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params216091.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params2281.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params4253.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params44497.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params607.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-params86243.h81
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT-sse2.h157
-rw-r--r--memory/jemalloc/src/test/include/test/SFMT.h171
-rw-r--r--memory/jemalloc/src/test/include/test/btalloc.h31
-rw-r--r--memory/jemalloc/src/test/include/test/jemalloc_test.h.in163
-rw-r--r--memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in9
-rw-r--r--memory/jemalloc/src/test/include/test/math.h311
-rw-r--r--memory/jemalloc/src/test/include/test/mq.h109
-rw-r--r--memory/jemalloc/src/test/include/test/mtx.h23
-rw-r--r--memory/jemalloc/src/test/include/test/test.h333
-rw-r--r--memory/jemalloc/src/test/include/test/thd.h9
-rw-r--r--memory/jemalloc/src/test/include/test/timer.h11
-rw-r--r--memory/jemalloc/src/test/integration/MALLOCX_ARENA.c69
-rw-r--r--memory/jemalloc/src/test/integration/aligned_alloc.c139
-rw-r--r--memory/jemalloc/src/test/integration/allocated.c125
-rw-r--r--memory/jemalloc/src/test/integration/chunk.c293
-rw-r--r--memory/jemalloc/src/test/integration/mallocx.c234
-rw-r--r--memory/jemalloc/src/test/integration/overflow.c49
-rw-r--r--memory/jemalloc/src/test/integration/posix_memalign.c133
-rw-r--r--memory/jemalloc/src/test/integration/rallocx.c259
-rw-r--r--memory/jemalloc/src/test/integration/sdallocx.c57
-rw-r--r--memory/jemalloc/src/test/integration/thread_arena.c79
-rw-r--r--memory/jemalloc/src/test/integration/thread_tcache_enabled.c113
-rw-r--r--memory/jemalloc/src/test/integration/xallocx.c497
-rw-r--r--memory/jemalloc/src/test/src/SFMT.c719
-rw-r--r--memory/jemalloc/src/test/src/btalloc.c8
-rw-r--r--memory/jemalloc/src/test/src/btalloc_0.c3
-rw-r--r--memory/jemalloc/src/test/src/btalloc_1.c3
-rw-r--r--memory/jemalloc/src/test/src/math.c2
-rw-r--r--memory/jemalloc/src/test/src/mq.c29
-rw-r--r--memory/jemalloc/src/test/src/mtx.c73
-rw-r--r--memory/jemalloc/src/test/src/test.c133
-rw-r--r--memory/jemalloc/src/test/src/thd.c39
-rw-r--r--memory/jemalloc/src/test/src/timer.c60
-rw-r--r--memory/jemalloc/src/test/stress/microbench.c182
-rw-r--r--memory/jemalloc/src/test/test.sh.in53
-rw-r--r--memory/jemalloc/src/test/unit/SFMT.c1605
-rw-r--r--memory/jemalloc/src/test/unit/a0.c19
-rw-r--r--memory/jemalloc/src/test/unit/arena_reset.c159
-rw-r--r--memory/jemalloc/src/test/unit/atomic.c122
-rw-r--r--memory/jemalloc/src/test/unit/bitmap.c163
-rw-r--r--memory/jemalloc/src/test/unit/ckh.c214
-rw-r--r--memory/jemalloc/src/test/unit/decay.c374
-rw-r--r--memory/jemalloc/src/test/unit/fork.c64
-rw-r--r--memory/jemalloc/src/test/unit/hash.c185
-rw-r--r--memory/jemalloc/src/test/unit/junk.c253
-rw-r--r--memory/jemalloc/src/test/unit/junk_alloc.c3
-rw-r--r--memory/jemalloc/src/test/unit/junk_free.c3
-rw-r--r--memory/jemalloc/src/test/unit/lg_chunk.c26
-rw-r--r--memory/jemalloc/src/test/unit/mallctl.c731
-rw-r--r--memory/jemalloc/src/test/unit/math.c398
-rw-r--r--memory/jemalloc/src/test/unit/mq.c93
-rw-r--r--memory/jemalloc/src/test/unit/mtx.c60
-rw-r--r--memory/jemalloc/src/test/unit/nstime.c227
-rw-r--r--memory/jemalloc/src/test/unit/ph.c290
-rw-r--r--memory/jemalloc/src/test/unit/prng.c263
-rw-r--r--memory/jemalloc/src/test/unit/prof_accum.c91
-rw-r--r--memory/jemalloc/src/test/unit/prof_active.c136
-rw-r--r--memory/jemalloc/src/test/unit/prof_gdump.c81
-rw-r--r--memory/jemalloc/src/test/unit/prof_idump.c51
-rw-r--r--memory/jemalloc/src/test/unit/prof_reset.c303
-rw-r--r--memory/jemalloc/src/test/unit/prof_thread_name.c129
-rw-r--r--memory/jemalloc/src/test/unit/ql.c209
-rw-r--r--memory/jemalloc/src/test/unit/qr.c248
-rw-r--r--memory/jemalloc/src/test/unit/quarantine.c108
-rw-r--r--memory/jemalloc/src/test/unit/rb.c354
-rw-r--r--memory/jemalloc/src/test/unit/rtree.c151
-rw-r--r--memory/jemalloc/src/test/unit/run_quantize.c149
-rw-r--r--memory/jemalloc/src/test/unit/size_classes.c184
-rw-r--r--memory/jemalloc/src/test/unit/smoothstep.c106
-rw-r--r--memory/jemalloc/src/test/unit/stats.c447
-rw-r--r--memory/jemalloc/src/test/unit/ticker.c76
-rw-r--r--memory/jemalloc/src/test/unit/tsd.c112
-rw-r--r--memory/jemalloc/src/test/unit/util.c317
-rw-r--r--memory/jemalloc/src/test/unit/witness.c278
-rw-r--r--memory/jemalloc/src/test/unit/zero.c80
-rwxr-xr-xmemory/jemalloc/update.sh20
-rw-r--r--memory/jemalloc/upstream.info2
-rw-r--r--memory/moz.build28
-rw-r--r--memory/mozalloc/moz.build57
-rw-r--r--memory/mozalloc/mozalloc.cpp221
-rw-r--r--memory/mozalloc/mozalloc.h361
-rw-r--r--memory/mozalloc/mozalloc_abort.cpp89
-rw-r--r--memory/mozalloc/mozalloc_abort.h28
-rw-r--r--memory/mozalloc/mozalloc_oom.cpp53
-rw-r--r--memory/mozalloc/mozalloc_oom.h31
-rw-r--r--memory/mozalloc/msvc_raise_wrappers.cpp63
-rw-r--r--memory/mozalloc/msvc_raise_wrappers.h41
-rw-r--r--memory/mozalloc/staticruntime/moz.build35
-rw-r--r--memory/mozalloc/throw_gcc.h145
-rw-r--r--memory/mozalloc/throw_msvc.h17
-rw-r--r--memory/mozalloc/winheap.cpp74
-rw-r--r--memory/mozjemalloc/Makefile.in13
-rw-r--r--memory/mozjemalloc/jemalloc.c7188
-rw-r--r--memory/mozjemalloc/jemalloc_types.h91
-rw-r--r--memory/mozjemalloc/linkedlist.h77
-rw-r--r--memory/mozjemalloc/moz.build44
-rw-r--r--memory/mozjemalloc/osx_zone_types.h147
-rw-r--r--memory/mozjemalloc/ql.h114
-rw-r--r--memory/mozjemalloc/qr.h98
-rw-r--r--memory/mozjemalloc/rb.h982
-rw-r--r--memory/replace/dmd/DMD.cpp2122
-rw-r--r--memory/replace/dmd/DMD.h310
-rw-r--r--memory/replace/dmd/README2
-rw-r--r--memory/replace/dmd/block_analyzer.py261
-rwxr-xr-xmemory/replace/dmd/dmd.py890
-rw-r--r--memory/replace/dmd/moz.build39
-rw-r--r--memory/replace/dmd/test/SmokeDMD.cpp379
-rw-r--r--memory/replace/dmd/test/basic-scan-32-expected.txt25
-rw-r--r--memory/replace/dmd/test/basic-scan-64-expected.txt25
-rw-r--r--memory/replace/dmd/test/complete-empty-cumulative-expected.txt18
-rw-r--r--memory/replace/dmd/test/complete-empty-dark-matter-expected.txt29
-rw-r--r--memory/replace/dmd/test/complete-empty-live-expected.txt18
-rw-r--r--memory/replace/dmd/test/complete-full1-dark-matter-expected.txt265
-rw-r--r--memory/replace/dmd/test/complete-full1-live-expected.txt127
-rw-r--r--memory/replace/dmd/test/complete-full2-cumulative-expected.txt173
-rw-r--r--memory/replace/dmd/test/complete-full2-dark-matter-expected.txt140
-rw-r--r--memory/replace/dmd/test/complete-partial-live-expected.txt56
-rw-r--r--memory/replace/dmd/test/moz.build26
-rw-r--r--memory/replace/dmd/test/scan-test.py83
-rw-r--r--memory/replace/dmd/test/script-diff-dark-matter-expected.txt127
-rw-r--r--memory/replace/dmd/test/script-diff-dark-matter1.json51
-rw-r--r--memory/replace/dmd/test/script-diff-dark-matter2.json51
-rw-r--r--memory/replace/dmd/test/script-diff-live-expected.txt81
-rw-r--r--memory/replace/dmd/test/script-diff-live1.json51
-rw-r--r--memory/replace/dmd/test/script-diff-live2.json53
-rw-r--r--memory/replace/dmd/test/script-ignore-alloc-fns-expected.txt72
-rw-r--r--memory/replace/dmd/test/script-ignore-alloc-fns.json46
-rw-r--r--memory/replace/dmd/test/script-max-frames-1-expected.txt26
-rw-r--r--memory/replace/dmd/test/script-max-frames-3-expected.txt48
-rw-r--r--memory/replace/dmd/test/script-max-frames-8-expected.txt69
-rw-r--r--memory/replace/dmd/test/script-max-frames.json43
-rw-r--r--memory/replace/dmd/test/script-sort-by-num-blocks-expected.txt46
-rw-r--r--memory/replace/dmd/test/script-sort-by-req-expected.txt46
-rw-r--r--memory/replace/dmd/test/script-sort-by-slop-expected.txt46
-rw-r--r--memory/replace/dmd/test/script-sort-by-usable-expected.txt46
-rw-r--r--memory/replace/dmd/test/script-sort-by.json.gzbin0 -> 292 bytes
-rw-r--r--memory/replace/dmd/test/test_dmd.js226
-rw-r--r--memory/replace/dmd/test/xpcshell.ini35
-rw-r--r--memory/replace/dummy/dummy_replace_malloc.c15
-rw-r--r--memory/replace/dummy/moz.build14
-rw-r--r--memory/replace/jemalloc/moz.build31
-rw-r--r--memory/replace/jemalloc/pthread_atfork.c10
-rw-r--r--memory/replace/logalloc/FdPrintf.cpp132
-rw-r--r--memory/replace/logalloc/FdPrintf.h26
-rw-r--r--memory/replace/logalloc/LogAlloc.cpp276
-rw-r--r--memory/replace/logalloc/README107
-rw-r--r--memory/replace/logalloc/moz.build41
-rw-r--r--memory/replace/logalloc/replay/Makefile.in32
-rw-r--r--memory/replace/logalloc/replay/Replay.cpp558
-rw-r--r--memory/replace/logalloc/replay/logalloc_munge.py147
-rw-r--r--memory/replace/logalloc/replay/moz.build27
-rw-r--r--memory/replace/logalloc/replay/replay.log17
-rw-r--r--memory/replace/moz.build20
-rw-r--r--memory/replace/replace/ReplaceMalloc.cpp253
-rw-r--r--memory/replace/replace/moz.build13
-rw-r--r--memory/volatile/VolatileBuffer.h173
-rw-r--r--memory/volatile/VolatileBufferAshmem.cpp139
-rw-r--r--memory/volatile/VolatileBufferFallback.cpp91
-rw-r--r--memory/volatile/VolatileBufferOSX.cpp129
-rw-r--r--memory/volatile/VolatileBufferWindows.cpp160
-rw-r--r--memory/volatile/moz.build31
-rw-r--r--memory/volatile/tests/TestVolatileBuffer.cpp102
-rw-r--r--memory/volatile/tests/moz.build11
312 files changed, 94028 insertions, 0 deletions
diff --git a/memory/build/jemalloc_config.cpp b/memory/build/jemalloc_config.cpp
new file mode 100644
index 000000000..441fd8a2b
--- /dev/null
+++ b/memory/build/jemalloc_config.cpp
@@ -0,0 +1,164 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef MOZ_JEMALLOC4
+
+#define MOZ_JEMALLOC_IMPL
+
+/* mozmemory_wrap.h needs to be included before MFBT headers */
+#include "mozmemory_wrap.h"
+#include <mozilla/Assertions.h>
+#include "mozilla/Types.h"
+
+#if defined(MOZ_SYSTEM_JEMALLOC)
+#include MALLOC_H
+#else
+#define DLLEXPORT
+#include "jemalloc/jemalloc.h"
+#endif
+
+#ifdef XP_WIN
+#include <windows.h>
+#endif
+#ifdef XP_DARWIN
+#include <sys/mman.h>
+#endif
+
+/* Override some jemalloc defaults */
+#ifdef DEBUG
+#define MOZ_MALLOC_BUILD_OPTIONS ",junk:true"
+#else
+#define MOZ_MALLOC_BUILD_OPTIONS ",junk:free"
+#endif
+
+#define MOZ_MALLOC_OPTIONS "narenas:1,tcache:false"
+MFBT_DATA const char* je_(malloc_conf) =
+ MOZ_MALLOC_OPTIONS MOZ_MALLOC_BUILD_OPTIONS;
+
+#ifdef ANDROID
+#include <android/log.h>
+
+static void
+_je_malloc_message(void* cbopaque, const char* s)
+{
+ __android_log_print(ANDROID_LOG_INFO, "GeckoJemalloc", "%s", s);
+}
+
+void (*je_(malloc_message))(void*, const char* s) = _je_malloc_message;
+#endif
+
+/* Jemalloc supports hooks that are called on chunk
+ * allocate/deallocate/commit/decommit/purge/etc.
+ *
+ * We currently only hook commit, decommit and purge. We do this to tweak
+ * the way chunks are handled so that RSS stays lower than it normally
+ * would with jemalloc's defaults.
+ * This somewhat matches the behavior of mozjemalloc, except it doesn't
+ * rely on a double purge on mac, instead purging directly. (Yes, this
+ * means we can get rid of jemalloc_purge_freed_pages at some point)
+ *
+ * The default for jemalloc is to do the following:
+ * - commit, decommit: nothing
+ * - purge: MEM_RESET on Windows, MADV_FREE on Mac/BSD, MADV_DONTNEED on Linux
+ *
+ * The hooks we set up do the following:
+ * on Windows:
+ * - commit: MEM_COMMIT
+ * - decommit: MEM_DECOMMIT
+ * on Mac:
+ * - purge: mmap new anonymous memory on top of the chunk
+ *
+ * We only set the above hooks, others are left with the default.
+ */
+#if defined(XP_WIN) || defined(XP_DARWIN)
+class JemallocInit {
+public:
+ JemallocInit()
+ {
+ chunk_hooks_t hooks;
+ size_t hooks_len;
+ unsigned narenas;
+ size_t mib[3];
+ size_t size;
+
+ size = sizeof(narenas);
+ je_(mallctl)("arenas.narenas", &narenas, &size, nullptr, 0);
+
+ size = sizeof(mib) / sizeof(mib[0]);
+ je_(mallctlnametomib)("arena.0.chunk_hooks", mib, &size);
+
+ /* Set the hooks on all the existing arenas. */
+ for (unsigned arena = 0; arena < narenas; arena++) {
+ mib[1] = arena;
+ hooks_len = sizeof(hooks);
+ je_(mallctlbymib)(mib, size, &hooks, &hooks_len, nullptr, 0);
+
+#ifdef XP_WIN
+ hooks.commit = CommitHook;
+ hooks.decommit = DecommitHook;
+#endif
+#ifdef XP_DARWIN
+ hooks.purge = PurgeHook;
+#endif
+
+ je_(mallctlbymib)(mib, size, nullptr, nullptr, &hooks, hooks_len);
+ }
+ }
+
+private:
+#ifdef XP_WIN
+ static bool
+ CommitHook(void* chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+ {
+ void* addr = reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(chunk) + static_cast<uintptr_t>(offset));
+
+ if (!VirtualAlloc(addr, length, MEM_COMMIT, PAGE_READWRITE))
+ return true;
+
+ return false;
+ }
+
+ static bool
+ DecommitHook(void* chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+ {
+ void* addr = reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(chunk) + static_cast<uintptr_t>(offset));
+
+ if (!VirtualFree(addr, length, MEM_DECOMMIT))
+ MOZ_CRASH();
+
+ return false;
+ }
+#endif
+
+#ifdef XP_DARWIN
+ static bool
+ PurgeHook(void* chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+ {
+ void* addr = reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(chunk) + static_cast<uintptr_t>(offset));
+
+ void* new_addr = mmap(addr, length, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
+ return (new_addr != addr);
+ }
+#endif
+};
+
+/* For the static constructor from the class above */
+JemallocInit gJemallocInit;
+#endif
+
+#else
+#include <mozilla/Assertions.h>
+#endif /* MOZ_JEMALLOC4 */
+
+/* Provide an abort function for use in jemalloc code */
+extern "C" void moz_abort() {
+ MOZ_CRASH();
+}
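
As a usage sketch of the chunk-hooks machinery above: the following reads arena 0's hooks back through the same mallctl MIB interface, e.g. to verify that a custom purge hook is installed. It assumes a jemalloc 4 build with the je_() mangling from mozmemory_wrap.h; the check_purge_hook() helper name is hypothetical and return codes are ignored for brevity.

    #include "mozmemory_wrap.h"
    #include "jemalloc/jemalloc.h"

    /* Hypothetical helper: returns true if arena 0's purge hook is the
     * given function. Sketch only. */
    static bool
    check_purge_hook(bool (*expected)(void*, size_t, size_t, size_t, unsigned))
    {
      chunk_hooks_t hooks;
      size_t hooks_len = sizeof(hooks);
      size_t mib[3];
      size_t miblen = sizeof(mib) / sizeof(mib[0]);

      je_(mallctlnametomib)("arena.0.chunk_hooks", mib, &miblen);
      mib[1] = 0; /* arena index */
      je_(mallctlbymib)(mib, miblen, &hooks, &hooks_len, nullptr, 0);
      return hooks.purge == expected;
    }
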
diff --git a/memory/build/malloc_decls.h b/memory/build/malloc_decls.h
new file mode 100644
index 000000000..f3f9570ac
--- /dev/null
+++ b/memory/build/malloc_decls.h
@@ -0,0 +1,70 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Helper header to declare all the supported malloc functions.
+ * MALLOC_DECL arguments are:
+ * - function name
+ * - return type
+ * - argument types
+ */
+
+#ifndef malloc_decls_h
+# define malloc_decls_h
+
+# include "jemalloc_types.h"
+
+#ifndef MALLOC_USABLE_SIZE_CONST_PTR
+#define MALLOC_USABLE_SIZE_CONST_PTR const
+#endif
+
+typedef MALLOC_USABLE_SIZE_CONST_PTR void * usable_ptr_t;
+
+# define MALLOC_FUNCS_MALLOC 1
+# define MALLOC_FUNCS_JEMALLOC 2
+# define MALLOC_FUNCS_INIT 4
+# define MALLOC_FUNCS_BRIDGE 8
+# define MALLOC_FUNCS_ALL (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE | \
+ MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC)
+
+#endif /* malloc_decls_h */
+
+#ifndef MALLOC_FUNCS
+# define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC)
+#endif
+
+#ifdef MALLOC_DECL
+# ifndef MALLOC_DECL_VOID
+# define MALLOC_DECL_VOID(func, ...) MALLOC_DECL(func, void, __VA_ARGS__)
+# endif
+
+# if MALLOC_FUNCS & MALLOC_FUNCS_INIT
+MALLOC_DECL(init, void, const malloc_table_t *)
+# endif
+# if MALLOC_FUNCS & MALLOC_FUNCS_BRIDGE
+MALLOC_DECL(get_bridge, struct ReplaceMallocBridge*, void)
+# endif
+# if MALLOC_FUNCS & MALLOC_FUNCS_MALLOC
+MALLOC_DECL(malloc, void *, size_t)
+MALLOC_DECL(posix_memalign, int, void **, size_t, size_t)
+MALLOC_DECL(aligned_alloc, void *, size_t, size_t)
+MALLOC_DECL(calloc, void *, size_t, size_t)
+MALLOC_DECL(realloc, void *, void *, size_t)
+MALLOC_DECL_VOID(free, void *)
+MALLOC_DECL(memalign, void *, size_t, size_t)
+MALLOC_DECL(valloc, void *, size_t)
+MALLOC_DECL(malloc_usable_size, size_t, usable_ptr_t)
+MALLOC_DECL(malloc_good_size, size_t, size_t)
+# endif
+# if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
+MALLOC_DECL_VOID(jemalloc_stats, jemalloc_stats_t *)
+MALLOC_DECL_VOID(jemalloc_purge_freed_pages, void)
+MALLOC_DECL_VOID(jemalloc_free_dirty_pages, void)
+# endif
+
+# undef MALLOC_DECL_VOID
+#endif /* MALLOC_DECL */
+
+#undef MALLOC_DECL
+#undef MALLOC_FUNCS
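
To make the X-macro pattern concrete, a sketch of how a consumer generates declarations from this header (the _example suffix is hypothetical): define MALLOC_DECL, optionally narrow the set with MALLOC_FUNCS, then include the header; it #undefs both macros at the end, so it can be included again with a different MALLOC_DECL.

    /* Generate plain prototypes for just the malloc family. */
    #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
    #define MALLOC_DECL(name, return_type, ...) \
      return_type name ## _example(__VA_ARGS__);
    #include "malloc_decls.h"
    /* The include expands to prototypes such as:
     *   void * malloc_example(size_t);
     *   int posix_memalign_example(void **, size_t, size_t);
     *   void free_example(void *);
     */
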
diff --git a/memory/build/moz.build b/memory/build/moz.build
new file mode 100644
index 000000000..8d80561b1
--- /dev/null
+++ b/memory/build/moz.build
@@ -0,0 +1,55 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'mozmemory.h',
+ 'mozmemory_wrap.h',
+]
+
+LIBRARY_DEFINES['MOZ_HAS_MOZGLUE'] = True
+DEFINES['MOZ_MEMORY_IMPL'] = True
+
+if CONFIG['MOZ_REPLACE_MALLOC']:
+ EXPORTS += [
+ 'malloc_decls.h',
+ 'replace_malloc.h',
+ 'replace_malloc_bridge.h',
+ ]
+
+SOURCES += [
+ 'jemalloc_config.cpp',
+ 'mozmemory_wrap.c',
+]
+
+if CONFIG['MOZ_JEMALLOC4']:
+ SOURCES += [
+ 'mozjemalloc_compat.c',
+ ]
+ LOCAL_INCLUDES += ['!../jemalloc/src/include']
+ if CONFIG['_MSC_VER']:
+ LOCAL_INCLUDES += ['/memory/jemalloc/src/include/msvc_compat']
+ if not CONFIG['HAVE_INTTYPES_H']:
+ LOCAL_INCLUDES += ['/memory/jemalloc/src/include/msvc_compat/C99']
+
+if CONFIG['MOZ_REPLACE_MALLOC']:
+ SOURCES += [
+ 'replace_malloc.c',
+ ]
+
+Library('memory')
+
+if CONFIG['MOZ_GLUE_IN_PROGRAM']:
+ SDK_LIBRARY = True
+ DIST_INSTALL = True
+
+# Keep jemalloc separated when mozglue is statically linked
+if CONFIG['MOZ_MEMORY'] and (CONFIG['OS_TARGET'] in ('WINNT', 'Darwin', 'Android') or
+ CONFIG['MOZ_SYSTEM_JEMALLOC']):
+ FINAL_LIBRARY = 'mozglue'
+
+if CONFIG['MOZ_REPLACE_MALLOC'] and CONFIG['OS_TARGET'] == 'Darwin':
+ # The zone allocator for OSX needs some jemalloc internal functions
+ LOCAL_INCLUDES += ['/memory/jemalloc/src/include']
diff --git a/memory/build/mozjemalloc_compat.c b/memory/build/mozjemalloc_compat.c
new file mode 100644
index 000000000..6591892c1
--- /dev/null
+++ b/memory/build/mozjemalloc_compat.c
@@ -0,0 +1,182 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOZ_JEMALLOC4
+# error Should only compile this file when building with jemalloc 3
+#endif
+
+#define MOZ_JEMALLOC_IMPL
+
+#include "mozmemory_wrap.h"
+#include "jemalloc_types.h"
+#include "mozilla/Types.h"
+
+#include <stdbool.h>
+
+#if defined(MOZ_SYSTEM_JEMALLOC)
+# include MALLOC_H
+#else
+# include "jemalloc/jemalloc.h"
+#endif
+
+/*
+ * CTL_* macros are from memory/jemalloc/src/src/stats.c with changes:
+ * - drop `t' argument to avoid redundancy in calculating type size
+ * - require `i' argument for arena number explicitly
+ */
+
+#define CTL_GET(n, v) do { \
+ size_t sz = sizeof(v); \
+ je_(mallctl)(n, &v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_I_GET(n, v, i) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(mib[0]); \
+ size_t sz = sizeof(v); \
+ je_(mallctlnametomib)(n, mib, &miblen); \
+ mib[2] = i; \
+ je_(mallctlbymib)(mib, miblen, &v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_IJ_GET(n, v, i, j) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(mib[0]); \
+ size_t sz = sizeof(v); \
+ je_(mallctlnametomib)(n, mib, &miblen); \
+ mib[2] = i; \
+ mib[4] = j; \
+ je_(mallctlbymib)(mib, miblen, &v, &sz, NULL, 0); \
+} while (0)
+
+/*
+ * VARIABLE_ARRAY is copied from
+ * memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in
+ */
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# ifdef HAVE_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+MOZ_MEMORY_API size_t
+malloc_good_size_impl(size_t size)
+{
+ /* je_nallocx crashes when given a size of 0. As
+ * malloc_usable_size(malloc(0)) and malloc_usable_size(malloc(1))
+ * return the same value, use a size of 1. */
+ if (size == 0)
+ size = 1;
+ return je_(nallocx)(size, 0);
+}
+
+static void
+compute_bin_unused_and_bookkeeping(jemalloc_stats_t *stats, unsigned int narenas)
+{
+ size_t bin_unused = 0;
+
+ uint32_t nregs; // number of regions per run in the j-th bin
+ size_t reg_size; // size of regions served by the j-th bin
+ size_t curruns; // number of runs belonging to a bin
+ size_t curregs; // number of allocated regions in a bin
+
+ unsigned int nbins; // number of bins per arena
+ unsigned int i, j;
+
+ size_t stats_metadata;
+ size_t stats_ametadata = 0; // total internal allocations in all arenas
+
+ // narenas also counts uninitialized arenas, and initialized arenas
+ // are not guaranteed to be adjacent
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz = sizeof(initialized) / sizeof(initialized[0]);
+
+ je_(mallctl)("arenas.initialized", initialized, &isz, NULL, 0);
+ CTL_GET("arenas.nbins", nbins);
+
+ for (j = 0; j < nbins; j++) {
+ CTL_I_GET("arenas.bin.0.nregs", nregs, j);
+ CTL_I_GET("arenas.bin.0.size", reg_size, j);
+
+ for (i = 0; i < narenas; i++) {
+ if (!initialized[i]) {
+ continue;
+ }
+
+ CTL_IJ_GET("stats.arenas.0.bins.0.curruns", curruns, i, j);
+ CTL_IJ_GET("stats.arenas.0.bins.0.curregs", curregs, i, j);
+
+ bin_unused += (nregs * curruns - curregs) * reg_size;
+ }
+ }
+
+ CTL_GET("stats.metadata", stats_metadata);
+
+ /* get the summation for all arenas, i == narenas */
+ CTL_I_GET("stats.arenas.0.metadata.allocated", stats_ametadata, narenas);
+
+ stats->bookkeeping = stats_metadata - stats_ametadata;
+ stats->bin_unused = bin_unused;
+}
+
+MOZ_JEMALLOC_API void
+jemalloc_stats_impl(jemalloc_stats_t *stats)
+{
+ unsigned narenas;
+ size_t active, allocated, mapped, page, pdirty;
+ size_t lg_chunk;
+
+ // Refresh jemalloc's stats by updating its epoch, see ctl_refresh in
+ // src/ctl.c
+ uint64_t epoch = 0;
+ size_t esz = sizeof(epoch);
+ je_(mallctl)("epoch", &epoch, &esz, &epoch, esz);
+
+ CTL_GET("arenas.narenas", narenas);
+ CTL_GET("arenas.page", page);
+ CTL_GET("stats.active", active);
+ CTL_GET("stats.allocated", allocated);
+ CTL_GET("stats.mapped", mapped);
+ CTL_GET("opt.lg_chunk", lg_chunk);
+
+ /* get the summation for all arenas, i == narenas */
+ CTL_I_GET("stats.arenas.0.pdirty", pdirty, narenas);
+
+ stats->chunksize = (size_t) 1 << lg_chunk;
+ stats->mapped = mapped;
+ stats->allocated = allocated;
+ stats->waste = active - allocated;
+ stats->page_cache = pdirty * page;
+ compute_bin_unused_and_bookkeeping(stats, narenas);
+ stats->waste -= stats->bin_unused;
+}
+
+MOZ_JEMALLOC_API void
+jemalloc_purge_freed_pages_impl()
+{
+}
+
+MOZ_JEMALLOC_API void
+jemalloc_free_dirty_pages_impl()
+{
+ unsigned narenas;
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(mib[0]);
+
+ CTL_GET("arenas.narenas", narenas);
+ je_(mallctlnametomib)("arena.0.purge", mib, &miblen);
+ mib[1] = narenas;
+ je_(mallctlbymib)(mib, miblen, NULL, NULL, NULL, 0);
+}
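
For comparison, a sketch of jemalloc_free_dirty_pages_impl() written against the string-based mallctl interface instead of the name-to-MIB translation used above. It relies on the same jemalloc 4 convention noted in the code (an arena index equal to narenas in "arena.<i>.purge" means "all arenas"); purge_all_arenas_by_name() is a hypothetical name.

    #include <stdio.h>

    static void
    purge_all_arenas_by_name(void)
    {
      unsigned narenas;
      size_t sz = sizeof(narenas);
      char ctl[32];

      je_(mallctl)("arenas.narenas", &narenas, &sz, NULL, 0);
      /* Format the "all arenas" index directly into the control name. */
      snprintf(ctl, sizeof(ctl), "arena.%u.purge", narenas);
      je_(mallctl)(ctl, NULL, NULL, NULL, 0);
    }
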
diff --git a/memory/build/mozmemory.h b/memory/build/mozmemory.h
new file mode 100644
index 000000000..84007fffb
--- /dev/null
+++ b/memory/build/mozmemory.h
@@ -0,0 +1,91 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozmemory_h
+#define mozmemory_h
+
+/*
+ * This header is meant to be used when the following functions are
+ * necessary:
+ * - malloc_good_size (used to be called je_malloc_usable_in_advance)
+ * - jemalloc_stats
+ * - jemalloc_purge_freed_pages
+ * - jemalloc_free_dirty_pages
+ */
+
+#ifndef MOZ_MEMORY
+# error Should not include mozmemory.h when MOZ_MEMORY is not set
+#endif
+
+#include "mozmemory_wrap.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Types.h"
+#include "jemalloc_types.h"
+
+MOZ_BEGIN_EXTERN_C
+
+/*
+ * On OSX, malloc/malloc.h contains the declaration for malloc_good_size,
+ * which will call back into jemalloc through the zone allocator, so just use it.
+ */
+#ifdef XP_DARWIN
+# include <malloc/malloc.h>
+#else
+MOZ_MEMORY_API size_t malloc_good_size_impl(size_t size);
+
+/* Note: the MOZ_GLUE_IN_PROGRAM ifdef below is there to avoid -Werror turning
+ * the protective if into errors. MOZ_GLUE_IN_PROGRAM is what triggers MFBT_API
+ * to use weak imports. */
+
+static inline size_t _malloc_good_size(size_t size) {
+# if defined(MOZ_GLUE_IN_PROGRAM) && !defined(IMPL_MFBT)
+ if (!malloc_good_size)
+ return size;
+# endif
+ return malloc_good_size_impl(size);
+}
+
+# define malloc_good_size _malloc_good_size
+#endif
+
+MOZ_JEMALLOC_API void jemalloc_stats(jemalloc_stats_t *stats);
+
+/*
+ * On some operating systems (Mac), we use madvise(MADV_FREE) to hand pages
+ * back to the operating system. On Mac, the operating system doesn't take
+ * this memory back immediately; instead, the OS takes it back only when the
+ * machine is running out of physical memory.
+ *
+ * This is great from the standpoint of efficiency, but it makes measuring our
+ * actual RSS difficult, because pages which we've MADV_FREE'd shouldn't count
+ * against our RSS.
+ *
+ * This function explicitly purges any MADV_FREE'd pages from physical memory,
+ * causing our reported RSS to match the amount of memory we're actually using.
+ *
+ * Note that this call is expensive in two ways. First, it may be slow to
+ * execute, because it may make a number of slow syscalls to free memory. This
+ * function holds the big jemalloc locks, so basically all threads are blocked
+ * while this function runs.
+ *
+ * This function is also expensive in that the next time we go to access a page
+ * which we've just explicitly decommitted, the operating system has to attach
+ * to it a physical page! If we hadn't run this function, the OS would have
+ * less work to do.
+ *
+ * If MALLOC_DOUBLE_PURGE is not defined, this function does nothing.
+ */
+MOZ_JEMALLOC_API void jemalloc_purge_freed_pages();
+
+/*
+ * Free all unused dirty pages in all arenas. Calling this function will slow
+ * down subsequent allocations so it is recommended to use it only when
+ * memory needs to be reclaimed at all costs (see bug 805855). This function
+ * provides functionality similar to mallctl("arenas.purge") in jemalloc 3.
+ */
+MOZ_JEMALLOC_API void jemalloc_free_dirty_pages();
+
+MOZ_END_EXTERN_C
+
+#endif /* mozmemory_h */
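
A minimal consumer-side sketch of the API declared in this header (print_heap_summary() is a hypothetical function; the fields shown are among those filled in by jemalloc_stats_impl() in mozjemalloc_compat.c above):

    #include <stdio.h>
    #include "mozmemory.h"

    static void
    print_heap_summary(void)
    {
      jemalloc_stats_t stats;
      jemalloc_stats(&stats);

      printf("mapped:     %zu\n", stats.mapped);
      printf("allocated:  %zu\n", stats.allocated);
      printf("waste:      %zu\n", stats.waste);
      printf("page_cache: %zu\n", stats.page_cache);

      /* Hand unused dirty pages back to the OS, then measure again. */
      jemalloc_free_dirty_pages();
      jemalloc_stats(&stats);
      printf("page_cache after purge: %zu\n", stats.page_cache);
    }
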
diff --git a/memory/build/mozmemory_wrap.c b/memory/build/mozmemory_wrap.c
new file mode 100644
index 000000000..dba3ace56
--- /dev/null
+++ b/memory/build/mozmemory_wrap.c
@@ -0,0 +1,178 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <string.h>
+#include "mozmemory_wrap.h"
+#include "mozilla/Types.h"
+
+/* Declare malloc implementation functions with the right return and
+ * argument types. */
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#include "malloc_decls.h"
+
+#ifdef MOZ_WRAP_NEW_DELETE
+/* operator new(unsigned int) */
+MOZ_MEMORY_API void *
+mozmem_malloc_impl(_Znwj)(unsigned int size)
+{
+ return malloc_impl(size);
+}
+/* operator new[](unsigned int) */
+MOZ_MEMORY_API void *
+mozmem_malloc_impl(_Znaj)(unsigned int size)
+{
+ return malloc_impl(size);
+}
+/* operator delete(void*) */
+MOZ_MEMORY_API void
+mozmem_malloc_impl(_ZdlPv)(void *ptr)
+{
+ free_impl(ptr);
+}
+/* operator delete[](void*) */
+MOZ_MEMORY_API void
+mozmem_malloc_impl(_ZdaPv)(void *ptr)
+{
+ free_impl(ptr);
+}
+/*operator new(unsigned int, std::nothrow_t const&)*/
+MOZ_MEMORY_API void *
+mozmem_malloc_impl(_ZnwjRKSt9nothrow_t)(unsigned int size)
+{
+ return malloc_impl(size);
+}
+/*operator new[](unsigned int, std::nothrow_t const&)*/
+MOZ_MEMORY_API void *
+mozmem_malloc_impl(_ZnajRKSt9nothrow_t)(unsigned int size)
+{
+ return malloc_impl(size);
+}
+/* operator delete(void*, std::nothrow_t const&) */
+MOZ_MEMORY_API void
+mozmem_malloc_impl(_ZdlPvRKSt9nothrow_t)(void *ptr)
+{
+ free_impl(ptr);
+}
+/* operator delete[](void*, std::nothrow_t const&) */
+MOZ_MEMORY_API void
+mozmem_malloc_impl(_ZdaPvRKSt9nothrow_t)(void *ptr)
+{
+ free_impl(ptr);
+}
+#endif
+
+/* strndup and strdup may be defined as macros in string.h, which would
+ * clash with the definitions below. */
+#undef strndup
+#undef strdup
+
+#ifndef XP_DARWIN
+MOZ_MEMORY_API char *
+strndup_impl(const char *src, size_t len)
+{
+ char* dst = (char*) malloc_impl(len + 1);
+ if (dst) {
+ strncpy(dst, src, len);
+ dst[len] = '\0';
+ }
+ return dst;
+}
+
+MOZ_MEMORY_API char *
+strdup_impl(const char *src)
+{
+ size_t len = strlen(src);
+ return strndup_impl(src, len);
+}
+#endif /* XP_DARWIN */
+
+#ifdef ANDROID
+#include <stdarg.h>
+#include <stdio.h>
+
+MOZ_MEMORY_API int
+vasprintf_impl(char **str, const char *fmt, va_list ap)
+{
+ char* ptr, *_ptr;
+ int ret;
+
+ if (str == NULL || fmt == NULL) {
+ return -1;
+ }
+
+ ptr = (char*)malloc_impl(128);
+ if (ptr == NULL) {
+ *str = NULL;
+ return -1;
+ }
+
+ ret = vsnprintf(ptr, 128, fmt, ap);
+ if (ret < 0) {
+ free_impl(ptr);
+ *str = NULL;
+ return -1;
+ }
+
+ _ptr = realloc_impl(ptr, ret + 1);
+ if (_ptr == NULL) {
+ free_impl(ptr);
+ *str = NULL;
+ return -1;
+ }
+
+ *str = _ptr;
+
+ return ret;
+}
+
+MOZ_MEMORY_API int
+asprintf_impl(char **str, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, fmt);
+
+ ret = vasprintf_impl(str, fmt, ap);
+
+ va_end(ap);
+
+ return ret;
+}
+#endif
+
+#ifdef XP_WIN
+/*
+ * There's a fun allocator mismatch in (at least) the VS 2010 CRT
+ * (see the giant comment in $(topsrcdir)/mozglue/build/Makefile.in)
+ * that gets redirected here to avoid a crash on shutdown.
+ */
+void
+dumb_free_thunk(void *ptr)
+{
+ return; /* shutdown leaks that we don't care about */
+}
+
+#include <wchar.h>
+
+/*
+ * We also need to provide our own impl of wcsdup so that we don't ask
+ * the CRT for memory from its heap (which will then be unfreeable).
+ */
+wchar_t *
+wcsdup_impl(const wchar_t *src)
+{
+ size_t len = wcslen(src);
+ wchar_t *dst = (wchar_t*) malloc_impl((len + 1) * sizeof(wchar_t));
+ if (dst)
+ wcsncpy(dst, src, len + 1);
+ return dst;
+}
+
+void *
+_aligned_malloc(size_t size, size_t alignment)
+{
+ return memalign_impl(alignment, size);
+}
+#endif /* XP_WIN */
diff --git a/memory/build/mozmemory_wrap.h b/memory/build/mozmemory_wrap.h
new file mode 100644
index 000000000..066d57782
--- /dev/null
+++ b/memory/build/mozmemory_wrap.h
@@ -0,0 +1,219 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozmemory_wrap_h
+#define mozmemory_wrap_h
+
+/*
+ * This header contains #defines which tweak the names of various memory
+ * allocation functions.
+ *
+ * There are several types of functions related to memory allocation
+ * that are meant to be used publicly by the Gecko codebase:
+ *
+ * - malloc implementation functions:
+ * - malloc
+ * - posix_memalign
+ * - aligned_alloc
+ * - calloc
+ * - realloc
+ * - free
+ * - memalign
+ * - valloc
+ * - malloc_usable_size
+ * - malloc_good_size
+ * Some of these functions are specific to some systems, but for
+ * convenience, they are treated as being cross-platform, and available
+ * as such.
+ *
+ * - duplication functions:
+ * - strndup
+ * - strdup
+ * - wcsdup (Windows only)
+ *
+ * - jemalloc specific functions:
+ * - jemalloc_stats
+ * - jemalloc_purge_freed_pages
+ * - jemalloc_free_dirty_pages
+ * (these functions are native to mozjemalloc, and have compatibility
+ * implementations for jemalloc3)
+ *
+ * These functions are all exported as part of libmozglue (see
+ * $(topsrcdir)/mozglue/build/Makefile.in), with a few implementation
+ * peculiarities:
+ *
+ * - On Windows, the malloc implementation functions are all prefixed with
+ * "je_", the duplication functions are prefixed with "wrap_", and jemalloc
+ * specific functions are left unprefixed. All these functions are however
+ * aliased when exporting them, such that the resulting mozglue.dll exports
+ * them unprefixed (see $(topsrcdir)/mozglue/build/mozglue.def.in). The
+ * prefixed malloc implementation and duplication functions are not
+ * exported.
+ *
+ * - On MacOSX, the system libc has a zone allocator, which allows us to
+ * hook custom malloc implementation functions without exporting them.
+ * The malloc implementation functions are all prefixed with "je_" and used
+ * this way from the custom zone allocator. They are not exported.
+ * Duplication functions are not included, since they will call the custom
+ * zone allocator anyways. Jemalloc-specific functions are also left
+ * unprefixed.
+ *
+ * - On Android and Gonk, all functions are left unprefixed. Additionally,
+ * C++ allocation functions (operator new/delete) are also exported and
+ * unprefixed.
+ *
+ * - On other systems (mostly Linux), all functions are left unprefixed.
+ *
+ * Only Android and Gonk add C++ allocation functions.
+ *
+ * Proper exporting of the various functions is done with the MOZ_MEMORY_API
+ * and MOZ_JEMALLOC_API macros. MOZ_MEMORY_API is meant to be used for malloc
+ * implementation and duplication functions, while MOZ_JEMALLOC_API is
+ * dedicated to jemalloc specific functions.
+ *
+ *
+ * All these functions are meant to be called with no prefix from Gecko code.
+ * In most cases, this is because that's how they are available at runtime.
+ * However, on Android, this relies on faulty.lib (the custom dynamic linker)
+ * resolving mozglue symbols before libc symbols, which is guaranteed by the
+ * way faulty.lib works (it respects the DT_NEEDED order, and libc always
+ * appears after mozglue, which we double-check when building anyway)
+ *
+ *
+ * Within libmozglue (when MOZ_MEMORY_IMPL is defined), all the functions
+ * should be suffixed with "_impl" both for declarations and use.
+ * That is, the implementation declaration for e.g. strdup would look like:
+ * char* strdup_impl(const char *)
+ * That implementation would call malloc by using "malloc_impl".
+ *
+ * While mozjemalloc uses these "_impl" suffixed helpers, jemalloc3, being
+ * third-party code, doesn't, but instead has an elaborate way to mangle
+ * individual functions. See under "Run jemalloc configure script" in
+ * $(topsrcdir)/configure.in.
+ *
+ *
+ * When building with replace-malloc support, the above still holds, but
+ * the malloc implementation and jemalloc specific functions are the
+ * replace-malloc functions from replace_malloc.c.
+ *
+ * The actual jemalloc/mozjemalloc implementation is prefixed with "je_".
+ *
+ * Thus, when MOZ_REPLACE_MALLOC is defined, the "_impl" suffixed macros
+ * expand to "je_" prefixed function when building mozjemalloc or
+ * jemalloc3/mozjemalloc_compat, where MOZ_JEMALLOC_IMPL is defined.
+ *
+ * In other cases, the "_impl" suffixed macros follow the original scheme,
+ * except on Windows and MacOSX, where they would expand to "je_" prefixed
+ * functions. Instead, they are left unmodified (malloc_impl expands to
+ * malloc_impl).
+ */
+
+#ifndef MOZ_MEMORY
+# error Should only include mozmemory_wrap.h when MOZ_MEMORY is set.
+#endif
+
+#if defined(MOZ_JEMALLOC_IMPL) && !defined(MOZ_MEMORY_IMPL)
+# define MOZ_MEMORY_IMPL
+#endif
+#if defined(MOZ_MEMORY_IMPL) && !defined(IMPL_MFBT)
+# ifdef MFBT_API /* mozilla/Types.h was already included */
+# error mozmemory_wrap.h has to be included before mozilla/Types.h when MOZ_MEMORY_IMPL is set and IMPL_MFBT is not.
+# endif
+# define IMPL_MFBT
+#endif
+
+#include "mozilla/Types.h"
+
+#if !defined(MOZ_SYSTEM_JEMALLOC)
+# ifdef MOZ_MEMORY_IMPL
+# if defined(MOZ_JEMALLOC_IMPL) && defined(MOZ_REPLACE_MALLOC) && !defined(MOZ_REPLACE_JEMALLOC)
+# define mozmem_malloc_impl(a) je_ ## a
+# define mozmem_jemalloc_impl(a) je_ ## a
+# else
+# define MOZ_JEMALLOC_API MFBT_API
+# ifdef MOZ_REPLACE_JEMALLOC
+# define MOZ_MEMORY_API MFBT_API
+# define mozmem_malloc_impl(a) replace_ ## a
+# define mozmem_jemalloc_impl(a) replace_ ## a
+# elif (defined(XP_WIN) || defined(XP_DARWIN))
+# if defined(MOZ_REPLACE_MALLOC)
+# define mozmem_malloc_impl(a) a ## _impl
+# else
+# define mozmem_malloc_impl(a) je_ ## a
+# endif
+# else
+# define MOZ_MEMORY_API MFBT_API
+# if defined(MOZ_WIDGET_ANDROID) || defined(MOZ_WIDGET_GONK)
+# define MOZ_WRAP_NEW_DELETE
+# endif
+# endif
+# endif
+# ifdef XP_WIN
+# define mozmem_dup_impl(a) wrap_ ## a
+# endif
+# endif
+
+/* All other jemalloc3 functions are prefixed with "je_", except when
+ * building against an unprefixed system jemalloc library */
+# define je_(a) je_ ## a
+#else /* defined(MOZ_SYSTEM_JEMALLOC) */
+# define je_(a) a
+#endif
+
+#if !defined(MOZ_MEMORY_IMPL) || defined(MOZ_SYSTEM_JEMALLOC)
+# define MOZ_MEMORY_API MFBT_API
+# define MOZ_JEMALLOC_API MFBT_API
+#endif
+
+#ifndef MOZ_MEMORY_API
+# define MOZ_MEMORY_API
+#endif
+#ifndef MOZ_JEMALLOC_API
+# define MOZ_JEMALLOC_API
+#endif
+
+#ifndef mozmem_malloc_impl
+# define mozmem_malloc_impl(a) a
+#endif
+#ifndef mozmem_dup_impl
+# define mozmem_dup_impl(a) a
+#endif
+#ifndef mozmem_jemalloc_impl
+# define mozmem_jemalloc_impl(a) a
+#endif
+
+/* Malloc implementation functions */
+#define malloc_impl mozmem_malloc_impl(malloc)
+#define posix_memalign_impl mozmem_malloc_impl(posix_memalign)
+#define aligned_alloc_impl mozmem_malloc_impl(aligned_alloc)
+#define calloc_impl mozmem_malloc_impl(calloc)
+#define realloc_impl mozmem_malloc_impl(realloc)
+#define free_impl mozmem_malloc_impl(free)
+#define memalign_impl mozmem_malloc_impl(memalign)
+#define valloc_impl mozmem_malloc_impl(valloc)
+#define malloc_usable_size_impl mozmem_malloc_impl(malloc_usable_size)
+#define malloc_good_size_impl mozmem_malloc_impl(malloc_good_size)
+
+/* Duplication functions */
+#define strndup_impl mozmem_dup_impl(strndup)
+#define strdup_impl mozmem_dup_impl(strdup)
+#ifdef XP_WIN
+# define wcsdup_impl mozmem_dup_impl(wcsdup)
+#endif
+
+/* String functions */
+#ifdef ANDROID
+/* Bug 801571 and Bug 879668: libstagefright uses vasprintf, causing malloc()/
+ * free() to be mismatched between the bionic and mozglue implementations.
+ */
+#define vasprintf_impl mozmem_dup_impl(vasprintf)
+#define asprintf_impl mozmem_dup_impl(asprintf)
+#endif
+
+/* Jemalloc specific function */
+#define jemalloc_stats_impl mozmem_jemalloc_impl(jemalloc_stats)
+#define jemalloc_purge_freed_pages_impl mozmem_jemalloc_impl(jemalloc_purge_freed_pages)
+#define jemalloc_free_dirty_pages_impl mozmem_jemalloc_impl(jemalloc_free_dirty_pages)
+
+#endif /* mozmemory_wrap_h */
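
To summarize the mangling rules above for one representative function, a sketch (comment only, derived from the #if ladder in this header) of what a call written as malloc_impl(size) compiles to inside libmozglue, assuming MOZ_MEMORY_IMPL and no MOZ_SYSTEM_JEMALLOC:

    /* - in mozjemalloc / mozjemalloc_compat code (MOZ_JEMALLOC_IMPL) with
     *   MOZ_REPLACE_MALLOC (and without MOZ_REPLACE_JEMALLOC):
     *                                     malloc_impl(size) -> je_malloc(size)
     * - in a replace-malloc jemalloc build (MOZ_REPLACE_JEMALLOC):
     *                                     malloc_impl(size) -> replace_malloc(size)
     * - elsewhere, on Windows/Mac with MOZ_REPLACE_MALLOC:
     *                                     malloc_impl(size) -> malloc_impl(size)
     * - elsewhere, on Windows/Mac without replace-malloc:
     *                                     malloc_impl(size) -> je_malloc(size)
     * - elsewhere, on other platforms:    malloc_impl(size) -> malloc(size)
     */
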
diff --git a/memory/build/replace_malloc.c b/memory/build/replace_malloc.c
new file mode 100644
index 000000000..88dfde33c
--- /dev/null
+++ b/memory/build/replace_malloc.c
@@ -0,0 +1,546 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOZ_MEMORY
+# error Should not compile this file when MOZ_MEMORY is not set
+#endif
+
+#ifndef MOZ_REPLACE_MALLOC
+# error Should not compile this file when replace-malloc is disabled
+#endif
+
+#ifdef MOZ_SYSTEM_JEMALLOC
+# error Should not compile this file when we want to use native jemalloc
+#endif
+
+#include "mozmemory_wrap.h"
+
+/* Declare all je_* functions */
+#define MALLOC_DECL(name, return_type, ...) \
+ return_type je_ ## name(__VA_ARGS__);
+#include "malloc_decls.h"
+
+#include "mozilla/Likely.h"
+
+/*
+ * Windows doesn't come with weak imports as they are possible with
+ * LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
+ * the replacement functions are defined as variable pointers to the
+ * function resolved with GetProcAddress() instead of weak definitions
+ * of functions. On Android, the same needs to happen as well, because
+ * the Android linker doesn't handle weak linking with non LD_PRELOADed
+ * libraries, but LD_PRELOADing is not very convenient on Android, with
+ * the zygote.
+ */
+#ifdef XP_DARWIN
+# define MOZ_REPLACE_WEAK __attribute__((weak_import))
+#elif defined(XP_WIN) || defined(MOZ_WIDGET_ANDROID)
+# define MOZ_NO_REPLACE_FUNC_DECL
+#elif defined(__GNUC__)
+# define MOZ_REPLACE_WEAK __attribute__((weak))
+#endif
+
+#include "replace_malloc.h"
+
+#define MALLOC_DECL(name, return_type, ...) \
+ je_ ## name,
+
+static const malloc_table_t malloc_table = {
+#include "malloc_decls.h"
+};
+
+#ifdef MOZ_NO_REPLACE_FUNC_DECL
+# define MALLOC_DECL(name, return_type, ...) \
+ typedef return_type (replace_ ## name ## _impl_t)(__VA_ARGS__); \
+ replace_ ## name ## _impl_t *replace_ ## name = NULL;
+# define MALLOC_FUNCS MALLOC_FUNCS_ALL
+# include "malloc_decls.h"
+
+# ifdef XP_WIN
+# include <windows.h>
+static void
+replace_malloc_init_funcs()
+{
+ char replace_malloc_lib[1024];
+ if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB", (LPSTR)&replace_malloc_lib,
+ sizeof(replace_malloc_lib)) > 0) {
+ HMODULE handle = LoadLibraryA(replace_malloc_lib);
+ if (handle) {
+#define MALLOC_DECL(name, ...) \
+ replace_ ## name = (replace_ ## name ## _impl_t *) GetProcAddress(handle, "replace_" # name);
+
+# define MALLOC_FUNCS MALLOC_FUNCS_ALL
+#include "malloc_decls.h"
+ }
+ }
+}
+# elif defined(MOZ_WIDGET_ANDROID)
+# include <dlfcn.h>
+# include <stdlib.h>
+static void
+replace_malloc_init_funcs()
+{
+ const char *replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
+ if (replace_malloc_lib && *replace_malloc_lib) {
+ void *handle = dlopen(replace_malloc_lib, RTLD_LAZY);
+ if (handle) {
+#define MALLOC_DECL(name, ...) \
+ replace_ ## name = (replace_ ## name ## _impl_t *) dlsym(handle, "replace_" # name);
+
+# define MALLOC_FUNCS MALLOC_FUNCS_ALL
+#include "malloc_decls.h"
+ }
+ }
+}
+# else
+# error No implementation for replace_malloc_init_funcs()
+# endif
+
+#endif /* MOZ_NO_REPLACE_FUNC_DECL */
+
+/*
+ * Below is the malloc implementation overriding jemalloc and calling the
+ * replacement functions if they exist.
+ */
+
+/*
+ * Malloc implementation functions are MOZ_MEMORY_API, and jemalloc
+ * specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h
+ */
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
+#include "malloc_decls.h"
+
+#define MALLOC_DECL(name, return_type, ...) \
+ MOZ_JEMALLOC_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
+#include "malloc_decls.h"
+
+static int replace_malloc_initialized = 0;
+static void
+init()
+{
+#ifdef MOZ_NO_REPLACE_FUNC_DECL
+ replace_malloc_init_funcs();
+#endif
+ // Set this *before* calling replace_init, otherwise if replace_init calls
+ // malloc() we'll get an infinite loop.
+ replace_malloc_initialized = 1;
+ if (replace_init)
+ replace_init(&malloc_table);
+}
+
+MFBT_API struct ReplaceMallocBridge*
+get_bridge(void)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_get_bridge))
+ return NULL;
+ return replace_get_bridge();
+}
+
+void*
+malloc_impl(size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_malloc))
+ return je_malloc(size);
+ return replace_malloc(size);
+}
+
+int
+posix_memalign_impl(void **memptr, size_t alignment, size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_posix_memalign))
+ return je_posix_memalign(memptr, alignment, size);
+ return replace_posix_memalign(memptr, alignment, size);
+}
+
+void*
+aligned_alloc_impl(size_t alignment, size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_aligned_alloc))
+ return je_aligned_alloc(alignment, size);
+ return replace_aligned_alloc(alignment, size);
+}
+
+void*
+calloc_impl(size_t num, size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_calloc))
+ return je_calloc(num, size);
+ return replace_calloc(num, size);
+}
+
+void*
+realloc_impl(void *ptr, size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_realloc))
+ return je_realloc(ptr, size);
+ return replace_realloc(ptr, size);
+}
+
+void
+free_impl(void *ptr)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_free))
+ je_free(ptr);
+ else
+ replace_free(ptr);
+}
+
+void*
+memalign_impl(size_t alignment, size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_memalign))
+ return je_memalign(alignment, size);
+ return replace_memalign(alignment, size);
+}
+
+void*
+valloc_impl(size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_valloc))
+ return je_valloc(size);
+ return replace_valloc(size);
+}
+
+size_t
+malloc_usable_size_impl(usable_ptr_t ptr)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_malloc_usable_size))
+ return je_malloc_usable_size(ptr);
+ return replace_malloc_usable_size(ptr);
+}
+
+size_t
+malloc_good_size_impl(size_t size)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_malloc_good_size))
+ return je_malloc_good_size(size);
+ return replace_malloc_good_size(size);
+}
+
+void
+jemalloc_stats_impl(jemalloc_stats_t *stats)
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_jemalloc_stats))
+ je_jemalloc_stats(stats);
+ else
+ replace_jemalloc_stats(stats);
+}
+
+void
+jemalloc_purge_freed_pages_impl()
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_jemalloc_purge_freed_pages))
+ je_jemalloc_purge_freed_pages();
+ else
+ replace_jemalloc_purge_freed_pages();
+}
+
+void
+jemalloc_free_dirty_pages_impl()
+{
+ if (MOZ_UNLIKELY(!replace_malloc_initialized))
+ init();
+ if (MOZ_LIKELY(!replace_jemalloc_free_dirty_pages))
+ je_jemalloc_free_dirty_pages();
+ else
+ replace_jemalloc_free_dirty_pages();
+}
+
+/* The following comment and definitions are from jemalloc.c: */
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
+ *
+ * These definitions interpose hooks in glibc. The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+
+typedef void (* __free_hook_type)(void *ptr);
+typedef void *(* __malloc_hook_type)(size_t size);
+typedef void *(* __realloc_hook_type)(void *ptr, size_t size);
+typedef void *(* __memalign_hook_type)(size_t alignment, size_t size);
+
+MOZ_MEMORY_API __free_hook_type __free_hook = free_impl;
+MOZ_MEMORY_API __malloc_hook_type __malloc_hook = malloc_impl;
+MOZ_MEMORY_API __realloc_hook_type __realloc_hook = realloc_impl;
+MOZ_MEMORY_API __memalign_hook_type __memalign_hook = memalign_impl;
+
+#endif
+
+/*
+ * The following is a OSX zone allocator implementation.
+ * /!\ WARNING. It assumes the underlying malloc implementation's
+ * malloc_usable_size returns 0 when the given pointer is not owned by
+ * the allocator. Sadly, OSX does call zone_size with pointers not
+ * owned by the allocator.
+ */
+
+#ifdef XP_DARWIN
+#include <stdlib.h>
+#include <malloc/malloc.h>
+#include "mozilla/Assertions.h"
+
+static size_t
+zone_size(malloc_zone_t *zone, void *ptr)
+{
+ return malloc_usable_size_impl(ptr);
+}
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+ return malloc_impl(size);
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+ return calloc_impl(num, size);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+ if (malloc_usable_size_impl(ptr))
+ return realloc_impl(ptr, size);
+ return realloc(ptr, size);
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr)
+{
+ if (malloc_usable_size_impl(ptr)) {
+ free_impl(ptr);
+ return;
+ }
+ free(ptr);
+}
+
+static void
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
+{
+ size_t current_size = malloc_usable_size_impl(ptr);
+ if (current_size) {
+ MOZ_ASSERT(current_size == size);
+ free_impl(ptr);
+ return;
+ }
+ free(ptr);
+}
+
+static void *
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
+{
+ void *ptr;
+ if (posix_memalign_impl(&ptr, alignment, size) == 0)
+ return ptr;
+ return NULL;
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+ return valloc_impl(size);
+}
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+ /* This function should never be called. */
+ MOZ_CRASH();
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+ return malloc_good_size_impl(size);
+}
+
+#ifdef MOZ_JEMALLOC
+
+#include "jemalloc/internal/jemalloc_internal.h"
+
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+ /* /!\ This calls into jemalloc. It works because we're linked in the
+ * same library. Stolen from jemalloc's zone.c. */
+ if (isthreaded)
+ jemalloc_prefork();
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+ /* /!\ This calls into jemalloc. It works because we're linked in the
+ * same library. Stolen from jemalloc's zone.c. */
+ if (isthreaded)
+ jemalloc_postfork_parent();
+}
+
+#else
+
+#define JEMALLOC_ZONE_VERSION 6
+
+/* Empty implementations are needed, because fork() calls zone->force_(un)lock
+ * unconditionally. */
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+}
+
+#endif
+
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;
+
+static malloc_zone_t *get_default_zone()
+{
+ malloc_zone_t **zones = NULL;
+ unsigned int num_zones = 0;
+
+ /*
+ * On OSX 10.12, malloc_default_zone returns a special zone that is not
+ * present in the list of registered zones. That zone uses a "lite zone"
+ * if one is present (apparently enabled when malloc stack logging is
+ * enabled), or the first registered zone otherwise. In practice this
+ * means unless malloc stack logging is enabled, the first registered
+ * zone is the default.
+ * So get the list of zones to get the first one, instead of relying on
+ * malloc_default_zone.
+ */
+ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**) &zones,
+ &num_zones)) {
+ /* Reset the value in case the failure happened after it was set. */
+ num_zones = 0;
+ }
+ if (num_zones) {
+ return zones[0];
+ }
+ return malloc_default_zone();
+}
+
+
+__attribute__((constructor)) void
+register_zone(void)
+{
+ malloc_zone_t *default_zone = get_default_zone();
+
+ zone.size = (void *)zone_size;
+ zone.malloc = (void *)zone_malloc;
+ zone.calloc = (void *)zone_calloc;
+ zone.valloc = (void *)zone_valloc;
+ zone.free = (void *)zone_free;
+ zone.realloc = (void *)zone_realloc;
+ zone.destroy = (void *)zone_destroy;
+ zone.zone_name = "replace_malloc_zone";
+ zone.batch_malloc = NULL;
+ zone.batch_free = NULL;
+ zone.introspect = &zone_introspect;
+ zone.version = JEMALLOC_ZONE_VERSION;
+ zone.memalign = zone_memalign;
+ zone.free_definite_size = zone_free_definite_size;
+#if (JEMALLOC_ZONE_VERSION >= 8)
+ zone.pressure_relief = NULL;
+#endif
+ zone_introspect.enumerator = NULL;
+ zone_introspect.good_size = (void *)zone_good_size;
+ zone_introspect.check = NULL;
+ zone_introspect.print = NULL;
+ zone_introspect.log = NULL;
+ zone_introspect.force_lock = (void *)zone_force_lock;
+ zone_introspect.force_unlock = (void *)zone_force_unlock;
+ zone_introspect.statistics = NULL;
+ zone_introspect.zone_locked = NULL;
+#if (JEMALLOC_ZONE_VERSION >= 7)
+ zone_introspect.enable_discharge_checking = NULL;
+ zone_introspect.disable_discharge_checking = NULL;
+ zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
+#endif
+
+ /*
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone.
+ */
+ malloc_zone_t *purgeable_zone = malloc_default_purgeable_zone();
+
+ /* Register the custom zone. At this point it won't be the default. */
+ malloc_zone_register(&zone);
+
+ do {
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it at the
+ * location of the specified zone. Unregistering the default zone thus
+ * makes the last registered one the default. On OSX < 10.6,
+ * unregistering shifts all registered zones. The first registered zone
+ * then becomes the default.
+ */
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before the default
+ * zone makes some things crash because it thinks it owns the default
+ * zone allocated pointers. We thus unregister/re-register it in order to
+ * ensure it's always after the default zone. On OSX < 10.6, as
+ * unregistering shifts registered zones, this simply removes the purgeable
+ * zone from the list and adds it back at the end, after the default zone.
+ * On OSX >= 10.6, unregistering replaces the purgeable zone with the last
+ * registered zone above, i.e. the default zone. Registering it again then
+ * puts it at the end, obviously after the default zone.
+ */
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ default_zone = get_default_zone();
+ } while (default_zone != &zone);
+}
+#endif
diff --git a/memory/build/replace_malloc.h b/memory/build/replace_malloc.h
new file mode 100644
index 000000000..a61744f60
--- /dev/null
+++ b/memory/build/replace_malloc.h
@@ -0,0 +1,133 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef replace_malloc_h
+#define replace_malloc_h
+
+/*
+ * The replace_malloc facility allows an external library to replace or
+ * supplement the jemalloc implementation.
+ *
+ * The external library may be hooked by setting one of the following
+ * environment variables to the library path:
+ * - LD_PRELOAD on Linux,
+ * - DYLD_INSERT_LIBRARIES on OSX,
+ * - MOZ_REPLACE_MALLOC_LIB on Windows and Android.
+ *
+ * An initialization function is called before any malloc replacement
+ * function, and has the following declaration:
+ *
+ * void replace_init(const malloc_table_t *)
+ *
+ * The const malloc_table_t pointer given to that function is a table
+ * containing pointers to the original jemalloc implementation, so that
+ * replacement functions can call them back if they need to. The pointer
+ * itself can safely be kept around (no need to copy the table itself).
+ *
+ * The functions to be implemented in the external library are of the form:
+ *
+ * void *replace_malloc(size_t size)
+ * {
+ * // Fiddle with the size if necessary.
+ * // orig->malloc doesn't have to be called if the external library
+ * // provides its own allocator, but in this case it will have to
+ * // implement all functions.
+ * void *ptr = orig->malloc(size);
+ * // Do whatever you want with the ptr.
+ * return ptr;
+ * }
+ *
+ * where "orig" is the pointer obtained from replace_init.
+ *
+ * See malloc_decls.h for a list of functions that can be replaced this
+ * way. The implementations are all in the form:
+ * return_type replace_name(arguments [,...])
+ *
+ * They don't all need to be provided.
+ *
+ * Building a replace-malloc library is like rocket science: things can end
+ * up blowing up, especially when trying to use complex types, and even more
+ * so when these types come from XPCOM or other parts of the
+ * Mozilla codebase.
+ * It is recommended to add the following to a replace-malloc implementation's
+ * moz.build:
+ * DISABLE_STL_WRAPPING = True # Avoid STL wrapping
+ *
+ * If your replace-malloc implementation lives under memory/replace, this
+ * is taken care of by memory/replace/defs.mk.
+ */
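+
+/*
+ * As a minimal sketch (the "gFuncs" variable name is just an example choice
+ * for this illustration, not part of the interface), a replace-malloc
+ * library that merely forwards to jemalloc could look like:
+ *
+ *   static const malloc_table_t* gFuncs;
+ *
+ *   void replace_init(const malloc_table_t* aTable)
+ *   {
+ *     gFuncs = aTable;
+ *   }
+ *
+ *   void* replace_malloc(size_t size)
+ *   {
+ *     return gFuncs->malloc(size);
+ *   }
+ *
+ * Functions that the library does not provide simply fall through to the
+ * jemalloc implementation.
+ */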
+
+#ifdef replace_malloc_bridge_h
+#error Do not include replace_malloc_bridge.h before replace_malloc.h. \
+ In fact, you only need the latter.
+#endif
+
+#define REPLACE_MALLOC_IMPL
+
+#include "replace_malloc_bridge.h"
+
+/* Implementing a replace-malloc library is incompatible with using mozalloc. */
+#define MOZ_NO_MOZALLOC 1
+
+#include "mozilla/Types.h"
+
+MOZ_BEGIN_EXTERN_C
+
+/* MOZ_NO_REPLACE_FUNC_DECL and MOZ_REPLACE_WEAK are only defined in
+ * replace_malloc.c. Normally, including this header will declare the
+ * replace_* functions. */
+#ifndef MOZ_NO_REPLACE_FUNC_DECL
+
+# ifndef MOZ_REPLACE_WEAK
+# define MOZ_REPLACE_WEAK
+# endif
+
+# define MALLOC_DECL(name, return_type, ...) \
+ MOZ_EXPORT return_type replace_ ## name(__VA_ARGS__) MOZ_REPLACE_WEAK;
+
+# define MALLOC_FUNCS MALLOC_FUNCS_ALL
+# include "malloc_decls.h"
+
+#endif /* MOZ_NO_REPLACE_FUNC_DECL */
+
+/*
+ * posix_memalign, aligned_alloc, memalign and valloc all implement some
+ * kind of aligned memory allocation. For convenience, replace_posix_memalign,
+ * replace_aligned_alloc and replace_valloc can be automatically derived
+ * from replace_memalign when MOZ_REPLACE_ONLY_MEMALIGN is defined before
+ * including this header. PAGE_SIZE also needs to be defined to the
+ * appropriate expression.
+ */
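+/*
+ * For instance (an illustrative sketch; the 4096 page size is an assumption
+ * made for this example only), a library that only implements
+ * replace_memalign can do:
+ *
+ *   #define PAGE_SIZE 4096
+ *   #define MOZ_REPLACE_ONLY_MEMALIGN
+ *   #include "replace_malloc.h"
+ *
+ * after which the replace_posix_memalign, replace_aligned_alloc and
+ * replace_valloc definitions below are used automatically.
+ */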
+#ifdef MOZ_REPLACE_ONLY_MEMALIGN
+#include <errno.h>
+
+int replace_posix_memalign(void **ptr, size_t alignment, size_t size)
+{
+ if (size == 0) {
+ *ptr = NULL;
+ return 0;
+ }
+ /* alignment must be a power of two and a multiple of sizeof(void *) */
+ if (((alignment - 1) & alignment) != 0 || (alignment % sizeof(void *)))
+ return EINVAL;
+ *ptr = replace_memalign(alignment, size);
+ return *ptr ? 0 : ENOMEM;
+}
+
+void *replace_aligned_alloc(size_t alignment, size_t size)
+{
+ /* size should be a multiple of alignment */
+ if (size % alignment)
+ return NULL;
+ return replace_memalign(alignment, size);
+}
+
+void *replace_valloc(size_t size)
+{
+ return replace_memalign(PAGE_SIZE, size);
+}
+#endif
+
+MOZ_END_EXTERN_C
+
+#endif /* replace_malloc_h */
diff --git a/memory/build/replace_malloc_bridge.h b/memory/build/replace_malloc_bridge.h
new file mode 100644
index 000000000..301b165eb
--- /dev/null
+++ b/memory/build/replace_malloc_bridge.h
@@ -0,0 +1,202 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef replace_malloc_bridge_h
+#define replace_malloc_bridge_h
+
+/*
+ * The replace-malloc bridge allows bidirectional method calls between
+ * a program and the replace-malloc library that has been loaded for it.
+ * In Firefox, this is used to allow method calls between code in libxul
+ * and code in the replace-malloc library, without libxul needing to link
+ * against that library or vice-versa.
+ *
+ * Subsystems can add methods for their own needs. Replace-malloc libraries
+ * can decide to implement those methods or not.
+ *
+ * Replace-malloc libraries can provide such a bridge by implementing
+ * a ReplaceMallocBridge-derived class, and a replace_get_bridge function
+ * returning an instance of that class. The default methods in
+ * ReplaceMallocBridge are expected to return values that callers would
+ * understand as "the bridge doesn't implement this method", so that a
+ * replace-malloc library doesn't have to implement all methods.
+ *
+ * The ReplaceMallocBridge class contains definitions for methods for
+ * all replace-malloc libraries. Each library picks the methods it wants
+ * to reply to in its ReplaceMallocBridge-derived class instance.
+ * All methods of ReplaceMallocBridge must be virtual. Similarly,
+ * anything passed as an argument to those methods must be plain data, or
+ * an instance of a class with only virtual methods.
+ *
+ * Binary compatibility is expected to be maintained, such that a newer
+ * Firefox can be used with an old replace-malloc library, or an old
+ * Firefox can be used with a newer replace-malloc library. As such, only
+ * new virtual methods should be added to ReplaceMallocBridge, and
+ * each change should have a corresponding bump of the mVersion value.
+ * At the same time, each virtual method should have a corresponding
+ * wrapper calling the virtual method on the instance from
+ * ReplaceMallocBridge::Get(), giving it the version the virtual method
+ * was added.
+ *
+ * Parts that are not relevant to the replace-malloc library end of the
+ * bridge are hidden when REPLACE_MALLOC_IMPL is not defined, which is
+ * the case when including replace_malloc.h.
+ */
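+
+/*
+ * As an illustrative sketch (the "MyBridge" and "sBridge" names are example
+ * choices, not part of this interface), a replace-malloc library could
+ * provide its bridge along these lines:
+ *
+ *   struct MyBridge : public ReplaceMallocBridge
+ *   {
+ *     // Override only the virtual methods this library implements; the
+ *     // base-class defaults already mean "not implemented".
+ *   };
+ *
+ *   static MyBridge sBridge;
+ *
+ *   ReplaceMallocBridge* replace_get_bridge()
+ *   {
+ *     return &sBridge;
+ *   }
+ */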
+
+struct ReplaceMallocBridge;
+
+#include "mozilla/Types.h"
+
+MOZ_BEGIN_EXTERN_C
+
+#ifndef REPLACE_MALLOC_IMPL
+/* Returns the replace-malloc bridge if there is one to be returned. */
+MFBT_API ReplaceMallocBridge* get_bridge();
+#endif
+
+/* Table of malloc functions.
+ * e.g. void* (*malloc)(size_t), etc.
+ */
+#define MALLOC_DECL(name, return_type, ...) \
+ typedef return_type(name ## _impl_t)(__VA_ARGS__);
+
+#include "malloc_decls.h"
+
+#define MALLOC_DECL(name, return_type, ...) \
+ name ## _impl_t * name;
+
+typedef struct {
+#include "malloc_decls.h"
+} malloc_table_t;
+
+
+/* Table of malloc hook functions.
+ * Those functions are called with the arguments and results of malloc
+ * functions after they are called.
+ * e.g. void* (*malloc_hook)(void*, size_t), etc.
+ * They can either return the result they're given, or alter it before
+ * returning it.
+ * The hooks corresponding to functions, like free(void*), that return no
+ * value, don't take an extra argument.
+ * The table must at least contain a pointer for malloc_hook and free_hook
+ * functions. They will be used as fallback if no pointer is given for
+ * other allocation functions, like calloc_hook.
+ */
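+/*
+ * As an illustrative sketch (the "my_" names are example choices, not part
+ * of this interface), hooks matching the declarations below look like:
+ *
+ *   void* my_malloc_hook(void* aResult, size_t aSize)
+ *   {
+ *     // Observe (aSize, aResult), then pass the result through unchanged.
+ *     return aResult;
+ *   }
+ *
+ *   void my_free_hook(void* aPtr)
+ *   {
+ *     // free() returns no value, so its hook only receives the argument.
+ *   }
+ */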
+#define MALLOC_DECL(name, return_type, ...) \
+ return_type (*name ## _hook)(return_type, __VA_ARGS__);
+#define MALLOC_DECL_VOID(name, ...) \
+ void (*name ## _hook)(__VA_ARGS__);
+
+typedef struct {
+#include "malloc_decls.h"
+  /* Like free_hook, but called before realloc_hook. free_hook is called
+   * instead when this one is not given. */
+ void (*realloc_hook_before)(void* aPtr);
+} malloc_hook_table_t;
+
+MOZ_END_EXTERN_C
+
+#ifdef __cplusplus
+
+namespace mozilla {
+namespace dmd {
+struct DMDFuncs;
+} // namespace dmd
+
+/* Callbacks to register debug file handles for Poison IO interpose.
+ * See Mozilla(|Un)RegisterDebugHandle in xpcom/build/PoisonIOInterposer.h */
+struct DebugFdRegistry
+{
+ virtual void RegisterHandle(intptr_t aFd);
+
+ virtual void UnRegisterHandle(intptr_t aFd);
+};
+
+} // namespace mozilla
+
+struct ReplaceMallocBridge
+{
+ ReplaceMallocBridge() : mVersion(3) {}
+
+ /* This method was added in version 1 of the bridge. */
+ virtual mozilla::dmd::DMDFuncs* GetDMDFuncs() { return nullptr; }
+
+ /* Send a DebugFdRegistry instance to the replace-malloc library so that
+ * it can register/unregister file descriptors whenever needed. The
+ * instance is valid until the process dies.
+ * This method was added in version 2 of the bridge. */
+ virtual void InitDebugFd(mozilla::DebugFdRegistry&) {}
+
+ /* Register a list of malloc functions and hook functions to the
+ * replace-malloc library so that it can choose to dispatch to them
+ * when needed. The details of what is dispatched when is left to the
+ * replace-malloc library.
+ * Passing a nullptr for either table will unregister a previously
+ * registered table under the same name.
+ * Returns nullptr if registration failed.
+ * If registration succeeded, a table of "pure" malloc functions is
+ * returned. Those "pure" malloc functions won't call hooks.
+ * /!\ Do not rely on registration/unregistration to be instantaneous.
+ * Functions from a previously registered table may still be called for
+ * a brief time after RegisterHook returns.
+ * This method was added in version 3 of the bridge. */
+ virtual const malloc_table_t*
+ RegisterHook(const char* aName, const malloc_table_t* aTable,
+ const malloc_hook_table_t* aHookTable) { return nullptr; }
+
+#ifndef REPLACE_MALLOC_IMPL
+ /* Returns the replace-malloc bridge if its version is at least the
+ * requested one. */
+ static ReplaceMallocBridge* Get(int aMinimumVersion) {
+ static ReplaceMallocBridge* sSingleton = get_bridge();
+ return (sSingleton && sSingleton->mVersion >= aMinimumVersion)
+ ? sSingleton : nullptr;
+ }
+#endif
+
+protected:
+ const int mVersion;
+};
+
+#ifndef REPLACE_MALLOC_IMPL
+/* Class containing wrappers for calls to ReplaceMallocBridge methods.
+ * Those wrappers need to be static methods in a class because compilers
+ * complain about unused static global functions, and linkers complain
+ * about multiple definitions of non-static global functions.
+ * Using a separate class from ReplaceMallocBridge allows the function
+ * names to be identical. */
+struct ReplaceMalloc
+{
+ /* Don't call this method from performance critical code. Use
+ * mozilla::dmd::DMDFuncs::Get() instead, it has less overhead. */
+ static mozilla::dmd::DMDFuncs* GetDMDFuncs()
+ {
+ auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 1);
+ return singleton ? singleton->GetDMDFuncs() : nullptr;
+ }
+
+ static void InitDebugFd(mozilla::DebugFdRegistry& aRegistry)
+ {
+ auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 2);
+ if (singleton) {
+ singleton->InitDebugFd(aRegistry);
+ }
+ }
+
+ static const malloc_table_t*
+ RegisterHook(const char* aName, const malloc_table_t* aTable,
+ const malloc_hook_table_t* aHookTable)
+ {
+ auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 3);
+ return singleton ? singleton->RegisterHook(aName, aTable, aHookTable)
+ : nullptr;
+ }
+};
+#endif
+
+#endif /* __cplusplus */
+
+#endif /* replace_malloc_bridge_h */
diff --git a/memory/fallible/fallible.cpp b/memory/fallible/fallible.cpp
new file mode 100644
index 000000000..5a449bc80
--- /dev/null
+++ b/memory/fallible/fallible.cpp
@@ -0,0 +1,11 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "fallible.h"
+
+namespace mozilla {
+
+const fallible_t fallible = {};
+
+} // namespace mozilla
diff --git a/memory/fallible/fallible.h b/memory/fallible/fallible.h
new file mode 100644
index 000000000..c028360b1
--- /dev/null
+++ b/memory/fallible/fallible.h
@@ -0,0 +1,68 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_fallible_h
+#define mozilla_fallible_h
+
+#if defined(__cplusplus)
+
+/* Explicit fallible allocation
+ *
+ * Memory allocation (normally) defaults to abort in case of failed
+ * allocation. That is, it never returns NULL, and crashes instead.
+ *
+ * Code can explicitly request fallible memory allocation thanks
+ * to the declarations below.
+ *
+ * The typical use of the mozilla::fallible const is with placement new,
+ * like the following:
+ *
+ * foo = new (mozilla::fallible) Foo();
+ *
+ * The following forms, or derivatives, are also possible but deprecated:
+ *
+ * foo = new ((mozilla::fallible_t())) Foo();
+ *
+ *   const mozilla::fallible_t f = mozilla::fallible_t();
+ * bar = new (f) Bar();
+ *
+ * It is also possible to declare method overloads with fallible allocation
+ * alternatives, like so:
+ *
+ * class Foo {
+ * public:
+ * void Method(void *);
+ * void Method(void *, const mozilla::fallible_t&);
+ * };
+ *
+ * Foo foo;
+ * foo.Method(nullptr, mozilla::fallible);
+ *
+ * If that last method call is in a method that itself takes a const
+ * fallible_t& argument, it is recommended to propagate that argument
+ * instead of using mozilla::fallible:
+ *
+ * void Func(Foo &foo, const mozilla::fallible_t& aFallible) {
+ * foo.Method(nullptr, aFallible);
+ * }
+ *
+ */
+namespace mozilla {
+
+struct fallible_t { };
+
+/* This symbol is kept unexported, such that in corner cases where the
+ * compiler can't remove its use (essentially, cross compilation-unit
+ * calls), the smallest machine code is used.
+ * Depending on how the linker packs symbols, it will consume between 1 and
+ * 8 bytes of read-only data in each executable or shared library, but
+ * only in those where it's actually not optimized out by the compiler.
+ */
+extern const fallible_t fallible;
+
+} // namespace mozilla
+
+#endif
+
+#endif // mozilla_fallible_h
diff --git a/memory/fallible/moz.build b/memory/fallible/moz.build
new file mode 100644
index 000000000..581a394b4
--- /dev/null
+++ b/memory/fallible/moz.build
@@ -0,0 +1,34 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS.mozilla += [
+ 'fallible.h',
+]
+
+Library('fallible')
+
+SOURCES += [
+ 'fallible.cpp',
+]
+
+if CONFIG['_MSC_VER']:
+ # MSVC normally adds linker directives relative to the CRT in a .drectve
+ # section in .obj files. Then, when linking objects, it adds those
+ # directives as if they were given as command line arguments. This can
+    # lead to trying to link multiple CRTs because different objects are
+ # compiled with different CRT options (i.e. -MT vs. -MD), and failing.
+ # The only source in this directory doesn't expose anything that depends
+ # on a CRT, so it doesn't need to be bound to a specific one.
+ # Adding the -Zl option makes MSVC not store linker directives in the
+    # object. This allows linking fallible.obj to binaries independently of
+ # the CRT they use.
+ CXXFLAGS += [
+ '-Zl',
+ ]
+
+ # This further prevents the CRT name from getting into the .obj file,
+ # by avoiding pulling in a bunch of string code that uses the CRT.
+ DEFINES['mozilla_Char16_h'] = True
diff --git a/memory/gtest/TestJemalloc.cpp b/memory/gtest/TestJemalloc.cpp
new file mode 100644
index 000000000..f37c57376
--- /dev/null
+++ b/memory/gtest/TestJemalloc.cpp
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/mozalloc.h"
+#include "mozmemory.h"
+
+#include "gtest/gtest.h"
+
+static inline void
+TestOne(size_t size)
+{
+ size_t req = size;
+ size_t adv = malloc_good_size(req);
+ char* p = (char*)malloc(req);
+ size_t usable = moz_malloc_usable_size(p);
+ // NB: Using EXPECT here so that we still free the memory on failure.
+ EXPECT_EQ(adv, usable) <<
+ "malloc_good_size(" << req << ") --> " << adv << "; "
+ "malloc_usable_size(" << req << ") --> " << usable;
+ free(p);
+}
+
+static inline void
+TestThree(size_t size)
+{
+ ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
+ ASSERT_NO_FATAL_FAILURE(TestOne(size));
+ ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
+}
+
+TEST(Jemalloc, UsableSizeInAdvance)
+{
+ #define K * 1024
+ #define M * 1024 * 1024
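+  // Postfix-style helper macros: "16 K" below expands to "16 * 1024".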
+
+ /*
+   * Test every size up to a certain point, then (N-1, N, N+1) triplets for
+ * various sizes beyond that.
+ */
+
+ for (size_t n = 0; n < 16 K; n++)
+ ASSERT_NO_FATAL_FAILURE(TestOne(n));
+
+ for (size_t n = 16 K; n < 1 M; n += 4 K)
+ ASSERT_NO_FATAL_FAILURE(TestThree(n));
+
+ for (size_t n = 1 M; n < 8 M; n += 128 K)
+ ASSERT_NO_FATAL_FAILURE(TestThree(n));
+}
diff --git a/memory/gtest/moz.build b/memory/gtest/moz.build
new file mode 100644
index 000000000..8d50ee4e3
--- /dev/null
+++ b/memory/gtest/moz.build
@@ -0,0 +1,11 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ 'TestJemalloc.cpp',
+]
+
+FINAL_LIBRARY = 'xul-gtest'
diff --git a/memory/jemalloc/README.mozilla b/memory/jemalloc/README.mozilla
new file mode 100644
index 000000000..0bbbca468
--- /dev/null
+++ b/memory/jemalloc/README.mozilla
@@ -0,0 +1,9 @@
+This is a copy of the jemalloc source code. It is intended to be left pristine.
+Modifications to this code without coordinating with upstream are unacceptable,
+and will be reverted. Integration modifications should be made in memory/build
+whenever possible.
+
+The canonical repository for this source code is git://canonware.com/jemalloc.git.
+The information about the upstream repository and revision lives in upstream.info.
+In order to update the code, you can run the update.sh script located in
+the same directory.
diff --git a/memory/jemalloc/helper/git b/memory/jemalloc/helper/git
new file mode 100755
index 000000000..9afdb6778
--- /dev/null
+++ b/memory/jemalloc/helper/git
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+# jemalloc's configure runs git to determine the version. But when building
+# from a gecko git clone, the git commands it uses are going to pick up
+# gecko's information, not jemalloc's, which is useless. So pretend we don't
+# have git at all. That will make jemalloc's configure pick the in-tree
+# VERSION file.
+
+exit 1
diff --git a/memory/jemalloc/moz.build b/memory/jemalloc/moz.build
new file mode 100644
index 000000000..b7e2b661f
--- /dev/null
+++ b/memory/jemalloc/moz.build
@@ -0,0 +1,81 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ 'src/src/arena.c',
+ 'src/src/atomic.c',
+ 'src/src/base.c',
+ 'src/src/bitmap.c',
+ 'src/src/chunk.c',
+ 'src/src/chunk_dss.c',
+ 'src/src/chunk_mmap.c',
+ 'src/src/ckh.c',
+ 'src/src/extent.c',
+ 'src/src/hash.c',
+ 'src/src/huge.c',
+ 'src/src/jemalloc.c',
+ 'src/src/mb.c',
+ 'src/src/mutex.c',
+ 'src/src/nstime.c',
+ 'src/src/pages.c',
+ 'src/src/prng.c',
+ 'src/src/prof.c',
+ 'src/src/quarantine.c',
+ 'src/src/rtree.c',
+ 'src/src/spin.c',
+ 'src/src/stats.c',
+ 'src/src/tcache.c',
+ 'src/src/ticker.c',
+ 'src/src/tsd.c',
+ 'src/src/util.c',
+ 'src/src/witness.c',
+]
+
+SOURCES += [
+    # This file cannot be built in unified mode because of a symbol clash on arena_purge.
+ 'src/src/ctl.c',
+]
+
+# Only OSX needs the zone allocation implementation,
+# but only if replace-malloc is not enabled.
+if CONFIG['OS_TARGET'] == 'Darwin' and not CONFIG['MOZ_REPLACE_MALLOC']:
+ UNIFIED_SOURCES += [
+ 'src/src/zone.c',
+ ]
+
+if CONFIG['MOZ_JEMALLOC4']:
+ FINAL_LIBRARY = 'memory'
+else:
+ FINAL_LIBRARY = 'replace_jemalloc'
+
+if CONFIG['MOZ_GLUE_IN_PROGRAM']:
+ SDK_LIBRARY = True
+ DIST_INSTALL = True
+
+if CONFIG['_MSC_VER']:
+ DEFINES['DLLEXPORT'] = True
+ LOCAL_INCLUDES += ['src/include/msvc_compat']
+ if not CONFIG['HAVE_INTTYPES_H']:
+ LOCAL_INCLUDES += ['src/include/msvc_compat/C99']
+
+if CONFIG['OS_TARGET'] == 'Linux':
+ # For mremap
+ DEFINES['_GNU_SOURCE'] = True
+
+if CONFIG['GNU_CC']:
+ CFLAGS += ['-std=gnu99']
+
+DEFINES['abort'] = 'moz_abort'
+
+LOCAL_INCLUDES += [
+ '!src/include',
+ 'src/include',
+]
+
+# We allow warnings for third-party code that can be updated from upstream.
+ALLOW_COMPILER_WARNINGS = True
+
+OS_LIBS += CONFIG['REALTIME_LIBS']
diff --git a/memory/jemalloc/src/.appveyor.yml b/memory/jemalloc/src/.appveyor.yml
new file mode 100644
index 000000000..ddd5c5711
--- /dev/null
+++ b/memory/jemalloc/src/.appveyor.yml
@@ -0,0 +1,28 @@
+version: '{build}'
+
+environment:
+ matrix:
+ - MSYSTEM: MINGW64
+ CPU: x86_64
+ MSVC: amd64
+ - MSYSTEM: MINGW32
+ CPU: i686
+ MSVC: x86
+ - MSYSTEM: MINGW64
+ CPU: x86_64
+ - MSYSTEM: MINGW32
+ CPU: i686
+
+install:
+ - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
+ - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC%
+ - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc
+ - pacman --noconfirm -Suy mingw-w64-%CPU%-make
+
+build_script:
+ - bash -c "autoconf"
+ - bash -c "./configure"
+ - mingw32-make -j3
+ - file lib/jemalloc.dll
+ - mingw32-make -j3 tests
+ - mingw32-make -k check
diff --git a/memory/jemalloc/src/.travis.yml b/memory/jemalloc/src/.travis.yml
new file mode 100644
index 000000000..1fed4f8e6
--- /dev/null
+++ b/memory/jemalloc/src/.travis.yml
@@ -0,0 +1,29 @@
+language: c
+
+matrix:
+ include:
+ - os: linux
+ compiler: gcc
+ - os: linux
+ compiler: gcc
+ env:
+ - EXTRA_FLAGS=-m32
+ addons:
+ apt:
+ packages:
+ - gcc-multilib
+ - os: osx
+ compiler: clang
+ - os: osx
+ compiler: clang
+ env:
+ - EXTRA_FLAGS=-m32
+
+before_script:
+ - autoconf
+ - ./configure${EXTRA_FLAGS:+ CC="$CC $EXTRA_FLAGS"}
+ - make -j3
+ - make -j3 tests
+
+script:
+ - make check
diff --git a/memory/jemalloc/src/COPYING b/memory/jemalloc/src/COPYING
new file mode 100644
index 000000000..104b1f8b0
--- /dev/null
+++ b/memory/jemalloc/src/COPYING
@@ -0,0 +1,27 @@
+Unless otherwise specified, files in the jemalloc source distribution are
+subject to the following license:
+--------------------------------------------------------------------------------
+Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>.
+All rights reserved.
+Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
+Copyright (C) 2009-2016 Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice(s),
+ this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice(s),
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--------------------------------------------------------------------------------
diff --git a/memory/jemalloc/src/ChangeLog b/memory/jemalloc/src/ChangeLog
new file mode 100644
index 000000000..587685d02
--- /dev/null
+++ b/memory/jemalloc/src/ChangeLog
@@ -0,0 +1,981 @@
+Following are change highlights associated with official releases. Important
+bug fixes are all mentioned, but some internal enhancements are omitted here for
+brevity. Much more detail can be found in the git revision history:
+
+ https://github.com/jemalloc/jemalloc
+
+* 4.3.1 (November 7, 2016)
+
+ Bug fixes:
+ - Fix a severe virtual memory leak. This regression was first released in
+ 4.3.0. (@interwq, @jasone)
+ - Refactor atomic and prng APIs to restore support for 32-bit platforms that
+ use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone)
+
+* 4.3.0 (November 4, 2016)
+
+ This is the first release that passes the test suite for multiple Windows
+ configurations, thanks in large part to @glandium setting up continuous
+ integration via AppVeyor (and Travis CI for Linux and OS X).
+
+ New features:
+ - Add "J" (JSON) support to malloc_stats_print(). (@jasone)
+ - Add Cray compiler support. (@ronawho)
+
+ Optimizations:
+ - Add/use adaptive spinning for bootstrapping and radix tree node
+ initialization. (@jasone)
+
+ Bug fixes:
+ - Fix large allocation to search starting in the optimal size class heap,
+ which can substantially reduce virtual memory churn and fragmentation. This
+ regression was first released in 4.0.0. (@mjp41, @jasone)
+ - Fix stats.arenas.<i>.nthreads accounting. (@interwq)
+ - Fix and simplify decay-based purging. (@jasone)
+ - Make DSS (sbrk(2)-related) operations lockless, which resolves potential
+ deadlocks during thread exit. (@jasone)
+ - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun,
+ @jasone)
+ - Fix over-sized allocation of arena_t (plus associated stats) data
+ structures. (@jasone, @interwq)
+ - Fix EXTRA_CFLAGS to not affect configuration. (@jasone)
+ - Fix a Valgrind integration bug. (@ronawho)
+ - Disallow 0x5a junk filling when running in Valgrind. (@jasone)
+ - Fix a file descriptor leak on Linux. This regression was first released in
+ 4.2.0. (@vsarunas, @jasone)
+ - Fix static linking of jemalloc with glibc. (@djwatson)
+ - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This
+ works around other libraries' system call wrappers performing reentrant
+ allocation. (@kspinka, @Whissi, @jasone)
+ - Fix OS X default zone replacement to work with OS X 10.12. (@glandium,
+ @jasone)
+ - Fix cached memory management to avoid needless commit/decommit operations
+ during purging, which resolves permanent virtual memory map fragmentation
+ issues on Windows. (@mjp41, @jasone)
+ - Fix TSD fetches to avoid (recursive) allocation. This is relevant to
+ non-TLS and Windows configurations. (@jasone)
+ - Fix malloc_conf overriding to work on Windows. (@jasone)
+ - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone)
+
+* 4.2.1 (June 8, 2016)
+
+ Bug fixes:
+ - Fix bootstrapping issues for configurations that require allocation during
+ tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
+ - Fix gettimeofday() version of nstime_update(). (@ronawho)
+ - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
+ - Fix potential VM map fragmentation regression. (@jasone)
+ - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
+ - Fix heap profiling context leaks in reallocation edge cases. (@jasone)
+
+* 4.2.0 (May 12, 2016)
+
+ New features:
+ - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
+ an arena's allocations in a single operation. (@jasone)
+ - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
+ - Add the --with-version configure option. (@jasone)
+ - Support --with-lg-page values larger than actual page size. (@jasone)
+
+ Optimizations:
+ - Use pairing heaps rather than red-black trees for various hot data
+ structures. (@djwatson, @jasone)
+ - Streamline fast paths of rtree operations. (@jasone)
+ - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
+ - Decommit unused virtual memory if the OS does not overcommit. (@jasone)
+ - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
+ to avoid unfortunate interactions during fork(2). (@jasone)
+
+ Bug fixes:
+ - Fix chunk accounting related to triggering gdump profiles. (@jasone)
+ - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
+ - Scale leak report summary according to sampling probability. (@jasone)
+
+* 4.1.1 (May 3, 2016)
+
+ This bugfix release resolves a variety of mostly minor issues, though the
+ bitmap fix is critical for 64-bit Windows.
+
+ Bug fixes:
+ - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
+ even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
+ Windows. (@jasone)
+ - Fix hashing functions to avoid unaligned memory accesses (and resulting
+ crashes). This is relevant at least to some ARM-based platforms.
+ (@rkmisra)
+ - Fix fork()-related lock rank ordering reversals. These reversals were
+ unlikely to cause deadlocks in practice except when heap profiling was
+ enabled and active. (@jasone)
+ - Fix various chunk leaks in OOM code paths. (@jasone)
+ - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
+ - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
+ - Fix a variety of test failures that were due to test fragility rather than
+ core bugs. (@jasone)
+
+* 4.1.0 (February 28, 2016)
+
+ This release is primarily about optimizations, but it also incorporates a lot
+ of portability-motivated refactoring and enhancements. Many people worked on
+ this release, to an extent that even with the omission here of minor changes
+ (see git revision history), and of the people who reported and diagnosed
+ issues, so much of the work was contributed that starting with this release,
+ changes are annotated with author credits to help reflect the collaborative
+ effort involved.
+
+ New features:
+ - Implement decay-based unused dirty page purging, a major optimization with
+ mallctl API impact. This is an alternative to the existing ratio-based
+ unused dirty page purging, and is intended to eventually become the sole
+ purging mechanism. New mallctls:
+ + opt.purge
+ + opt.decay_time
+ + arena.<i>.decay
+ + arena.<i>.decay_time
+ + arenas.decay_time
+ + stats.arenas.<i>.decay_time
+ (@jasone, @cevans87)
+ - Add --with-malloc-conf, which makes it possible to embed a default
+ options string during configuration. This was motivated by the desire to
+    specify --with-malloc-conf=purge:decay, since the default must remain
+ purge:ratio until the 5.0.0 release. (@jasone)
+ - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin)
+ - Make *allocx() size class overflow behavior defined. The maximum
+ size class is now less than PTRDIFF_MAX to protect applications against
+ numerical overflow, and all allocation functions are guaranteed to indicate
+ errors rather than potentially crashing if the request size exceeds the
+ maximum size class. (@jasone)
+ - jeprof:
+ + Add raw heap profile support. (@jasone)
+ + Add --retain and --exclude for backtrace symbol filtering. (@jasone)
+
+ Optimizations:
+ - Optimize the fast path to combine various bootstrapping and configuration
+ checks and execute more streamlined code in the common case. (@interwq)
+ - Use linear scan for small bitmaps (used for small object tracking). In
+ addition to speeding up bitmap operations on 64-bit systems, this reduces
+ allocator metadata overhead by approximately 0.2%. (@djwatson)
+ - Separate arena_avail trees, which substantially speeds up run tree
+ operations. (@djwatson)
+ - Use memoization (boot-time-computed table) for run quantization. Separate
+ arena_avail trees reduced the importance of this optimization. (@jasone)
+ - Attempt mmap-based in-place huge reallocation. This can dramatically speed
+ up incremental huge reallocation. (@jasone)
+
+ Incompatible changes:
+ - Make opt.narenas unsigned rather than size_t. (@jasone)
+
+ Bug fixes:
+ - Fix stats.cactive accounting regression. (@rustyx, @jasone)
+ - Handle unaligned keys in hash(). This caused problems for some ARM systems.
+ (@jasone, @cferris1000)
+ - Refactor arenas array. In addition to fixing a fork-related deadlock, this
+ makes arena lookups faster and simpler. (@jasone)
+ - Move retained memory allocation out of the default chunk allocation
+ function, to a location that gets executed even if the application installs
+ a custom chunk allocation function. This resolves a virtual memory leak.
+ (@buchgr)
+ - Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
+ - Fix run quantization. In practice this bug had no impact unless
+ applications requested memory with alignment exceeding one page.
+ (@jasone, @djwatson)
+ - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv)
+ - jeprof:
+ + Don't discard curl options if timeout is not defined. (@djwatson)
+ + Detect failed profile fetches. (@djwatson)
+ - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
+ --disable-stats case. (@jasone)
+
+* 4.0.4 (October 24, 2015)
+
+ This bugfix release fixes another xallocx() regression. No other regressions
+ have come to light in over a month, so this is likely a good starting point
+ for people who prefer to wait for "dot one" releases with all the major issues
+ shaken out.
+
+ Bug fixes:
+  - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large
+ allocations that have been randomly assigned an offset of 0 when
+ --enable-cache-oblivious configure option is enabled.
+
+* 4.0.3 (September 24, 2015)
+
+ This bugfix release continues the trend of xallocx() and heap profiling fixes.
+
+ Bug fixes:
+ - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
+ allocations when --enable-cache-oblivious configure option is enabled.
+ - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
+ when resizing from/to a size class that is not a multiple of the chunk size.
+ - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
+ profile dumping started.
+ - Work around a potentially bad thread-specific data initialization
+ interaction with NPTL (glibc's pthreads implementation).
+
+* 4.0.2 (September 21, 2015)
+
+ This bugfix release addresses a few bugs specific to heap profiling.
+
+ Bug fixes:
+ - Fix ixallocx_prof_sample() to never modify nor create sampled small
+ allocations. xallocx() is in general incapable of moving small allocations,
+ so this fix removes buggy code without loss of generality.
+ - Fix irallocx_prof_sample() to always allocate large regions, even when
+ alignment is non-zero.
+ - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
+ than dereferencing a potentially invalid tctx.
+
+* 4.0.1 (September 15, 2015)
+
+ This is a bugfix release that is somewhat high risk due to the amount of
+ refactoring required to address deep xallocx() problems. As a side effect of
+ these fixes, xallocx() now tries harder to partially fulfill requests for
+ optional extra space. Note that a couple of minor heap profiling
+ optimizations are included, but these are better thought of as performance
+  fixes that were integral to discovering most of the other bugs.
+
+ Optimizations:
+ - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
+ fast path when heap profiling is enabled. Additionally, split a special
+ case out into arena_prof_tctx_reset(), which also avoids chunk metadata
+ reads.
+ - Optimize irallocx_prof() to optimistically update the sampler state. The
+ prior implementation appears to have been a holdover from when
+ rallocx()/xallocx() functionality was combined as rallocm().
+
+ Bug fixes:
+ - Fix TLS configuration such that it is enabled by default for platforms on
+ which it works correctly.
+ - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+ allocation/deallocation within the application's thread-specific data
+ cleanup functions even after arenas_cache is torn down.
+ - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
+ - Fix chunk purge hook calls for in-place huge shrinking reallocation to
+ specify the old chunk size rather than the new chunk size. This bug caused
+ no correctness issues for the default chunk purge function, but was
+ visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
+ - Fix heap profiling bugs:
+ + Fix heap profiling to distinguish among otherwise identical sample sites
+ with interposed resets (triggered via the "prof.reset" mallctl). This bug
+ could cause data structure corruption that would most likely result in a
+ segfault.
+ + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+ + Make one call to prof_active_get_unlocked() per allocation event, and use
+ the result throughout the relevant functions that handle an allocation
+ event. Also add a missing check in prof_realloc(). These fixes protect
+ allocation events against concurrent prof_active changes.
+ + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
+ in the correct order.
+ + Fix prof_realloc() to call prof_free_sampled_object() after calling
+ prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were
+ the same, the tctx could have been prematurely destroyed.
+ - Fix portability bugs:
+ + Don't bitshift by negative amounts when encoding/decoding run sizes in
+ chunk header maps. This affected systems with page sizes greater than 8
+ KiB.
+ + Rename index_t to szind_t to avoid an existing type on Solaris.
+ + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
+ match glibc and avoid compilation errors when including both
+ jemalloc/jemalloc.h and malloc.h in C++ code.
+ + Don't assume that /bin/sh is appropriate when running size_classes.sh
+ during configuration.
+ + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+ + Link tests to librt if it contains clock_gettime(2).
+
+* 4.0.0 (August 17, 2015)
+
+ This version contains many speed and space optimizations, both minor and
+ major. The major themes are generalization, unification, and simplification.
+ Although many of these optimizations cause no visible behavior change, their
+ cumulative effect is substantial.
+
+ New features:
+ - Normalize size class spacing to be consistent across the complete size
+ range. By default there are four size classes per size doubling, but this
+ is now configurable via the --with-lg-size-class-group option. Also add the
+ --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
+ --with-lg-tiny-min options, which can be used to tweak page and size class
+ settings. Impacts:
+ + Worst case performance for incrementally growing/shrinking reallocation
+ is improved because there are far fewer size classes, and therefore
+ copying happens less often.
+ + Internal fragmentation is limited to 20% for all but the smallest size
+ classes (those less than four times the quantum). (1B + 4 KiB)
+ and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+ + Chunk fragmentation tends to be lower because there are fewer distinct run
+ sizes to pack.
+ - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
+ "tcache.destroy" mallctls control tcache lifetime and flushing, and the
+ MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
+ control which tcache is used for each operation.
+ - Implement per thread heap profiling, as well as the ability to
+ enable/disable heap profiling on a per thread basis. Add the "prof.reset",
+ "prof.lg_sample", "thread.prof.name", "thread.prof.active",
+ "opt.prof_thread_active_init", "prof.thread_active_init", and
+ "thread.prof.active" mallctls.
+ - Add support for per arena application-specified chunk allocators, configured
+ via the "arena.<i>.chunk_hooks" mallctl.
+ - Refactor huge allocation to be managed by arenas, so that arenas now
+ function as general purpose independent allocators. This is important in
+ the context of user-specified chunk allocators, aside from the scalability
+ benefits. Related new statistics:
+ + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
+ "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
+ mallctls provide high level per arena huge allocation statistics.
+ + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
+ "stats.arenas.<i>.hchunks.<j>.nmalloc",
+ "stats.arenas.<i>.hchunks.<j>.ndalloc",
+ "stats.arenas.<i>.hchunks.<j>.nrequests", and
+ "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
+ statistics.
+ - Add the 'util' column to malloc_stats_print() output, which reports the
+ proportion of available regions that are currently in use for each small
+ size class.
+  - Add "alloc" and "free" modes for junk filling (see the "opt.junk"
+ mallctl), so that it is possible to separately enable junk filling for
+ allocation versus deallocation.
+ - Add the jemalloc-config script, which provides information about how
+ jemalloc was configured, and how to integrate it into application builds.
+ - Add metadata statistics, which are accessible via the "stats.metadata",
+ "stats.arenas.<i>.metadata.mapped", and
+ "stats.arenas.<i>.metadata.allocated" mallctls.
+ - Add the "stats.resident" mallctl, which reports the upper limit of
+ physically resident memory mapped by the allocator.
+ - Add per arena control over unused dirty page purging, via the
+ "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+ "stats.arenas.<i>.lg_dirty_mult" mallctls.
+ - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
+ feature on/off during program execution.
+ - Add sdallocx(), which implements sized deallocation. The primary
+ optimization over dallocx() is the removal of a metadata read, which often
+ suffers an L1 cache miss.
+ - Add missing header includes in jemalloc/jemalloc.h, so that applications
+ only have to #include <jemalloc/jemalloc.h>.
+ - Add support for additional platforms:
+ + Bitrig
+ + Cygwin
+ + DragonFlyBSD
+ + iOS
+ + OpenBSD
+ + OpenRISC/or1k
+
+ Optimizations:
+ - Maintain dirty runs in per arena LRUs rather than in per arena trees of
+ dirty-run-containing chunks. In practice this change significantly reduces
+ dirty page purging volume.
+ - Integrate whole chunks into the unused dirty page purging machinery. This
+ reduces the cost of repeated huge allocation/deallocation, because it
+ effectively introduces a cache of chunks.
+ - Split the arena chunk map into two separate arrays, in order to increase
+ cache locality for the frequently accessed bits.
+ - Move small run metadata out of runs, into arena chunk headers. This reduces
+ run fragmentation, smaller runs reduce external fragmentation for small size
+ classes, and packed (less uniformly aligned) metadata layout improves CPU
+ cache set distribution.
+ - Randomly distribute large allocation base pointer alignment relative to page
+ boundaries in order to more uniformly utilize CPU cache sets. This can be
+ disabled via the --disable-cache-oblivious configure option, and queried via
+ the "config.cache_oblivious" mallctl.
+ - Micro-optimize the fast paths for the public API functions.
+ - Refactor thread-specific data to reside in a single structure. This assures
+ that only a single TLS read is necessary per call into the public API.
+ - Implement in-place huge allocation growing and shrinking.
+ - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
+ additional optimizations that reduce maximum lookup depth to one or two
+ levels. This resolves what was a concurrency bottleneck for per arena huge
+ allocation, because a global data structure is critical for determining
+ which arenas own which huge allocations.
+
+ Incompatible changes:
+ - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
+ warnings by default.
+ - Assure that the constness of malloc_usable_size()'s return type matches that
+ of the system implementation.
+ - Change the heap profile dump format to support per thread heap profiling,
+ rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
+ result, the bundled jeprof must now be used rather than the upstream
+ (gperftools) pprof.
+ - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
+ internally deadlock on some platforms.
+ - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
+ - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
+ "stats.arenas.<i>.bins.<j>.curregs".
+ - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
+ - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
+ MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
+
+ Removed features:
+ - Remove the *allocm() API, which is superseded by the *allocx() API.
+ - Remove the --enable-dss options, and make dss non-optional on all platforms
+ which support sbrk(2).
+ - Remove the "arenas.purge" mallctl, which was obsoleted by the
+ "arena.<i>.purge" mallctl in 3.1.0.
+ - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
+ detects whether it is running inside Valgrind.
+ - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
+ "stats.huge.ndalloc" mallctls.
+ - Remove the --enable-mremap option.
+ - Remove the "stats.chunks.current", "stats.chunks.total", and
+ "stats.chunks.high" mallctls.
+
+ Bug fixes:
+ - Fix the cactive statistic to decrease (rather than increase) when active
+ memory decreases. This regression was first released in 3.5.0.
+ - Fix OOM handling in memalign() and valloc(). A variant of this bug existed
+ in all releases since 2.0.0, which introduced these functions.
+ - Fix an OOM-related regression in arena_tcache_fill_small(), which could
+ cause cache corruption on OOM. This regression was present in all releases
+ from 2.2.0 through 3.6.0.
+ - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
+ calloc(), and realloc() when profiling is enabled.
+ - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
+ "secondary" precedence is specified, but sbrk(2) is not supported.
+ - Fix fallback lg_floor() implementations to handle extremely large inputs.
+ - Ensure the default purgeable zone is after the default zone on OS X.
+ - Fix latent bugs in atomic_*().
+ - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
+ - Fix tls_model configuration to enable the initial-exec model when possible.
+ - Mark malloc_conf as a weak symbol so that the application can override it.
+ - Correctly detect glibc's adaptive pthread mutexes.
+ - Fix the --without-export configure option.
+
+* 3.6.0 (March 31, 2014)
+
+ This version contains a critical bug fix for a regression present in 3.5.0 and
+ 3.5.1.
+
+ Bug fixes:
+ - Fix a regression in arena_chunk_alloc() that caused crashes during
+ small/large allocation if chunk allocation failed. In the absence of this
+ bug, chunk allocation failure would result in allocation failure, e.g. NULL
+ return from malloc(). This regression was introduced in 3.5.0.
+ - Fix backtracing for gcc intrinsics-based backtracing by specifying
+ -fno-omit-frame-pointer to gcc. Note that the application (and all the
+ libraries it links to) must also be compiled with this option for
+ backtracing to be reliable.
+ - Use dss allocation precedence for huge allocations as well as small/large
+ allocations.
+ - Fix test assertion failure message formatting. This bug did not manifest on
+ x86_64 systems because of implementation subtleties in va_list.
+ - Fix inconsequential test failures for hash and SFMT code.
+
+ New features:
+ - Support heap profiling on FreeBSD. This feature depends on the proc
+ filesystem being mounted during heap profile dumping.
+
+* 3.5.1 (February 25, 2014)
+
+ This version primarily addresses minor bugs in test code.
+
+ Bug fixes:
+ - Configure Solaris/Illumos to use MADV_FREE.
+ - Fix junk filling for mremap(2)-based huge reallocation. This is only
+ relevant if configuring with the --enable-mremap option specified.
+ - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
+ compiler.
+ - Add a configure test for SSE2 rather than assuming it is usable on i686
+ systems. This fixes test compilation errors, especially on 32-bit Linux
+ systems.
+ - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
+ test.
+ - Fix/remove flawed alignment-related overflow tests.
+ - Prevent compiler optimizations that could change backtraces in the
+ prof_accum unit test.
+
+* 3.5.0 (January 22, 2014)
+
+ This version focuses on refactoring and automated testing, though it also
+ includes some non-trivial heap profiling optimizations not mentioned below.
+
+ New features:
+ - Add the *allocx() API, which is a successor to the experimental *allocm()
+ API. The *allocx() functions are slightly simpler to use because they have
+ fewer parameters, they directly return the results of primary interest, and
+ mallocx()/rallocx() avoid the strict aliasing pitfall that
+ allocm()/rallocm() share with posix_memalign(). Note that *allocm() is
+ slated for removal in the next non-bugfix release.
+ - Add support for LinuxThreads.
+
+ Bug fixes:
+ - Unless heap profiling is enabled, disable floating point code and don't link
+ with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
+ systems, makes it possible to completely disable floating point register
+ use. Some versions of glibc neglect to save/restore caller-saved floating
+ point registers during dynamic lazy symbol loading, and the symbol loading
+ code uses whatever malloc the application happens to have linked/loaded
+ with, the result being potential floating point register corruption.
+ - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
+ backtrace creation in imemalign(). This bug impacted posix_memalign() and
+ aligned_alloc().
+ - Fix a file descriptor leak in a prof_dump_maps() error path.
+ - Fix prof_dump() to close the dump file descriptor for all relevant error
+ paths.
+ - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
+ allocation, not just deallocation.
+ - Fix a data race for large allocation stats counters.
+ - Fix a potential infinite loop during thread exit. This bug occurred on
+ Solaris, and could affect other platforms with similar pthreads TSD
+ implementations.
+ - Don't junk-fill reallocations unless usable size changes. This fixes a
+ violation of the *allocx()/*allocm() semantics.
+ - Fix growing large reallocation to junk fill new space.
+ - Fix huge deallocation to junk fill when munmap is disabled.
+ - Change the default private namespace prefix from empty to je_, and change
+ --with-private-namespace-prefix so that it prepends an additional prefix
+ rather than replacing je_. This reduces the likelihood of applications
+ which statically link jemalloc experiencing symbol name collisions.
+ - Add missing private namespace mangling (relevant when
+ --with-private-namespace is specified).
+ - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
+ static even for debug builds.
+ - Add a missing mutex unlock in a malloc_init_hard() error path. In practice
+ this error path is never executed.
+ - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These
+ bugs had no impact except for malformed inputs.
+ - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by
+ existing calls, so they had no impact.
+
+* 3.4.1 (October 20, 2013)
+
+ Bug fixes:
+ - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
+ of internal data structures and subsequent crashes.
+ - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
+ uninitialized memory in:
+ + arena chunk headers
+ + internal zero-initialized data structures (relevant to tcache and prof
+ code)
+ - Preserve errno during the first allocation. A readlink(2) call during
+ initialization fails unless /etc/malloc.conf exists, so errno was typically
+ set during the first allocation prior to this fix.
+ - Fix compilation warnings reported by gcc 4.8.1.
+
+* 3.4.0 (June 2, 2013)
+
+ This version is essentially a small bugfix release, but the addition of
+ aarch64 support requires that the minor version be incremented.
+
+ Bug fixes:
+ - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
+ typically triggered by multiple threads concurrently deallocating huge
+ objects.
+
+ New features:
+ - Add support for the aarch64 architecture.
+
+* 3.3.1 (March 6, 2013)
+
+ This version fixes bugs that are typically encountered only when utilizing
+ custom run-time options.
+
+ Bug fixes:
+ - Fix a locking order bug that could cause deadlock during fork if heap
+ profiling were enabled.
+ - Fix a chunk recycling bug that could cause the allocator to lose track of
+ whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
+ corruption if allocating via sbrk(2) (unlikely unless running with the
+ "dss:primary" option specified). This was completely harmless on Linux
+ unless using mlockall(2) (and unlikely even then, unless the
+ --disable-munmap configure option or the "dss:primary" option was
+ specified). This regression was introduced in 3.1.0 by the
+ mlockall(2)/madvise(2) interaction fix.
+ - Fix TLS-related memory corruption that could occur during thread exit if the
+ thread never allocated memory. Only the quarantine and prof facilities were
+ susceptible.
+ - Fix two quarantine bugs:
+ + Internal reallocation of the quarantined object array leaked the old
+ array.
+ + Reallocation failure for internal reallocation of the quarantined object
+ array (very unlikely) resulted in memory corruption.
+ - Fix Valgrind integration to annotate all internally allocated memory in a
+ way that keeps Valgrind happy about internal data structure access.
+ - Fix building for s390 systems.
+
+* 3.3.0 (January 23, 2013)
+
+ This version includes a few minor performance improvements in addition to the
+ listed new features and bug fixes.
+
+ New features:
+ - Add clipping support to lg_chunk option processing.
+ - Add the --enable-ivsalloc option.
+ - Add the --without-export option.
+ - Add the --disable-zone-allocator option.
+
+ Bug fixes:
+ - Fix "arenas.extend" mallctl to output the number of arenas.
+ - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
+ is undefined.
+ - Fix build break on FreeBSD related to alloca.h.
+
+* 3.2.0 (November 9, 2012)
+
+ In addition to a couple of bug fixes, this version modifies page run
+ allocation and dirty page purging algorithms in order to better control
+ page-level virtual memory fragmentation.
+
+ Incompatible changes:
+ - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
+
+ Bug fixes:
+ - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
+ after primary dss allocation fails.
+ - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced
+ in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
+
+* 3.1.0 (October 16, 2012)
+
+ New features:
+ - Auto-detect whether running inside Valgrind, thus removing the need to
+ manually specify MALLOC_CONF=valgrind:true.
+ - Add the "arenas.extend" mallctl, which allows applications to create
+ manually managed arenas.
+ - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
+ - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
+ which provide control over dss/mmap precedence.
+ - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
+ - Define LG_QUANTUM for hppa.
+
+ Incompatible changes:
+ - Disable tcache by default if running inside Valgrind, in order to avoid
+ making unallocated objects appear reachable to Valgrind.
+ - Drop const from malloc_usable_size() argument on Linux.
+
+ Bug fixes:
+ - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+ - Remove const from __*_hook variable declarations, so that glibc can modify
+ them during process forking.
+ - Fix mlockall(2)/madvise(2) interaction.
+ - Fix fork(2)-related deadlocks.
+ - Fix error return value for "thread.tcache.enabled" mallctl.
+
+* 3.0.0 (May 11, 2012)
+
+ Although this version adds some major new features, the primary focus is on
+ internal code cleanup that facilitates maintainability and portability, most
+ of which is not reflected in the ChangeLog. This is the first release to
+ incorporate substantial contributions from numerous other developers, and the
+ result is a more broadly useful allocator (see the git revision history for
+ contribution details). Note that the license has been unified, thanks to
+ Facebook granting a license under the same terms as the other copyright
+ holders (see COPYING).
+
+ New features:
+ - Implement Valgrind support, redzones, and quarantine.
+ - Add support for additional platforms:
+ + FreeBSD
+ + Mac OS X Lion
+ + MinGW
+ + Windows (no support yet for replacing the system malloc)
+ - Add support for additional architectures:
+ + MIPS
+ + SH4
+ + Tilera
+ - Add support for cross compiling.
+ - Add nallocm(), which rounds a request size up to the nearest size class
+ without actually allocating.
+ - Implement aligned_alloc() (blame C11).
+ - Add the "thread.tcache.enabled" mallctl.
+ - Add the "opt.prof_final" mallctl.
+ - Update pprof (from gperftools 2.0).
+ - Add the --with-mangling option.
+ - Add the --disable-experimental option.
+ - Add the --disable-munmap option, and make it the default on Linux.
+ - Add the --enable-mremap option, which disables use of mremap(2) by default.
+
+ Incompatible changes:
+ - Enable stats by default.
+ - Enable fill by default.
+ - Disable lazy locking by default.
+ - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
+ - Rename the "arenas.pagesize" mallctl to "arenas.page".
+ - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
+ - Change the "opt.prof_accum" default from true to false.
+
+ Removed features:
+ - Remove the swap feature, including the "config.swap", "swap.avail",
+ "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
+ - Remove highruns statistics, including the
+ "stats.arenas.<i>.bins.<j>.highruns" and
+ "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
+ - As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
+ "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
+ "arenas.[tqcs]bins" mallctls.
+ - Remove the "arenas.chunksize" mallctl.
+ - Remove the "opt.lg_prof_tcmax" option.
+ - Remove the "opt.lg_prof_bt_max" option.
+ - Remove the "opt.lg_tcache_gc_sweep" option.
+ - Remove the --disable-tiny option, including the "config.tiny" mallctl.
+ - Remove the --enable-dynamic-page-shift configure option.
+ - Remove the --enable-sysv configure option.
+
+ Bug fixes:
+ - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
+ invalid statistics and crashes.
+ - Work around TLS deallocation via free() on Linux. This bug could cause
+ write-after-free memory corruption.
+ - Fix a potential deadlock that could occur during interval- and
+ growth-triggered heap profile dumps.
+ - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
+ - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could
+ cause memory corruption and crashes with --enable-dss specified.
+ - Fix fork-related bugs that could cause deadlock in children between fork
+ and exec.
+ - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
+ - Fix realloc(p, 0) to act like free(p).
+ - Do not enforce minimum alignment in memalign().
+ - Check for NULL pointer in malloc_usable_size().
+ - Fix an off-by-one heap profile statistics bug that could be observed in
+ interval- and growth-triggered heap profiles.
+ - Fix the "epoch" mallctl to update cached stats even if the passed in epoch
+ is 0.
+ - Fix bin->runcur management to fix a layout policy bug. This bug did not
+ affect correctness.
+ - Fix a bug in choose_arena_hard() that potentially caused more arenas to be
+ initialized than necessary.
+ - Add missing "opt.lg_tcache_max" mallctl implementation.
+ - Use glibc allocator hooks to make mixed allocator usage less likely.
+ - Fix build issues for --disable-tcache.
+ - Don't mangle pthread_create() when --with-private-namespace is specified.
+
+* 2.2.5 (November 14, 2011)
+
+ Bug fixes:
+ - Fix huge_ralloc() race when using mremap(2). This is a serious bug that
+ could cause memory corruption and/or crashes.
+ - Fix huge_ralloc() to maintain chunk statistics.
+ - Fix malloc_stats_print(..., "a") output.
+
+* 2.2.4 (November 5, 2011)
+
+ Bug fixes:
+ - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as
+ well as for --disable-tls builds in earlier releases.
+ - Do not assume a 4 KiB page size in test/rallocm.c.
+
+* 2.2.3 (August 31, 2011)
+
+ This version fixes numerous bugs related to heap profiling.
+
+ Bug fixes:
+ - Fix a prof-related race condition. This bug could cause memory corruption,
+ but only occurred in non-default configurations (prof_accum:false).
+ - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
+ excluded from backtraces).
+ - Fix a prof-related bug in realloc() (only triggered by OOM errors).
+ - Fix prof-related bugs in allocm() and rallocm().
+ - Fix prof_tdata_cleanup() for --disable-tls builds.
+ - Fix a relative include path, to fix objdir builds.
+
+* 2.2.2 (July 30, 2011)
+
+ Bug fixes:
+ - Fix a build error for --disable-tcache.
+ - Fix assertions in arena_purge() (for real this time).
+ - Add the --with-private-namespace option. This is a workaround for symbol
+ conflicts that can inadvertently arise when using static libraries.
+
+* 2.2.1 (March 30, 2011)
+
+ Bug fixes:
+ - Implement atomic operations for x86/x64. This fixes compilation failures
+ for versions of gcc that are still in wide use.
+ - Fix an assertion in arena_purge().
+
+* 2.2.0 (March 22, 2011)
+
+ This version incorporates several improvements to algorithms and data
+ structures that tend to reduce fragmentation and increase speed.
+
+ New features:
+ - Add the "stats.cactive" mallctl.
+ - Update pprof (from google-perftools 1.7).
+ - Improve backtracing-related configuration logic, and add the
+ --disable-prof-libgcc option.
+
+ Bug fixes:
+ - Change the default symbol visibility from "internal" to "hidden", which
+ decreases the overhead of library-internal function calls.
+ - Fix symbol visibility so that it is also set on OS X.
+ - Fix a build dependency regression caused by the introduction of the .pic.o
+ suffix for PIC object files.
+ - Add missing checks for mutex initialization failures.
+ - Don't use libgcc-based backtracing except on x64, where it is known to work.
+ - Fix deadlocks on OS X that were due to memory allocation in
+ pthread_mutex_lock().
+ - Heap profiling-specific fixes:
+ + Fix memory corruption due to integer overflow in small region index
+ computation, when using a small enough sample interval that profiling
+ context pointers are stored in small run headers.
+ + Fix a bootstrap ordering bug that only occurred with TLS disabled.
+ + Fix a rallocm() rsize bug.
+ + Fix error detection bugs for aligned memory allocation.
+
+* 2.1.3 (March 14, 2011)
+
+ Bug fixes:
+ - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix
+ for OS X in 2.1.2).
+ - Fix a "thread.arena" mallctl bug.
+ - Fix a thread cache stats merging bug.
+
+* 2.1.2 (March 2, 2011)
+
+ Bug fixes:
+ - Fix "thread.{de,}allocatedp" mallctl for OS X.
+ - Add missing jemalloc.a to build system.
+
+* 2.1.1 (January 31, 2011)
+
+ Bug fixes:
+ - Fix aligned huge reallocation (affected allocm()).
+ - Fix the ALLOCM_LG_ALIGN macro definition.
+ - Fix a heap dumping deadlock.
+ - Fix a "thread.arena" mallctl bug.
+
+* 2.1.0 (December 3, 2010)
+
+ This version incorporates some optimizations that can't quite be considered
+ bug fixes.
+
+ New features:
+ - Use Linux's mremap(2) for huge object reallocation when possible.
+ - Avoid locking in mallctl*() when possible.
+ - Add the "thread.[de]allocatedp" mallctl's.
+ - Convert the manual page source from roff to DocBook, and generate both roff
+ and HTML manuals.
+
+ Bug fixes:
+ - Fix a crash due to incorrect bootstrap ordering. This only impacted
+ --enable-debug --enable-dss configurations.
+ - Fix a minor statistics bug for mallctl("swap.avail", ...).
+
+* 2.0.1 (October 29, 2010)
+
+ Bug fixes:
+ - Fix a race condition in heap profiling that could cause undefined behavior
+ if "opt.prof_accum" were disabled.
+ - Add missing mutex unlocks for some OOM error paths in the heap profiling
+ code.
+ - Fix a compilation error for non-C99 builds.
+
+* 2.0.0 (October 24, 2010)
+
+ This version focuses on the experimental *allocm() API, and on improved
+ run-time configuration/introspection. Nonetheless, numerous performance
+ improvements are also included.
+
+ New features:
+ - Implement the experimental {,r,s,d}allocm() API, which provides a superset
+ of the functionality available via malloc(), calloc(), posix_memalign(),
+ realloc(), malloc_usable_size(), and free(). These functions can be used to
+ allocate/reallocate aligned zeroed memory, ask for optional extra memory
+ during reallocation, prevent object movement during reallocation, etc.
+ - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
+ more human-readable, and more flexible. For example:
+ JEMALLOC_OPTIONS=AJP
+ is now:
+ MALLOC_CONF=abort:true,fill:true,stats_print:true
+ - Port to Apple OS X. Sponsored by Mozilla.
+ - Make it possible for the application to control thread-->arena mappings via
+ the "thread.arena" mallctl.
+ - Add compile-time support for all TLS-related functionality via pthreads TSD.
+ This is mainly of interest for OS X, which does not support TLS, but has a
+ TSD implementation with similar performance.
+ - Override memalign() and valloc() if they are provided by the system.
+ - Add the "arenas.purge" mallctl, which can be used to synchronously purge all
+ dirty unused pages.
+ - Make cumulative heap profiling data optional, so that it is possible to
+ limit the amount of memory consumed by heap profiling data structures.
+ - Add per thread allocation counters that can be accessed via the
+ "thread.allocated" and "thread.deallocated" mallctls.
+
+ Incompatible changes:
+ - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
+ - Increase default backtrace depth from 4 to 128 for heap profiling.
+ - Disable interval-based profile dumps by default.
+
+ Bug fixes:
+ - Remove bad assertions in fork handler functions. These assertions could
+ cause aborts for some combinations of configure settings.
+ - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
+ - Fix leak context reporting. This bug tended to cause the number of contexts
+ to be underreported (though the reported numbers of objects and bytes were
+ correct).
+ - Fix a realloc() bug for large in-place growing reallocation. This bug could
+ cause memory corruption, but it was hard to trigger.
+ - Fix an allocation bug for small allocations that could be triggered if
+ multiple threads raced to create a new run of backing pages.
+ - Enhance the heap profiler to trigger samples based on usable size, rather
+ than request size.
+ - Fix a heap profiling bug due to sometimes losing track of requested object
+ size for sampled objects.
+
+* 1.0.3 (August 12, 2010)
+
+ Bug fixes:
+ - Fix the libunwind-based implementation of stack backtracing (used for heap
+ profiling). This bug could cause zero-length backtraces to be reported.
+ - Add a missing mutex unlock in library initialization code. If multiple
+ threads raced to initialize malloc, some of them could end up permanently
+ blocked.
+
+* 1.0.2 (May 11, 2010)
+
+ Bug fixes:
+ - Fix junk filling of large objects, which could cause memory corruption.
+ - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
+ memory limits could cause swap file configuration to fail. Contributed by
+ Jordan DeLong.
+
+* 1.0.1 (April 14, 2010)
+
+ Bug fixes:
+ - Fix compilation when --enable-fill is specified.
+ - Fix threads-related profiling bugs that affected accuracy and caused memory
+ to be leaked during thread exit.
+ - Fix dirty page purging race conditions that could cause crashes.
+ - Fix crash in tcache flushing code during thread destruction.
+
+* 1.0.0 (April 11, 2010)
+
+ This release focuses on speed and run-time introspection. Numerous
+ algorithmic improvements make this release substantially faster than its
+ predecessors.
+
+ New features:
+ - Implement autoconf-based configuration system.
+ - Add mallctl*(), for the purposes of introspection and run-time
+ configuration.
+ - Make it possible for the application to manually flush a thread's cache, via
+ the "tcache.flush" mallctl.
+ - Base maximum dirty page count on proportion of active memory.
+ - Compute various additional run-time statistics, including per size class
+ statistics for large objects.
+ - Expose malloc_stats_print(), which can be called repeatedly by the
+ application.
+ - Simplify the malloc_message() signature to only take one string argument,
+ and incorporate an opaque data pointer argument for use by the application
+ in combination with malloc_stats_print().
+ - Add support for allocation backed by one or more swap files, and allow the
+ application to disable over-commit if swap files are in use.
+ - Implement allocation profiling and leak checking.
+
+ Removed features:
+ - Remove the dynamic arena rebalancing code, since thread-specific caching
+ reduces its utility.
+
+ Bug fixes:
+ - Modify chunk allocation to work when address space layout randomization
+ (ASLR) is in use.
+ - Fix thread cleanup bugs related to TLS destruction.
+ - Handle 0-size allocation requests in posix_memalign().
+ - Fix a chunk leak. The leaked chunks were never touched, so this impacted
+ virtual memory usage, but not physical memory usage.
+
+* linux_2008082[78]a (August 27/28, 2008)
+
+ These snapshot releases are the simple result of incorporating Linux-specific
+ support into the FreeBSD malloc sources.
+
+--------------------------------------------------------------------------------
+vim:filetype=text:textwidth=80
diff --git a/memory/jemalloc/src/INSTALL b/memory/jemalloc/src/INSTALL
new file mode 100644
index 000000000..687871653
--- /dev/null
+++ b/memory/jemalloc/src/INSTALL
@@ -0,0 +1,414 @@
+Building and installing a packaged release of jemalloc can be as simple as
+typing the following while in the root directory of the source tree:
+
+ ./configure
+ make
+ make install
+
+If building from unpackaged developer sources, the simplest command sequence
+that might work is:
+
+ ./autogen.sh
+ make dist
+ make
+ make install
+
+Note that documentation is not built by the default target because doing so
+would create a dependency on xsltproc in packaged releases, hence the
+requirement to either run 'make dist' or avoid installing docs via the various
+install_* targets documented below.
+
+=== Advanced configuration =====================================================
+
+The 'configure' script supports numerous options that allow control of which
+functionality is enabled, where jemalloc is installed, etc. Optionally, pass
+any of the following arguments (not a definitive list) to 'configure':
+
+--help
+ Print a definitive list of options.
+
+--prefix=<install-root-dir>
+ Set the base directory in which to install. For example:
+
+ ./configure --prefix=/usr/local
+
+ will cause files to be installed into /usr/local/include, /usr/local/lib,
+ and /usr/local/man.
+
+--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
+ Use the specified version string rather than trying to generate one (if in
+ a git repository) or using the existing VERSION file (if present).
+
+--with-rpath=<colon-separated-rpath>
+ Embed one or more library paths, so that libjemalloc can find the libraries
+ it is linked to. This works only on ELF-based systems.
+
+--with-mangling=<map>
+ Mangle public symbols specified in <map>, which is a comma-separated list of
+ name:mangled pairs.
+
+ For example, to use ld's --wrap option as an alternative method for
+ overriding libc's malloc implementation, specify something like:
+
+ --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
+
+ Note that mangling happens prior to application of the prefix specified by
+ --with-jemalloc-prefix, and mangled symbols are then ignored when applying
+ the prefix.
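+
+ As an illustrative sketch only (the application file app.c and the omitted
+ library search paths are assumptions), a build combining this option with
+ ld's --wrap feature might look like:
+
+   ./configure --with-mangling=malloc:__wrap_malloc,free:__wrap_free
+   make
+   cc app.c -o app -Wl,--wrap=malloc -Wl,--wrap=free -ljemalloc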
+
+--with-jemalloc-prefix=<prefix>
+ Prefix all public APIs with <prefix>. For example, if <prefix> is
+ "prefix_", API changes like the following occur:
+
+ malloc() --> prefix_malloc()
+ malloc_conf --> prefix_malloc_conf
+ /etc/malloc.conf --> /etc/prefix_malloc.conf
+ MALLOC_CONF --> PREFIX_MALLOC_CONF
+
+ This makes it possible to use jemalloc at the same time as the system
+ allocator, or even to use multiple copies of jemalloc simultaneously.
+
+ By default, the prefix is "", except on OS X, where it is "je_". On OS X,
+ jemalloc overlays the default malloc zone, but makes no attempt to actually
+ replace the "malloc", "calloc", etc. symbols.
+
+--without-export
+ Don't export public APIs. This can be useful when building jemalloc as a
+ static library, or to avoid exporting public APIs when using the zone
+ allocator on OSX.
+
+--with-private-namespace=<prefix>
+ Prefix all library-private APIs with <prefix>je_. For shared libraries,
+ symbol visibility mechanisms prevent these symbols from being exported, but
+ for static libraries, naming collisions are a real possibility. By
+ default, <prefix> is empty, which results in a symbol prefix of "je_".
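+
+ For example, assuming a project-specific prefix of "myapp_" (purely
+ illustrative), a static build whose internal symbols are prefixed with
+ myapp_je_ could be configured as:
+
+   ./configure --with-private-namespace=myapp_
+   make build_lib_static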
+
+--with-install-suffix=<suffix>
+ Append <suffix> to the base name of all installed files, such that multiple
+ versions of jemalloc can coexist in the same installation directory. For
+ example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
+
+--with-malloc-conf=<malloc_conf>
+ Embed <malloc_conf> as a run-time options string that is processed prior to
+ the malloc_conf global variable, the /etc/malloc.conf symlink, and the
+ MALLOC_CONF environment variable. For example, to change the default chunk
+ size to 256 KiB:
+
+ --with-malloc-conf=lg_chunk:18
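+
+ Because the embedded string is processed first, later sources can still
+ override it at run time. A hedged sketch (the application name is
+ hypothetical):
+
+   MALLOC_CONF=lg_chunk:21 ./app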
+
+--disable-cc-silence
+ Disable code that silences non-useful compiler warnings. This is mainly
+ useful during development when auditing the set of warnings that are being
+ silenced.
+
+--enable-debug
+ Enable assertions and validation code. This incurs a substantial
+ performance hit, but is very useful during application development.
+ Implies --enable-ivsalloc.
+
+--enable-code-coverage
+ Enable code coverage support, for use during jemalloc test development.
+ Additional testing targets are available if this option is enabled:
+
+ coverage
+ coverage_unit
+ coverage_integration
+ coverage_stress
+
+ These targets do not clear code coverage results from previous runs, and
+ there are interactions between the various coverage targets, so it is
+ usually advisable to run 'make clean' between repeated code coverage runs.
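+
+ As a sketch, one possible unit-test coverage workflow:
+
+   ./configure --enable-code-coverage
+   make clean
+   make coverage_unit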
+
+--disable-stats
+ Disable statistics gathering functionality. See the "opt.stats_print"
+ option documentation for usage details.
+
+--enable-ivsalloc
+ Enable validation code, which verifies that pointers reside within
+ jemalloc-owned chunks before dereferencing them. This incurs a minor
+ performance hit.
+
+--enable-prof
+ Enable heap profiling and leak detection functionality. See the "opt.prof"
+ option documentation for usage details. When enabled, there are several
+ approaches to backtracing, and the configure script chooses the first one
+ in the following list that appears to function correctly:
+
+ + libunwind (requires --enable-prof-libunwind)
+ + libgcc (unless --disable-prof-libgcc)
+ + gcc intrinsics (unless --disable-prof-gcc)
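+
+ For illustration only (the application name and option values are
+ assumptions), a profiling-enabled build and run might look like:
+
+   ./configure --enable-prof
+   make
+   MALLOC_CONF=prof:true,lg_prof_sample:19 ./app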
+
+--enable-prof-libunwind
+ Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
+ backtracing.
+
+--disable-prof-libgcc
+ Disable the use of libgcc's backtracing functionality.
+
+--disable-prof-gcc
+ Disable the use of gcc intrinsics for backtracing.
+
+--with-static-libunwind=<libunwind.a>
+ Statically link against the specified libunwind.a rather than dynamically
+ linking with -lunwind.
+
+--disable-tcache
+ Disable thread-specific caches for small objects. Objects are cached and
+ released in bulk, thus reducing the total number of mutex operations. See
+ the "opt.tcache" option for usage details.
+
+--disable-munmap
+ Disable virtual memory deallocation via munmap(2); instead keep track of
+ the virtual memory for later use. munmap() is disabled by default (i.e.
+ --disable-munmap is implied) on Linux, which has a quirk in its virtual
+ memory allocation algorithm that causes semi-permanent VM map holes under
+ normal jemalloc operation.
+
+--disable-fill
+ Disable support for junk/zero filling of memory, quarantine, and redzones.
+ See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
+ documentation for usage details.
+
+--disable-valgrind
+ Disable support for Valgrind.
+
+--disable-zone-allocator
+ Disable zone allocator for Darwin. This means jemalloc won't be hooked as
+ the default allocator on OSX/iOS.
+
+--enable-utrace
+ Enable utrace(2)-based allocation tracing. This feature is not broadly
+ portable (FreeBSD has it, but Linux and OS X do not).
+
+--enable-xmalloc
+ Enable support for optional immediate termination due to out-of-memory
+ errors, as is commonly implemented by the "xmalloc" wrapper function for malloc.
+ See the "opt.xmalloc" option documentation for usage details.
+
+--enable-lazy-lock
+ Enable code that wraps pthread_create() to detect when an application
+ switches from single-threaded to multi-threaded mode, so that it can avoid
+ mutex locking/unlocking operations while in single-threaded mode. In
+ practice, this feature usually has little impact on performance unless
+ thread-specific caching is disabled.
+
+--disable-tls
+ Disable thread-local storage (TLS), which allows for fast access to
+ thread-local variables via the __thread keyword. If TLS is available,
+ jemalloc uses it for several purposes.
+
+--disable-cache-oblivious
+ Disable cache-oblivious large allocation alignment for large allocation
+ requests with no alignment constraints. If this feature is disabled, all
+ large allocations are page-aligned as an implementation artifact, which can
+ severely harm CPU cache utilization. However, the cache-oblivious layout
+ comes at the cost of one extra page per large allocation, which in the
+ most extreme case increases physical memory usage for the 16 KiB size class
+ to 20 KiB.
+
+--with-xslroot=<path>
+ Specify where to find DocBook XSL stylesheets when building the
+ documentation.
+
+--with-lg-page=<lg-page>
+ Specify the base 2 log of the system page size. This option is only useful
+ when cross compiling, since the configure script automatically determines
+ the host's page size by default.
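+
+ For example, when cross compiling for a target that uses 64 KiB pages (the
+ toolchain names below are assumptions):
+
+   CC=aarch64-linux-gnu-gcc ./configure --host=aarch64-linux-gnu \
+       --with-lg-page=16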
+
+--with-lg-page-sizes=<lg-page-sizes>
+ Specify the comma-separated base 2 logs of the page sizes to support. This
+ option may be useful when cross-compiling in combination with
+ --with-lg-page, but its primary use case is for integration with FreeBSD's
+ libc, wherein jemalloc is embedded.
+
+--with-lg-size-class-group=<lg-size-class-group>
+ Specify the base 2 log of how many size classes to use for each doubling in
+ size. By default jemalloc uses <lg-size-class-group>=2, which results in
+ e.g. the following size classes:
+
+ [...], 64,
+ 80, 96, 112, 128,
+ 160, [...]
+
+ <lg-size-class-group>=3 results in e.g. the following size classes:
+
+ [...], 64,
+ 72, 80, 88, 96, 104, 112, 120, 128,
+ 144, [...]
+
+ The minimal <lg-size-class-group>=0 causes jemalloc to only provide size
+ classes that are powers of 2:
+
+ [...],
+ 64,
+ 128,
+ 256,
+ [...]
+
+ An implementation detail currently limits the total number of small size
+ classes to 255, and a compilation error will result if the
+ <lg-size-class-group> you specify cannot be supported. The limit is
+ roughly <lg-size-class-group>=4, depending on page size.
+
+--with-lg-quantum=<lg-quantum>
+ Specify the base 2 log of the minimum allocation alignment. jemalloc needs
+ to know the minimum alignment that meets the following C standard
+ requirement (quoted from the April 12, 2011 draft of the C11 standard):
+
+ The pointer returned if the allocation succeeds is suitably aligned so
+ that it may be assigned to a pointer to any type of object with a
+ fundamental alignment requirement and then used to access such an object
+ or an array of such objects in the space allocated [...]
+
+ This setting is architecture-specific, and although jemalloc includes known
+ safe values for the most commonly used modern architectures, there is a
+ wrinkle related to GNU libc (glibc) that may impact your choice of
+ <lg-quantum>. On most modern architectures, this mandates 16-byte alignment
+ (<lg-quantum>=4), but the glibc developers chose not to meet this
+ requirement for performance reasons. An old discussion can be found at
+ https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc,
+ jemalloc does follow the C standard by default (caveat: jemalloc
+ technically cheats if --with-lg-tiny-min is smaller than
+ --with-lg-quantum), but the fact that Linux systems already work around
+ this allocator noncompliance means that it is generally safe in practice to
+ let jemalloc's minimum alignment follow glibc's lead. If you specify
+ --with-lg-quantum=3 during configuration, jemalloc will provide additional
+ size classes that are not 16-byte-aligned (24, 40, and 56, assuming
+ --with-lg-size-class-group=2).
+
+--with-lg-tiny-min=<lg-tiny-min>
+ Specify the base 2 log of the minimum tiny size class to support. Tiny
+ size classes are powers of 2 less than the quantum, and are only
+ incorporated if <lg-tiny-min> is less than <lg-quantum> (see
+ --with-lg-quantum). Tiny size classes technically violate the C standard
+ requirement for minimum alignment, and crashes could conceivably result if
+ the compiler were to generate instructions that made alignment assumptions,
+ both because illegal instruction traps could result, and because accesses
+ could straddle page boundaries and cause segmentation faults due to
+ accessing unmapped addresses.
+
+ The default of <lg-tiny-min>=3 works well in practice even on architectures
+ that technically require 16-byte alignment, probably for the same reason
+ --with-lg-quantum=3 works. Smaller tiny size classes can, and will, cause
+ crashes (see https://bugzilla.mozilla.org/show_bug.cgi?id=691003 for an
+ example).
+
+ This option is rarely useful, and is mainly provided as documentation of a
+ subtle implementation detail. If you do use this option, specify a
+ value in [3, ..., <lg-quantum>].
+
+The following environment variables (not a definitive list) impact configure's
+behavior:
+
+CFLAGS="?"
+ Pass these flags to the compiler. You probably shouldn't define this unless
+ you know what you are doing. (Use EXTRA_CFLAGS instead.)
+
+EXTRA_CFLAGS="?"
+ Append these flags to CFLAGS. This makes it possible to add flags such as
+ -Werror, while allowing the configure script to determine what other flags
+ are appropriate for the specified configuration.
+
+ The configure script specifically checks whether an optimization flag (-O*)
+ is specified in EXTRA_CFLAGS, and refrains from specifying an optimization
+ level if it finds that one has already been specified.
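+
+ For example (the particular flags are illustrative):
+
+   EXTRA_CFLAGS="-Werror -g" ./configure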
+
+CPPFLAGS="?"
+ Pass these flags to the C preprocessor. Note that CFLAGS is not passed to
+ 'cpp' when 'configure' is looking for include files, so you must use
+ CPPFLAGS instead if you need to help 'configure' find header files.
+
+LD_LIBRARY_PATH="?"
+ 'ld' uses this colon-separated list to find libraries.
+
+LDFLAGS="?"
+ Pass these flags when linking.
+
+PATH="?"
+ 'configure' uses this to find programs.
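+
+For example, several of these variables can be combined on a single configure
+invocation to pick up a dependency such as libunwind from a non-standard
+location (the paths are illustrative):
+
+   CPPFLAGS="-I/opt/libunwind/include" LDFLAGS="-L/opt/libunwind/lib" \
+       ./configure --enable-prof --enable-prof-libunwind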
+
+=== Advanced compilation =======================================================
+
+To build only parts of jemalloc, use the following targets:
+
+ build_lib_shared
+ build_lib_static
+ build_lib
+ build_doc_html
+ build_doc_man
+ build_doc
+
+To install only parts of jemalloc, use the following targets:
+
+ install_bin
+ install_include
+ install_lib_shared
+ install_lib_static
+ install_lib
+ install_doc_html
+ install_doc_man
+ install_doc
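+
+As a sketch, building and installing only the shared library and headers
+(skipping documentation) could look like:
+
+   make build_lib_shared
+   make install_lib_shared install_include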
+
+To clean up build results to varying degrees, use the following make targets:
+
+ clean
+ distclean
+ relclean
+
+=== Advanced installation ======================================================
+
+Optionally, define make variables when invoking make, including (not
+exclusively):
+
+INCLUDEDIR="?"
+ Use this as the installation prefix for header files.
+
+LIBDIR="?"
+ Use this as the installation prefix for libraries.
+
+MANDIR="?"
+ Use this as the installation prefix for man pages.
+
+DESTDIR="?"
+ Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful
+ when installing to a different path than was specified via --prefix.
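+
+ For example, to stage an install into a temporary packaging root (the path
+ is illustrative):
+
+   make install DESTDIR=/tmp/jemalloc-stage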
+
+CC="?"
+ Use this to invoke the C compiler.
+
+CFLAGS="?"
+ Pass these flags to the compiler.
+
+CPPFLAGS="?"
+ Pass these flags to the C preprocessor.
+
+LDFLAGS="?"
+ Pass these flags when linking.
+
+PATH="?"
+ Use this to search for programs used during configuration and building.
+
+=== Development ================================================================
+
+If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
+script rather than 'configure'. This re-generates 'configure', enables
+configuration dependency rules, and enables re-generation of automatically
+generated source files.
+
+The build system supports using an object directory separate from the source
+tree. For example, you can create an 'obj' directory, and from within that
+directory, issue configuration and build commands:
+
+ autoconf
+ mkdir obj
+ cd obj
+ ../configure --enable-autogen
+ make
+
+=== Documentation ==============================================================
+
+The manual page is generated in both html and roff formats. Any web browser
+can be used to view the html manual. The roff manual page can be formatted
+prior to installation via the following command:
+
+ nroff -man -t doc/jemalloc.3
diff --git a/memory/jemalloc/src/Makefile.in b/memory/jemalloc/src/Makefile.in
new file mode 100644
index 000000000..d13c7f108
--- /dev/null
+++ b/memory/jemalloc/src/Makefile.in
@@ -0,0 +1,506 @@
+# Clear out all vpaths, then set just one (default vpath) for the main build
+# directory.
+vpath
+vpath % .
+
+# Clear the default suffixes, so that built-in rules are not used.
+.SUFFIXES :
+
+SHELL := /bin/sh
+
+CC := @CC@
+
+# Configuration parameters.
+DESTDIR =
+BINDIR := $(DESTDIR)@BINDIR@
+INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
+LIBDIR := $(DESTDIR)@LIBDIR@
+DATADIR := $(DESTDIR)@DATADIR@
+MANDIR := $(DESTDIR)@MANDIR@
+srcroot := @srcroot@
+objroot := @objroot@
+abs_srcroot := @abs_srcroot@
+abs_objroot := @abs_objroot@
+
+# Build parameters.
+CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
+EXTRA_CFLAGS := @EXTRA_CFLAGS@
+CFLAGS := @CFLAGS@ $(EXTRA_CFLAGS)
+LDFLAGS := @LDFLAGS@
+EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
+LIBS := @LIBS@
+RPATH_EXTRA := @RPATH_EXTRA@
+SO := @so@
+IMPORTLIB := @importlib@
+O := @o@
+A := @a@
+EXE := @exe@
+LIBPREFIX := @libprefix@
+REV := @rev@
+install_suffix := @install_suffix@
+ABI := @abi@
+XSLTPROC := @XSLTPROC@
+AUTOCONF := @AUTOCONF@
+_RPATH = @RPATH@
+RPATH = $(if $(1),$(call _RPATH,$(1)))
+cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@)
+cfghdrs_out := @cfghdrs_out@
+cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
+cfgoutputs_out := @cfgoutputs_out@
+enable_autogen := @enable_autogen@
+enable_code_coverage := @enable_code_coverage@
+enable_prof := @enable_prof@
+enable_valgrind := @enable_valgrind@
+enable_zone_allocator := @enable_zone_allocator@
+MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
+link_whole_archive := @link_whole_archive@
+DSO_LDFLAGS = @DSO_LDFLAGS@
+SOREV = @SOREV@
+PIC_CFLAGS = @PIC_CFLAGS@
+CTARGET = @CTARGET@
+LDTARGET = @LDTARGET@
+TEST_LD_MODE = @TEST_LD_MODE@
+MKLIB = @MKLIB@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+CC_MM = @CC_MM@
+LM := @LM@
+INSTALL = @INSTALL@
+
+ifeq (macho, $(ABI))
+TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
+else
+ifeq (pecoff, $(ABI))
+TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib"
+else
+TEST_LIBRARY_PATH :=
+endif
+endif
+
+LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
+
+# Lists of files.
+BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
+C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
+C_SRCS := $(srcroot)src/jemalloc.c \
+ $(srcroot)src/arena.c \
+ $(srcroot)src/atomic.c \
+ $(srcroot)src/base.c \
+ $(srcroot)src/bitmap.c \
+ $(srcroot)src/chunk.c \
+ $(srcroot)src/chunk_dss.c \
+ $(srcroot)src/chunk_mmap.c \
+ $(srcroot)src/ckh.c \
+ $(srcroot)src/ctl.c \
+ $(srcroot)src/extent.c \
+ $(srcroot)src/hash.c \
+ $(srcroot)src/huge.c \
+ $(srcroot)src/mb.c \
+ $(srcroot)src/mutex.c \
+ $(srcroot)src/nstime.c \
+ $(srcroot)src/pages.c \
+ $(srcroot)src/prng.c \
+ $(srcroot)src/prof.c \
+ $(srcroot)src/quarantine.c \
+ $(srcroot)src/rtree.c \
+ $(srcroot)src/stats.c \
+ $(srcroot)src/spin.c \
+ $(srcroot)src/tcache.c \
+ $(srcroot)src/ticker.c \
+ $(srcroot)src/tsd.c \
+ $(srcroot)src/util.c \
+ $(srcroot)src/witness.c
+ifeq ($(enable_valgrind), 1)
+C_SRCS += $(srcroot)src/valgrind.c
+endif
+ifeq ($(enable_zone_allocator), 1)
+C_SRCS += $(srcroot)src/zone.c
+endif
+ifeq ($(IMPORTLIB),$(SO))
+STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
+endif
+ifdef PIC_CFLAGS
+STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A)
+else
+STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A)
+endif
+DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
+ifneq ($(SOREV),$(SO))
+DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
+endif
+ifeq (1, $(link_whole_archive))
+LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive
+else
+LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+endif
+PC := $(objroot)jemalloc.pc
+MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
+DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
+DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
+DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
+DOCS := $(DOCS_HTML) $(DOCS_MAN3)
+C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
+ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
+ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \
+ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
+ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
+ifeq (1, $(link_whole_archive))
+C_UTIL_INTEGRATION_SRCS :=
+else
+C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
+endif
+TESTS_UNIT := \
+ $(srcroot)test/unit/a0.c \
+ $(srcroot)test/unit/arena_reset.c \
+ $(srcroot)test/unit/atomic.c \
+ $(srcroot)test/unit/bitmap.c \
+ $(srcroot)test/unit/ckh.c \
+ $(srcroot)test/unit/decay.c \
+ $(srcroot)test/unit/fork.c \
+ $(srcroot)test/unit/hash.c \
+ $(srcroot)test/unit/junk.c \
+ $(srcroot)test/unit/junk_alloc.c \
+ $(srcroot)test/unit/junk_free.c \
+ $(srcroot)test/unit/lg_chunk.c \
+ $(srcroot)test/unit/mallctl.c \
+ $(srcroot)test/unit/math.c \
+ $(srcroot)test/unit/mq.c \
+ $(srcroot)test/unit/mtx.c \
+ $(srcroot)test/unit/ph.c \
+ $(srcroot)test/unit/prng.c \
+ $(srcroot)test/unit/prof_accum.c \
+ $(srcroot)test/unit/prof_active.c \
+ $(srcroot)test/unit/prof_gdump.c \
+ $(srcroot)test/unit/prof_idump.c \
+ $(srcroot)test/unit/prof_reset.c \
+ $(srcroot)test/unit/prof_thread_name.c \
+ $(srcroot)test/unit/ql.c \
+ $(srcroot)test/unit/qr.c \
+ $(srcroot)test/unit/quarantine.c \
+ $(srcroot)test/unit/rb.c \
+ $(srcroot)test/unit/rtree.c \
+ $(srcroot)test/unit/run_quantize.c \
+ $(srcroot)test/unit/SFMT.c \
+ $(srcroot)test/unit/size_classes.c \
+ $(srcroot)test/unit/smoothstep.c \
+ $(srcroot)test/unit/stats.c \
+ $(srcroot)test/unit/ticker.c \
+ $(srcroot)test/unit/nstime.c \
+ $(srcroot)test/unit/tsd.c \
+ $(srcroot)test/unit/util.c \
+ $(srcroot)test/unit/witness.c \
+ $(srcroot)test/unit/zero.c
+TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
+ $(srcroot)test/integration/allocated.c \
+ $(srcroot)test/integration/sdallocx.c \
+ $(srcroot)test/integration/mallocx.c \
+ $(srcroot)test/integration/MALLOCX_ARENA.c \
+ $(srcroot)test/integration/overflow.c \
+ $(srcroot)test/integration/posix_memalign.c \
+ $(srcroot)test/integration/rallocx.c \
+ $(srcroot)test/integration/thread_arena.c \
+ $(srcroot)test/integration/thread_tcache_enabled.c \
+ $(srcroot)test/integration/xallocx.c \
+ $(srcroot)test/integration/chunk.c
+TESTS_STRESS := $(srcroot)test/stress/microbench.c
+TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)
+
+C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
+C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
+C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
+C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
+C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
+C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
+C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
+C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS)
+
+TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
+
+.PHONY: all dist build_doc_html build_doc_man build_doc
+.PHONY: install_bin install_include install_lib
+.PHONY: install_doc_html install_doc_man install_doc install
+.PHONY: tests check clean distclean relclean
+
+.SECONDARY : $(TESTS_OBJS)
+
+# Default target.
+all: build_lib
+
+dist: build_doc
+
+$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
+ $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
+
+$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
+ $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
+
+build_doc_html: $(DOCS_HTML)
+build_doc_man: $(DOCS_MAN3)
+build_doc: $(DOCS)
+
+#
+# Include generated dependency files.
+#
+ifdef CC_MM
+-include $(C_OBJS:%.$(O)=%.d)
+-include $(C_PIC_OBJS:%.$(O)=%.d)
+-include $(C_JET_OBJS:%.$(O)=%.d)
+-include $(C_TESTLIB_OBJS:%.$(O)=%.d)
+-include $(TESTS_OBJS:%.$(O)=%.d)
+endif
+
+$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
+$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
+$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
+$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
+$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET
+$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
+$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
+$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
+$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
+$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+ifneq ($(IMPORTLIB),$(SO))
+$(C_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
+endif
+
+ifndef CC_MM
+# Dependencies.
+HEADER_DIRS = $(srcroot)include/jemalloc/internal \
+ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal
+HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))
+$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS)
+$(TESTS_OBJS): $(objroot)test/include/test/jemalloc_test.h
+endif
+
+$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
+ @mkdir -p $(@D)
+ $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
+ifdef CC_MM
+ @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
+endif
+
+ifneq ($(SOREV),$(SO))
+%.$(SO) : %.$(SOREV)
+ @mkdir -p $(@D)
+ ln -sf $(<F) $@
+endif
+
+$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS))
+ @mkdir -p $(@D)
+ $(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
+
+$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS)
+$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS)
+$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS)
+
+$(STATIC_LIBS):
+ @mkdir -p $(@D)
+ $(AR) $(ARFLAGS)@AROUT@ $+
+
+$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
+ @mkdir -p $(@D)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+
+$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+ @mkdir -p $(@D)
+ $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
+
+$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+ @mkdir -p $(@D)
+ $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+
+build_lib_shared: $(DSOS)
+build_lib_static: $(STATIC_LIBS)
+build_lib: build_lib_shared build_lib_static
+
+install_bin:
+ $(INSTALL) -d $(BINDIR)
+ @for b in $(BINS); do \
+ echo "$(INSTALL) -m 755 $$b $(BINDIR)"; \
+ $(INSTALL) -m 755 $$b $(BINDIR); \
+done
+
+install_include:
+ $(INSTALL) -d $(INCLUDEDIR)/jemalloc
+ @for h in $(C_HDRS); do \
+ echo "$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
+ $(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc; \
+done
+
+install_lib_shared: $(DSOS)
+ $(INSTALL) -d $(LIBDIR)
+ $(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
+ifneq ($(SOREV),$(SO))
+ ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
+endif
+
+install_lib_static: $(STATIC_LIBS)
+ $(INSTALL) -d $(LIBDIR)
+ @for l in $(STATIC_LIBS); do \
+ echo "$(INSTALL) -m 755 $$l $(LIBDIR)"; \
+ $(INSTALL) -m 755 $$l $(LIBDIR); \
+done
+
+install_lib_pc: $(PC)
+ $(INSTALL) -d $(LIBDIR)/pkgconfig
+ @for l in $(PC); do \
+ echo "$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig"; \
+ $(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \
+done
+
+install_lib: install_lib_shared install_lib_static install_lib_pc
+
+install_doc_html:
+ $(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
+ @for d in $(DOCS_HTML); do \
+ echo "$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
+ $(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
+done
+
+install_doc_man:
+ $(INSTALL) -d $(MANDIR)/man3
+ @for d in $(DOCS_MAN3); do \
+ echo "$(INSTALL) -m 644 $$d $(MANDIR)/man3"; \
+ $(INSTALL) -m 644 $$d $(MANDIR)/man3; \
+done
+
+install_doc: install_doc_html install_doc_man
+
+install: install_bin install_include install_lib install_doc
+
+tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
+tests: tests_unit tests_integration tests_stress
+
+check_unit_dir:
+ @mkdir -p $(objroot)test/unit
+check_integration_dir:
+ @mkdir -p $(objroot)test/integration
+stress_dir:
+ @mkdir -p $(objroot)test/stress
+check_dir: check_unit_dir check_integration_dir
+
+check_unit: tests_unit check_unit_dir
+ $(MALLOC_CONF)="purge:ratio" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+ $(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+check_integration_prof: tests_integration check_integration_dir
+ifeq ($(enable_prof), 1)
+ $(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+ $(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+endif
+check_integration_decay: tests_integration check_integration_dir
+ $(MALLOC_CONF)="purge:decay,decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+ $(MALLOC_CONF)="purge:decay,decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+ $(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+check_integration: tests_integration check_integration_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+stress: tests_stress stress_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
+check: check_unit check_integration check_integration_decay check_integration_prof
+
+ifeq ($(enable_code_coverage), 1)
+coverage_unit: check_unit
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS)
+
+coverage_integration: check_integration
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
+
+coverage_stress: stress
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress stress $(TESTS_STRESS_OBJS)
+
+coverage: check
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress integration $(TESTS_STRESS_OBJS)
+endif
+
+clean:
+ rm -f $(C_OBJS)
+ rm -f $(C_PIC_OBJS)
+ rm -f $(C_JET_OBJS)
+ rm -f $(C_TESTLIB_OBJS)
+ rm -f $(C_OBJS:%.$(O)=%.d)
+ rm -f $(C_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_OBJS:%.$(O)=%.gcno)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.d)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.gcno)
+ rm -f $(C_JET_OBJS:%.$(O)=%.d)
+ rm -f $(C_JET_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_JET_OBJS:%.$(O)=%.gcno)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcno)
+ rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
+ rm -f $(TESTS_OBJS)
+ rm -f $(TESTS_OBJS:%.$(O)=%.d)
+ rm -f $(TESTS_OBJS:%.$(O)=%.gcda)
+ rm -f $(TESTS_OBJS:%.$(O)=%.gcno)
+ rm -f $(TESTS_OBJS:%.$(O)=%.out)
+ rm -f $(DSOS) $(STATIC_LIBS)
+ rm -f $(objroot)*.gcov.*
+
+distclean: clean
+ rm -f $(objroot)bin/jemalloc-config
+ rm -f $(objroot)bin/jemalloc.sh
+ rm -f $(objroot)bin/jeprof
+ rm -f $(objroot)config.log
+ rm -f $(objroot)config.status
+ rm -f $(objroot)config.stamp
+ rm -f $(cfghdrs_out)
+ rm -f $(cfgoutputs_out)
+
+relclean: distclean
+ rm -f $(objroot)configure
+ rm -f $(objroot)VERSION
+ rm -f $(DOCS_HTML)
+ rm -f $(DOCS_MAN3)
+
+#===============================================================================
+# Re-configuration rules.
+
+ifeq ($(enable_autogen), 1)
+$(srcroot)configure : $(srcroot)configure.ac
+ cd ./$(srcroot) && $(AUTOCONF)
+
+$(objroot)config.status : $(srcroot)configure
+ ./$(objroot)config.status --recheck
+
+$(srcroot)config.stamp.in : $(srcroot)configure.ac
+ echo stamp > $(srcroot)config.stamp.in
+
+$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure
+ ./$(objroot)config.status
+ @touch $@
+
+# There must be some action in order for make to re-read Makefile when it is
+# out of date.
+$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp
+ @true
+endif
diff --git a/memory/jemalloc/src/README b/memory/jemalloc/src/README
new file mode 100644
index 000000000..5ff24a9ef
--- /dev/null
+++ b/memory/jemalloc/src/README
@@ -0,0 +1,20 @@
+jemalloc is a general purpose malloc(3) implementation that emphasizes
+fragmentation avoidance and scalable concurrency support. jemalloc first came
+into use as the FreeBSD libc allocator in 2005, and since then it has found its
+way into numerous applications that rely on its predictable behavior. In 2010
+jemalloc development efforts broadened to include developer support features
+such as heap profiling, Valgrind integration, and extensive monitoring/tuning
+hooks. Modern jemalloc releases continue to be integrated back into FreeBSD,
+and therefore versatility remains critical. Ongoing development efforts trend
+toward making jemalloc among the best allocators for a broad range of demanding
+applications, and eliminating/mitigating weaknesses that have practical
+repercussions for real-world applications.
+
+The COPYING file contains copyright and licensing information.
+
+The INSTALL file contains information on how to configure, build, and install
+jemalloc.
+
+The ChangeLog file contains a brief summary of changes for each release.
+
+URL: http://jemalloc.net/
diff --git a/memory/jemalloc/src/VERSION b/memory/jemalloc/src/VERSION
new file mode 100644
index 000000000..c108ecb84
--- /dev/null
+++ b/memory/jemalloc/src/VERSION
@@ -0,0 +1 @@
+4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2
diff --git a/memory/jemalloc/src/autogen.sh b/memory/jemalloc/src/autogen.sh
new file mode 100755
index 000000000..75f32da68
--- /dev/null
+++ b/memory/jemalloc/src/autogen.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+for i in autoconf; do
+  echo "$i"
+  $i
+  ret=$?
+  if [ $ret -ne 0 ]; then
+    echo "Error $ret in $i"
+    exit 1
+  fi
+done
+
+echo "./configure --enable-autogen $@"
+./configure --enable-autogen $@
+if [ $? -ne 0 ]; then
+ echo "Error $? in ./configure"
+ exit 1
+fi
diff --git a/memory/jemalloc/src/bin/jemalloc-config.in b/memory/jemalloc/src/bin/jemalloc-config.in
new file mode 100644
index 000000000..b016c8d33
--- /dev/null
+++ b/memory/jemalloc/src/bin/jemalloc-config.in
@@ -0,0 +1,79 @@
+#!/bin/sh
+
+usage() {
+ cat <<EOF
+Usage:
+ @BINDIR@/jemalloc-config <option>
+Options:
+ --help | -h : Print usage.
+ --version : Print jemalloc version.
+ --revision : Print shared library revision number.
+ --config : Print configure options used to build jemalloc.
+ --prefix : Print installation directory prefix.
+ --bindir : Print binary installation directory.
+ --datadir : Print data installation directory.
+ --includedir : Print include installation directory.
+ --libdir : Print library installation directory.
+ --mandir : Print manual page installation directory.
+ --cc : Print compiler used to build jemalloc.
+ --cflags : Print compiler flags used to build jemalloc.
+ --cppflags : Print preprocessor flags used to build jemalloc.
+ --ldflags : Print library flags used to build jemalloc.
+ --libs : Print libraries jemalloc was linked against.
+EOF
+}
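+
+# Example (illustrative "app.c"): the values reported below can be fed back
+# into a compile/link line, e.g.
+#   cc app.c -o app -I"$(jemalloc-config --includedir)" \
+#      -L"$(jemalloc-config --libdir)" -ljemalloc $(jemalloc-config --libs)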
+
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+
+case "$1" in
+--help | -h)
+ usage
+ exit 0
+ ;;
+--version)
+ echo "@jemalloc_version@"
+ ;;
+--revision)
+ echo "@rev@"
+ ;;
+--config)
+ echo "@CONFIG@"
+ ;;
+--prefix)
+ echo "@PREFIX@"
+ ;;
+--bindir)
+ echo "@BINDIR@"
+ ;;
+--datadir)
+ echo "@DATADIR@"
+ ;;
+--includedir)
+ echo "@INCLUDEDIR@"
+ ;;
+--libdir)
+ echo "@LIBDIR@"
+ ;;
+--mandir)
+ echo "@MANDIR@"
+ ;;
+--cc)
+ echo "@CC@"
+ ;;
+--cflags)
+ echo "@CFLAGS@"
+ ;;
+--cppflags)
+ echo "@CPPFLAGS@"
+ ;;
+--ldflags)
+ echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
+ ;;
+--libs)
+ echo "@LIBS@"
+ ;;
+*)
+ usage
+ exit 1
+esac
diff --git a/memory/jemalloc/src/bin/jemalloc.sh.in b/memory/jemalloc/src/bin/jemalloc.sh.in
new file mode 100644
index 000000000..cdf367375
--- /dev/null
+++ b/memory/jemalloc/src/bin/jemalloc.sh.in
@@ -0,0 +1,9 @@
+#!/bin/sh
+
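+# Usage sketch (program name illustrative): "jemalloc.sh my_app --flag" sets
+# @LD_PRELOAD_VAR@ to the installed libjemalloc and then exec's my_app with
+# its arguments, so my_app runs with jemalloc as its allocator.
+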
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+
+@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@
+export @LD_PRELOAD_VAR@
+exec "$@"
diff --git a/memory/jemalloc/src/bin/jeprof.in b/memory/jemalloc/src/bin/jeprof.in
new file mode 100644
index 000000000..42087fcec
--- /dev/null
+++ b/memory/jemalloc/src/bin/jeprof.in
@@ -0,0 +1,5611 @@
+#! /usr/bin/env perl
+
+# Copyright (c) 1998-2007, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ---
+# Program for printing the profile generated by common/profiler.cc,
+# or by the heap profiler (common/debugallocation.cc)
+#
+# The profile contains a sequence of entries of the form:
+# <count> <stack trace>
+# This program parses the profile, and generates user-readable
+# output.
+#
+# Examples:
+#
+# % tools/jeprof "program" "profile"
+# Enters "interactive" mode
+#
+# % tools/jeprof --text "program" "profile"
+# Generates one line per procedure
+#
+# % tools/jeprof --gv "program" "profile"
+# Generates annotated call-graph and displays via "gv"
+#
+# % tools/jeprof --gv --focus=Mutex "program" "profile"
+# Restrict to code paths that involve an entry that matches "Mutex"
+#
+# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
+# Restrict to code paths that involve an entry that matches "Mutex"
+# and does not match "string"
+#
+# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
+# Generates disassembly listing of all routines with at least one
+# sample that match the --list=<regexp> pattern. The listing is
+# annotated with the flat and cumulative sample counts at each line.
+#
+# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
+# Generates disassembly listing of all routines with at least one
+# sample that match the --disasm=<regexp> pattern. The listing is
+# annotated with the flat and cumulative sample counts at each PC value.
+#
+# TODO: Use color to indicate files?
+
+use strict;
+use warnings;
+use Getopt::Long;
+
+my $JEPROF_VERSION = "@jemalloc_version@";
+my $PPROF_VERSION = "2.0";
+
+# These are the object tools we use. They can come from a user-specified
+# location (via --tools or the JEPROF_TOOLS environment variable); otherwise
+# the plain command names below are resolved via the shell's $PATH.
+my %obj_tool_map = (
+ "objdump" => "objdump",
+ "nm" => "nm",
+ "addr2line" => "addr2line",
+ "c++filt" => "c++filt",
+ ## ConfigureObjTools may add architecture-specific entries:
+ #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables
+ #"addr2line_pdb" => "addr2line-pdb", # ditto
+ #"otool" => "otool", # equivalent of objdump on OS X
+);
+# NOTE: these are lists, so you can put in commandline flags if you want.
+my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local
+my @GV = ("gv");
+my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread
+my @KCACHEGRIND = ("kcachegrind");
+my @PS2PDF = ("ps2pdf");
+# These are used for dynamic profiles
+my @URL_FETCHER = ("curl", "-s", "--fail");
+
+# These are the web pages that servers need to support for dynamic profiles
+my $HEAP_PAGE = "/pprof/heap";
+my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#"
+my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
+ # ?seconds=#&event=x&period=n
+my $GROWTH_PAGE = "/pprof/growth";
+my $CONTENTION_PAGE = "/pprof/contention";
+my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter
+my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
+my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
+ # "?seconds=#",
+ # "?tags_regexp=#" and
+ # "?type=#".
+my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST
+my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
+
+# These are the web pages that can be named on the command line.
+# All the alternatives must begin with /.
+my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
+ "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
+ "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
+
+# default binary name
+my $UNKNOWN_BINARY = "(unknown)";
+
+# There is a pervasive dependency on the length (in hex characters,
+# i.e., nibbles) of an address, distinguishing between 32-bit and
+# 64-bit profiles. To err on the safe side, default to 64-bit here:
+my $address_length = 16;
+
+my $dev_null = "/dev/null";
+if (! -e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for
+ $dev_null = "nul";
+}
+
+# A list of paths to search for shared object files
+my @prefix_list = ();
+
+# Special routine name that should not have any symbols.
+# Used as separator to parse "addr2line -i" output.
+my $sep_symbol = '_fini';
+my $sep_address = undef;
+
+##### Argument parsing #####
+
+sub usage_string {
+ return <<EOF;
+Usage:
+jeprof [options] <program> <profiles>
+ <profiles> is a space separated list of profile names.
+jeprof [options] <symbolized-profiles>
+ <symbolized-profiles> is a list of profile files where each file contains
+ the necessary symbol mappings as well as profile data (likely generated
+ with --raw).
+jeprof [options] <profile>
+ <profile> is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE
+
+ Each name can be:
+ /path/to/profile - a path to a profile file
+ host:port[/<service>] - a location of a service to get profile from
+
+ The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
+ $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
+ $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
+ For instance:
+ jeprof http://myserver.com:80$HEAP_PAGE
+ If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
+jeprof --symbols <program>
+ Maps addresses to symbol names. In this mode, stdin should be a
+ list of library mappings, in the same format as is found in the heap-
+ and cpu-profile files (this loosely matches that of /proc/self/maps
+ on linux), followed by a list of hex addresses to map, one per line.
+
+ For more help with querying remote servers, including how to add the
+ necessary server-side support code, see this filename (or one like it):
+
+ /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
+
+Options:
+ --cum Sort by cumulative data
+ --base=<base> Subtract <base> from <profile> before display
+ --interactive Run in interactive mode (interactive "help" gives help) [default]
+ --seconds=<n> Length of time for dynamic profiles [default=30 secs]
+ --add_lib=<file> Read additional symbols and line info from the given library
+ --lib_prefix=<dir> Comma separated list of library path prefixes
+
+Reporting Granularity:
+ --addresses Report at address level
+ --lines Report at source line level
+ --functions Report at function level [default]
+ --files Report at source file level
+
+Output type:
+ --text Generate text report
+ --callgrind Generate callgrind format to stdout
+ --gv Generate Postscript and display
+ --evince Generate PDF and display
+ --web Generate SVG and display
+ --list=<regexp> Generate source listing of matching routines
+ --disasm=<regexp> Generate disassembly of matching routines
+ --symbols Print demangled symbol names found at given addresses
+ --dot Generate DOT file to stdout
+   --ps               Generate Postscript to stdout
+ --pdf Generate PDF to stdout
+ --svg Generate SVG to stdout
+ --gif Generate GIF to stdout
+ --raw Generate symbolized jeprof data (useful with remote fetch)
+
+Heap-Profile Options:
+ --inuse_space Display in-use (mega)bytes [default]
+ --inuse_objects Display in-use objects
+ --alloc_space Display allocated (mega)bytes
+ --alloc_objects Display allocated objects
+ --show_bytes Display space in bytes
+ --drop_negative Ignore negative differences
+
+Contention-profile options:
+ --total_delay Display total delay at each region [default]
+ --contentions Display number of delays at each region
+ --mean_delay Display mean delay at each region
+
+Call-graph Options:
+   --nodecount=<n>    Show at most <n> nodes [default=80]
+ --nodefraction=<f> Hide nodes below <f>*total [default=.005]
+ --edgefraction=<f> Hide edges below <f>*total [default=.001]
+ --maxdegree=<n> Max incoming/outgoing edges per node [default=8]
+ --focus=<regexp> Focus on backtraces with nodes matching <regexp>
+ --thread=<n> Show profile for thread <n>
+ --ignore=<regexp> Ignore backtraces with nodes matching <regexp>
+ --scale=<n> Set GV scaling [default=0]
+ --heapcheck Make nodes with non-0 object counts
+ (i.e. direct leak generators) more visible
+ --retain=<regexp> Retain only nodes that match <regexp>
+ --exclude=<regexp> Exclude all nodes that match <regexp>
+
+Miscellaneous:
+ --tools=<prefix or binary:fullpath>[,...] \$PATH for object tool pathnames
+ --test Run unit tests
+ --help This message
+ --version Version information
+
+Environment Variables:
+ JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof
+ JEPROF_TOOLS Prefix for object tools pathnames
+
+Examples:
+
+jeprof /bin/ls ls.prof
+ Enters "interactive" mode
+jeprof --text /bin/ls ls.prof
+ Outputs one line per procedure
+jeprof --web /bin/ls ls.prof
+ Displays annotated call-graph in web browser
+jeprof --gv /bin/ls ls.prof
+ Displays annotated call-graph via 'gv'
+jeprof --gv --focus=Mutex /bin/ls ls.prof
+ Restricts to code paths including a .*Mutex.* entry
+jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
+ Code paths including Mutex but not string
+jeprof --list=getdir /bin/ls ls.prof
+ (Per-line) annotated source listing for getdir()
+jeprof --disasm=getdir /bin/ls ls.prof
+ (Per-PC) annotated disassembly for getdir()
+
+jeprof http://localhost:1234/
+ Enters "interactive" mode
+jeprof --text localhost:1234
+ Outputs one line per procedure for localhost:1234
+jeprof --raw localhost:1234 > ./local.raw
+jeprof --text ./local.raw
+ Fetches a remote profile for later analysis and then
+ analyzes it in text mode.
+EOF
+}
+
+sub version_string {
+ return <<EOF
+jeprof (part of jemalloc $JEPROF_VERSION)
+based on pprof (part of gperftools $PPROF_VERSION)
+
+Copyright 1998-2007 Google Inc.
+
+This is BSD licensed software; see the source for copying conditions
+and license information.
+There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE.
+EOF
+}
+
+sub usage {
+ my $msg = shift;
+ print STDERR "$msg\n\n";
+ print STDERR usage_string();
+ print STDERR "\nFATAL ERROR: $msg\n"; # just as a reminder
+ exit(1);
+}
+
+sub Init() {
+  # Set up the tmp-file name and a handler to clean it up.
+  # We do this at the very beginning so that we can use the
+  # error() and cleanup() functions anytime hereafter.
+ $main::tmpfile_sym = "/tmp/jeprof$$.sym";
+ $main::tmpfile_ps = "/tmp/jeprof$$";
+ $main::next_tmpfile = 0;
+ $SIG{'INT'} = \&sighandler;
+
+ # Cache from filename/linenumber to source code
+  %main::source_cache = ();
+
+ $main::opt_help = 0;
+ $main::opt_version = 0;
+
+ $main::opt_cum = 0;
+ $main::opt_base = '';
+ $main::opt_addresses = 0;
+ $main::opt_lines = 0;
+ $main::opt_functions = 0;
+ $main::opt_files = 0;
+ $main::opt_lib_prefix = "";
+
+ $main::opt_text = 0;
+ $main::opt_callgrind = 0;
+ $main::opt_list = "";
+ $main::opt_disasm = "";
+ $main::opt_symbols = 0;
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_web = 0;
+ $main::opt_dot = 0;
+ $main::opt_ps = 0;
+ $main::opt_pdf = 0;
+ $main::opt_gif = 0;
+ $main::opt_svg = 0;
+ $main::opt_raw = 0;
+
+ $main::opt_nodecount = 80;
+ $main::opt_nodefraction = 0.005;
+ $main::opt_edgefraction = 0.001;
+ $main::opt_maxdegree = 8;
+ $main::opt_focus = '';
+ $main::opt_thread = undef;
+ $main::opt_ignore = '';
+ $main::opt_scale = 0;
+ $main::opt_heapcheck = 0;
+ $main::opt_retain = '';
+ $main::opt_exclude = '';
+ $main::opt_seconds = 30;
+ $main::opt_lib = "";
+
+ $main::opt_inuse_space = 0;
+ $main::opt_inuse_objects = 0;
+ $main::opt_alloc_space = 0;
+ $main::opt_alloc_objects = 0;
+ $main::opt_show_bytes = 0;
+ $main::opt_drop_negative = 0;
+ $main::opt_interactive = 0;
+
+ $main::opt_total_delay = 0;
+ $main::opt_contentions = 0;
+ $main::opt_mean_delay = 0;
+
+ $main::opt_tools = "";
+ $main::opt_debug = 0;
+ $main::opt_test = 0;
+
+ # These are undocumented flags used only by unittests.
+ $main::opt_test_stride = 0;
+
+ # Are we using $SYMBOL_PAGE?
+ $main::use_symbol_page = 0;
+
+ # Files returned by TempName.
+ %main::tempnames = ();
+
+ # Type of profile we are dealing with
+ # Supported types:
+ # cpu
+ # heap
+ # growth
+ # contention
+ $main::profile_type = ''; # Empty type means "unknown"
+
+ GetOptions("help!" => \$main::opt_help,
+ "version!" => \$main::opt_version,
+ "cum!" => \$main::opt_cum,
+ "base=s" => \$main::opt_base,
+ "seconds=i" => \$main::opt_seconds,
+ "add_lib=s" => \$main::opt_lib,
+ "lib_prefix=s" => \$main::opt_lib_prefix,
+ "functions!" => \$main::opt_functions,
+ "lines!" => \$main::opt_lines,
+ "addresses!" => \$main::opt_addresses,
+ "files!" => \$main::opt_files,
+ "text!" => \$main::opt_text,
+ "callgrind!" => \$main::opt_callgrind,
+ "list=s" => \$main::opt_list,
+ "disasm=s" => \$main::opt_disasm,
+ "symbols!" => \$main::opt_symbols,
+ "gv!" => \$main::opt_gv,
+ "evince!" => \$main::opt_evince,
+ "web!" => \$main::opt_web,
+ "dot!" => \$main::opt_dot,
+ "ps!" => \$main::opt_ps,
+ "pdf!" => \$main::opt_pdf,
+ "svg!" => \$main::opt_svg,
+ "gif!" => \$main::opt_gif,
+ "raw!" => \$main::opt_raw,
+ "interactive!" => \$main::opt_interactive,
+ "nodecount=i" => \$main::opt_nodecount,
+ "nodefraction=f" => \$main::opt_nodefraction,
+ "edgefraction=f" => \$main::opt_edgefraction,
+ "maxdegree=i" => \$main::opt_maxdegree,
+ "focus=s" => \$main::opt_focus,
+ "thread=s" => \$main::opt_thread,
+ "ignore=s" => \$main::opt_ignore,
+ "scale=i" => \$main::opt_scale,
+ "heapcheck" => \$main::opt_heapcheck,
+ "retain=s" => \$main::opt_retain,
+ "exclude=s" => \$main::opt_exclude,
+ "inuse_space!" => \$main::opt_inuse_space,
+ "inuse_objects!" => \$main::opt_inuse_objects,
+ "alloc_space!" => \$main::opt_alloc_space,
+ "alloc_objects!" => \$main::opt_alloc_objects,
+ "show_bytes!" => \$main::opt_show_bytes,
+ "drop_negative!" => \$main::opt_drop_negative,
+ "total_delay!" => \$main::opt_total_delay,
+ "contentions!" => \$main::opt_contentions,
+ "mean_delay!" => \$main::opt_mean_delay,
+ "tools=s" => \$main::opt_tools,
+ "test!" => \$main::opt_test,
+ "debug!" => \$main::opt_debug,
+ # Undocumented flags used only by unittests:
+ "test_stride=i" => \$main::opt_test_stride,
+ ) || usage("Invalid option(s)");
+
+ # Deal with the standard --help and --version
+ if ($main::opt_help) {
+ print usage_string();
+ exit(0);
+ }
+
+ if ($main::opt_version) {
+ print version_string();
+ exit(0);
+ }
+
+ # Disassembly/listing/symbols mode requires address-level info
+ if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
+ $main::opt_functions = 0;
+ $main::opt_lines = 0;
+ $main::opt_addresses = 1;
+ $main::opt_files = 0;
+ }
+
+ # Check heap-profiling flags
+ if ($main::opt_inuse_space +
+ $main::opt_inuse_objects +
+ $main::opt_alloc_space +
+ $main::opt_alloc_objects > 1) {
+    usage("Specify at most one of the --inuse/--alloc options");
+ }
+
+ # Check output granularities
+ my $grains =
+ $main::opt_functions +
+ $main::opt_lines +
+ $main::opt_addresses +
+ $main::opt_files +
+ 0;
+ if ($grains > 1) {
+ usage("Only specify one output granularity option");
+ }
+ if ($grains == 0) {
+ $main::opt_functions = 1;
+ }
+
+ # Check output modes
+ my $modes =
+ $main::opt_text +
+ $main::opt_callgrind +
+ ($main::opt_list eq '' ? 0 : 1) +
+ ($main::opt_disasm eq '' ? 0 : 1) +
+ ($main::opt_symbols == 0 ? 0 : 1) +
+ $main::opt_gv +
+ $main::opt_evince +
+ $main::opt_web +
+ $main::opt_dot +
+ $main::opt_ps +
+ $main::opt_pdf +
+ $main::opt_svg +
+ $main::opt_gif +
+ $main::opt_raw +
+ $main::opt_interactive +
+ 0;
+ if ($modes > 1) {
+ usage("Only specify one output mode");
+ }
+ if ($modes == 0) {
+ if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode
+ $main::opt_interactive = 1;
+ } else {
+ $main::opt_text = 1;
+ }
+ }
+
+ if ($main::opt_test) {
+ RunUnitTests();
+ # Should not return
+ exit(1);
+ }
+
+ # Binary name and profile arguments list
+ $main::prog = "";
+ @main::pfile_args = ();
+
+ # Remote profiling without a binary (using $SYMBOL_PAGE instead)
+ if (@ARGV > 0) {
+ if (IsProfileURL($ARGV[0])) {
+ $main::use_symbol_page = 1;
+ } elsif (IsSymbolizedProfileFile($ARGV[0])) {
+ $main::use_symbolized_profile = 1;
+ $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file
+ }
+ }
+
+ if ($main::use_symbol_page || $main::use_symbolized_profile) {
+ # We don't need a binary!
+ my %disabled = ('--lines' => $main::opt_lines,
+ '--disasm' => $main::opt_disasm);
+ for my $option (keys %disabled) {
+ usage("$option cannot be used without a binary") if $disabled{$option};
+ }
+ # Set $main::prog later...
+ scalar(@ARGV) || usage("Did not specify profile file");
+ } elsif ($main::opt_symbols) {
+ # --symbols needs a binary-name (to run nm on, etc) but not profiles
+ $main::prog = shift(@ARGV) || usage("Did not specify program");
+ } else {
+ $main::prog = shift(@ARGV) || usage("Did not specify program");
+ scalar(@ARGV) || usage("Did not specify profile file");
+ }
+
+ # Parse profile file/location arguments
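+  # (For illustration, a hypothetical argument "myhost@3/pprof/heap" fans out
+  # to the three profile sources 0.myhost/pprof/heap, 1.myhost/pprof/heap and
+  # 2.myhost/pprof/heap via the loop below.)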
+ foreach my $farg (@ARGV) {
+ if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
+ my $machine = $1;
+ my $num_machines = $2;
+ my $path = $3;
+ for (my $i = 0; $i < $num_machines; $i++) {
+ unshift(@main::pfile_args, "$i.$machine$path");
+ }
+ } else {
+ unshift(@main::pfile_args, $farg);
+ }
+ }
+
+ if ($main::use_symbol_page) {
+ unless (IsProfileURL($main::pfile_args[0])) {
+ error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
+ }
+ CheckSymbolPage();
+ $main::prog = FetchProgramName();
+ } elsif (!$main::use_symbolized_profile) { # may not need objtools!
+ ConfigureObjTools($main::prog)
+ }
+
+ # Break the opt_lib_prefix into the prefix_list array
+ @prefix_list = split (',', $main::opt_lib_prefix);
+
+  # Remove any trailing / from the prefixes in the list, to prevent
+  # searching things like /my/path//lib/mylib.so
+ foreach (@prefix_list) {
+ s|/+$||;
+ }
+}
+
+sub FilterAndPrint {
+ my ($profile, $symbols, $libs, $thread) = @_;
+
+ # Get total data in profile
+ my $total = TotalProfile($profile);
+
+  # Remove uninteresting stack items
+ $profile = RemoveUninterestingFrames($symbols, $profile);
+
+ # Focus?
+ if ($main::opt_focus ne '') {
+ $profile = FocusProfile($symbols, $profile, $main::opt_focus);
+ }
+
+ # Ignore?
+ if ($main::opt_ignore ne '') {
+ $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
+ }
+
+ my $calls = ExtractCalls($symbols, $profile);
+
+ # Reduce profiles to required output granularity, and also clean
+ # each stack trace so a given entry exists at most once.
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ # Print
+ if (!$main::opt_interactive) {
+ if ($main::opt_disasm) {
+ PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
+ } elsif ($main::opt_list) {
+ PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
+ } elsif ($main::opt_text) {
+      # Make sure the output is empty when we have nothing to report
+ # (only matters when --heapcheck is given but we must be
+ # compatible with old branches that did not pass --heapcheck always):
+ if ($total != 0) {
+ printf("Total%s: %s %s\n",
+ (defined($thread) ? " (t$thread)" : ""),
+ Unparse($total), Units());
+ }
+ PrintText($symbols, $flat, $cumulative, -1);
+ } elsif ($main::opt_raw) {
+ PrintSymbolizedProfile($symbols, $profile, $main::prog);
+ } elsif ($main::opt_callgrind) {
+ PrintCallgrind($calls);
+ } else {
+ if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+ if ($main::opt_gv) {
+ RunGV(TempName($main::next_tmpfile, "ps"), "");
+ } elsif ($main::opt_evince) {
+ RunEvince(TempName($main::next_tmpfile, "pdf"), "");
+ } elsif ($main::opt_web) {
+ my $tmp = TempName($main::next_tmpfile, "svg");
+ RunWeb($tmp);
+ # The command we run might hand the file name off
+ # to an already running browser instance and then exit.
+ # Normally, we'd remove $tmp on exit (right now),
+ # but fork a child to remove $tmp a little later, so that the
+ # browser has time to load it first.
+ delete $main::tempnames{$tmp};
+ if (fork() == 0) {
+ sleep 5;
+ unlink($tmp);
+ exit(0);
+ }
+ }
+ } else {
+ cleanup();
+ exit(1);
+ }
+ }
+ } else {
+ InteractiveMode($profile, $symbols, $libs, $total);
+ }
+}
+
+sub Main() {
+ Init();
+ $main::collected_profile = undef;
+ @main::profile_files = ();
+ $main::op_time = time();
+
+  # Printing symbols is special and requires a lot less info than most.
+ if ($main::opt_symbols) {
+ PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin
+ return;
+ }
+
+ # Fetch all profile data
+ FetchDynamicProfiles();
+
+ # this will hold symbols that we read from the profile files
+ my $symbol_map = {};
+
+ # Read one profile, pick the last item on the list
+ my $data = ReadProfile($main::prog, pop(@main::profile_files));
+ my $profile = $data->{profile};
+ my $pcs = $data->{pcs};
+ my $libs = $data->{libs}; # Info about main program and shared libraries
+ $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
+
+ # Add additional profiles, if available.
+ if (scalar(@main::profile_files) > 0) {
+ foreach my $pname (@main::profile_files) {
+ my $data2 = ReadProfile($main::prog, $pname);
+ $profile = AddProfile($profile, $data2->{profile});
+ $pcs = AddPcs($pcs, $data2->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
+ }
+ }
+
+ # Subtract base from profile, if specified
+ if ($main::opt_base ne '') {
+ my $base = ReadProfile($main::prog, $main::opt_base);
+ $profile = SubtractProfile($profile, $base->{profile});
+ $pcs = AddPcs($pcs, $base->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
+ }
+
+ # Collect symbols
+ my $symbols;
+ if ($main::use_symbolized_profile) {
+ $symbols = FetchSymbols($pcs, $symbol_map);
+ } elsif ($main::use_symbol_page) {
+ $symbols = FetchSymbols($pcs);
+ } else {
+ # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
+ # which may differ from the data from subsequent profiles, especially
+ # if they were run on different machines. Use appropriate libs for
+ # each pc somehow.
+ $symbols = ExtractSymbols($libs, $pcs);
+ }
+
+ if (!defined($main::opt_thread)) {
+ FilterAndPrint($profile, $symbols, $libs);
+ }
+ if (defined($data->{threads})) {
+ foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
+ if (defined($main::opt_thread) &&
+ ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
+ my $thread_profile = $data->{threads}{$thread};
+ FilterAndPrint($thread_profile, $symbols, $libs, $thread);
+ }
+ }
+ }
+
+ cleanup();
+ exit(0);
+}
+
+##### Entry Point #####
+
+Main();
+
+# Temporary code to detect if we're running on a Goobuntu system.
+# These systems don't have the right stuff installed for the special
+# Readline libraries to work, so as a temporary workaround, we default
+# to using the normal stdio code, rather than the fancier readline-based
+# code
+sub ReadlineMightFail {
+ if (-e '/lib/libtermcap.so.2') {
+ return 0; # libtermcap exists, so readline should be okay
+ } else {
+ return 1;
+ }
+}
+
+sub RunGV {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
+ # Options using double dash are supported by this gv version.
+ # Also, turn on noantialias to better handle bug in gv for
+ # postscript files with large dimensions.
+ # TODO: Maybe we should not pass the --noantialias flag
+ # if the gv version is known to work properly without the flag.
+ system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
+ . $bg);
+ } else {
+ # Old gv version - only supports options that use single dash.
+ print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
+ system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
+ }
+}
+
+sub RunEvince {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ system(ShellEscape(@EVINCE, $fname) . $bg);
+}
+
+sub RunWeb {
+ my $fname = shift;
+ print STDERR "Loading web page file:///$fname\n";
+
+ if (`uname` =~ /Darwin/) {
+ # OS X: open will use standard preference for SVG files.
+ system("/usr/bin/open", $fname);
+ return;
+ }
+
+ # Some kind of Unix; try generic symlinks, then specific browsers.
+ # (Stop once we find one.)
+ # Works best if the browser is already running.
+ my @alt = (
+ "/etc/alternatives/gnome-www-browser",
+ "/etc/alternatives/x-www-browser",
+ "google-chrome",
+ "firefox",
+ );
+ foreach my $b (@alt) {
+ if (system($b, $fname) == 0) {
+ return;
+ }
+ }
+
+ print STDERR "Could not load web browser.\n";
+}
+
+sub RunKcachegrind {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
+ system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
+}
+
+
+##### Interactive helper routines #####
+
+sub InteractiveMode {
+ $| = 1; # Make output unbuffered for interactive mode
+ my ($orig_profile, $symbols, $libs, $total) = @_;
+
+ print STDERR "Welcome to jeprof! For help, type 'help'.\n";
+
+ # Use ReadLine if it's installed and input comes from a console.
+ if ( -t STDIN &&
+ !ReadlineMightFail() &&
+ defined(eval {require Term::ReadLine}) ) {
+ my $term = new Term::ReadLine 'jeprof';
+ while ( defined ($_ = $term->readline('(jeprof) '))) {
+ $term->addhistory($_) if /\S/;
+ if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+ last; # exit when we get an interactive command to quit
+ }
+ }
+ } else { # don't have readline
+ while (1) {
+ print STDERR "(jeprof) ";
+ $_ = <STDIN>;
+ last if ! defined $_ ;
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+
+ # Save some flags that might be reset by InteractiveCommand()
+ my $save_opt_lines = $main::opt_lines;
+
+ if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+ last; # exit when we get an interactive command to quit
+ }
+
+ # Restore flags
+ $main::opt_lines = $save_opt_lines;
+ }
+ }
+}
+
+# Takes two args: orig profile, and command to run.
+# Returns 1 if we should keep going, or 0 if we were asked to quit
+sub InteractiveCommand {
+ my($orig_profile, $symbols, $libs, $total, $command) = @_;
+ $_ = $command; # just to make future m//'s easier
+ if (!defined($_)) {
+ print STDERR "\n";
+ return 0;
+ }
+ if (m/^\s*quit/) {
+ return 0;
+ }
+ if (m/^\s*help/) {
+ InteractiveHelpMessage();
+ return 1;
+ }
+ # Clear all the mode options -- mode is controlled by "$command"
+ $main::opt_text = 0;
+ $main::opt_callgrind = 0;
+ $main::opt_disasm = 0;
+ $main::opt_list = 0;
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_cum = 0;
+
+ if (m/^\s*(text|top)(\d*)\s*(.*)/) {
+ $main::opt_text = 1;
+
+ my $line_limit = ($2 ne "") ? int($2) : 10;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($3);
+
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintText($symbols, $flat, $cumulative, $line_limit);
+ return 1;
+ }
+ if (m/^\s*callgrind\s*([^ \n]*)/) {
+ $main::opt_callgrind = 1;
+
+ # Get derived profiles
+ my $calls = ExtractCalls($symbols, $orig_profile);
+ my $filename = $1;
+ if ( $1 eq '' ) {
+ $filename = TempName($main::next_tmpfile, "callgrind");
+ }
+ PrintCallgrind($calls, $filename);
+ if ( $1 eq '' ) {
+ RunKcachegrind($filename, " & ");
+ $main::next_tmpfile++;
+ }
+
+ return 1;
+ }
+ if (m/^\s*(web)?list\s*(.+)/) {
+ my $html = (defined($1) && ($1 eq "web"));
+ $main::opt_list = 1;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($2);
+
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
+ return 1;
+ }
+ if (m/^\s*disasm\s*(.+)/) {
+ $main::opt_disasm = 1;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($1);
+
+ # Process current profile to account for various settings
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintDisassembly($libs, $flat, $cumulative, $routine);
+ return 1;
+ }
+ if (m/^\s*(gv|web|evince)\s*(.*)/) {
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_web = 0;
+ if ($1 eq "gv") {
+ $main::opt_gv = 1;
+ } elsif ($1 eq "evince") {
+ $main::opt_evince = 1;
+ } elsif ($1 eq "web") {
+ $main::opt_web = 1;
+ }
+
+ my $focus;
+ my $ignore;
+ ($focus, $ignore) = ParseInteractiveArgs($2);
+
+ # Process current profile to account for various settings
+ my $profile = ProcessProfile($total, $orig_profile, $symbols,
+ $focus, $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+ if ($main::opt_gv) {
+ RunGV(TempName($main::next_tmpfile, "ps"), " &");
+ } elsif ($main::opt_evince) {
+ RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
+ } elsif ($main::opt_web) {
+ RunWeb(TempName($main::next_tmpfile, "svg"));
+ }
+ $main::next_tmpfile++;
+ }
+ return 1;
+ }
+ if (m/^\s*$/) {
+ return 1;
+ }
+ print STDERR "Unknown command: try 'help'.\n";
+ return 1;
+}
+
+
+sub ProcessProfile {
+ my $total_count = shift;
+ my $orig_profile = shift;
+ my $symbols = shift;
+ my $focus = shift;
+ my $ignore = shift;
+
+ # Process current profile to account for various settings
+ my $profile = $orig_profile;
+ printf("Total: %s %s\n", Unparse($total_count), Units());
+ if ($focus ne '') {
+ $profile = FocusProfile($symbols, $profile, $focus);
+ my $focus_count = TotalProfile($profile);
+ printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
+ $focus,
+ Unparse($focus_count), Units(),
+ Unparse($total_count), ($focus_count*100.0) / $total_count);
+ }
+ if ($ignore ne '') {
+ $profile = IgnoreProfile($symbols, $profile, $ignore);
+ my $ignore_count = TotalProfile($profile);
+ printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
+ $ignore,
+ Unparse($ignore_count), Units(),
+ Unparse($total_count),
+ ($ignore_count*100.0) / $total_count);
+ }
+
+ return $profile;
+}
+
+sub InteractiveHelpMessage {
+ print STDERR <<ENDOFHELP;
+Interactive jeprof mode
+
+Commands:
+ gv
+ gv [focus] [-ignore1] [-ignore2]
+ Show graphical hierarchical display of current profile. Without
+ any arguments, shows all samples in the profile. With the optional
+ "focus" argument, restricts the samples shown to just those where
+ the "focus" regular expression matches a routine name on the stack
+ trace.
+
+ web
+ web [focus] [-ignore1] [-ignore2]
+ Like GV, but displays profile in your web browser instead of using
+ Ghostview. Works best if your web browser is already running.
+ To change the browser that gets used:
+ On Linux, set the /etc/alternatives/gnome-www-browser symlink.
+ On OS X, change the Finder association for SVG files.
+
+ list [routine_regexp] [-ignore1] [-ignore2]
+ Show source listing of routines whose names match "routine_regexp"
+
+ weblist [routine_regexp] [-ignore1] [-ignore2]
+ Displays a source listing of routines whose names match "routine_regexp"
+ in a web browser. You can click on source lines to view the
+ corresponding disassembly.
+
+ top [--cum] [-ignore1] [-ignore2]
+ top20 [--cum] [-ignore1] [-ignore2]
+ top37 [--cum] [-ignore1] [-ignore2]
+ Show top lines ordered by flat profile count, or cumulative count
+ if --cum is specified. If a number is present after 'top', the
+ top K routines will be shown (defaults to showing the top 10)
+
+ disasm [routine_regexp] [-ignore1] [-ignore2]
+ Show disassembly of routines whose names match "routine_regexp",
+ annotated with sample counts.
+
+ callgrind
+ callgrind [filename]
+ Generates callgrind file. If no filename is given, kcachegrind is called.
+
+ help - This listing
+ quit or ^D - End jeprof
+
+For commands that accept optional -ignore tags, samples where any routine in
+the stack trace matches the regular expression in any of the -ignore
+parameters will be ignored.
+
+Further pprof details are available at this location (or one similar):
+
+ /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
+ /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
+
+ENDOFHELP
+}
+sub ParseInteractiveArgs {
+ my $args = shift;
+ my $focus = "";
+ my $ignore = "";
+ my @x = split(/ +/, $args);
+ foreach $a (@x) {
+ if ($a =~ m/^(--|-)lines$/) {
+ $main::opt_lines = 1;
+ } elsif ($a =~ m/^(--|-)cum$/) {
+ $main::opt_cum = 1;
+ } elsif ($a =~ m/^-(.*)/) {
+ $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
+ } else {
+ $focus .= (($focus ne "") ? "|" : "" ) . $a;
+ }
+ }
+ if ($ignore ne "") {
+ print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
+ }
+ return ($focus, $ignore);
+}
+
+##### Output code #####
+
+sub TempName {
+ my $fnum = shift;
+ my $ext = shift;
+ my $file = "$main::tmpfile_ps.$fnum.$ext";
+ $main::tempnames{$file} = 1;
+ return $file;
+}
+
+# Print profile data in packed binary format (64-bit) to standard out
+sub PrintProfileData {
+ my $profile = shift;
+
+ # print header (64-bit style)
+ # (zero) (header-size) (version) (sample-period) (zero)
+ print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
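+  # Illustrative record layout (values made up): a stack of depth 2 that was
+  # sampled 5 times is emitted as the 32-bit words 5,0 2,0 followed by the
+  # low/high halves of each of the two addresses, as computed below.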
+
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ if ($#addrs >= 0) {
+ my $depth = $#addrs + 1;
+ # int(foo / 2**32) is the only reliable way to get rid of bottom
+ # 32 bits on both 32- and 64-bit systems.
+ print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
+ print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
+
+ foreach my $full_addr (@addrs) {
+ my $addr = $full_addr;
+ $addr =~ s/0x0*//; # strip off leading 0x, zeroes
+ if (length($addr) > 16) {
+ print STDERR "Invalid address in profile: $full_addr\n";
+ next;
+ }
+ my $low_addr = substr($addr, -8); # get last 8 hex chars
+ my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars
+ print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
+ }
+ }
+ }
+}
+
+# Print symbols and profile data
+sub PrintSymbolizedProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $prog = shift;
+
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
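+  # (With the default $SYMBOL_PAGE of "/pprof/symbol", the marker is "symbol".)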
+
+ print '--- ', $symbol_marker, "\n";
+ if (defined($prog)) {
+ print 'binary=', $prog, "\n";
+ }
+ while (my ($pc, $name) = each(%{$symbols})) {
+ my $sep = ' ';
+ print '0x', $pc;
+ # We have a list of function names, which include the inlined
+ # calls. They are separated (and terminated) by --, which is
+ # illegal in function names.
+ for (my $j = 2; $j <= $#{$name}; $j += 3) {
+ print $sep, $name->[$j];
+ $sep = '--';
+ }
+ print "\n";
+ }
+ print '---', "\n";
+
+ my $profile_marker;
+ if ($main::profile_type eq 'heap') {
+ $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } elsif ($main::profile_type eq 'growth') {
+ $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } elsif ($main::profile_type eq 'contention') {
+ $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } else { # elsif ($main::profile_type eq 'cpu')
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ }
+
+ print '--- ', $profile_marker, "\n";
+ if (defined($main::collected_profile)) {
+ # if used with remote fetch, simply dump the collected profile to output.
+ open(SRC, "<$main::collected_profile");
+ while (<SRC>) {
+ print $_;
+ }
+ close(SRC);
+ } else {
+ # --raw/http: For everything to work correctly for non-remote profiles, we
+ # would need to extend PrintProfileData() to handle all possible profile
+ # types, re-enable the code that is currently disabled in ReadCPUProfile()
+ # and FixCallerAddresses(), and remove the remote profile dumping code in
+ # the block above.
+ die "--raw/http: jeprof can only dump remote profiles for --raw\n";
+ # dump a cpu-format profile to standard out
+ PrintProfileData($profile);
+ }
+}
+
+# Print text output
+sub PrintText {
+ my $symbols = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $line_limit = shift;
+
+ my $total = TotalProfile($flat);
+
+ # Which profile to sort by?
+ my $s = $main::opt_cum ? $cumulative : $flat;
+
+ my $running_sum = 0;
+ my $lines = 0;
+ foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
+ keys(%{$cumulative})) {
+ my $f = GetEntry($flat, $k);
+ my $c = GetEntry($cumulative, $k);
+ $running_sum += $f;
+
+ my $sym = $k;
+ if (exists($symbols->{$k})) {
+ $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
+ if ($main::opt_addresses) {
+ $sym = $k . " " . $sym;
+ }
+ }
+
+ if ($f != 0 || $c != 0) {
+ printf("%8s %6s %6s %8s %6s %s\n",
+ Unparse($f),
+ Percent($f, $total),
+ Percent($running_sum, $total),
+ Unparse($c),
+ Percent($c, $total),
+ $sym);
+ }
+ $lines++;
+ last if ($line_limit >= 0 && $lines >= $line_limit);
+ }
+}
+
+# Callgrind format has a compression for repeated function and file
+# names. You show the name the first time, and just use its number
+# subsequently. This can cut down the file to about a third or a
+# quarter of its uncompressed size. $key and $val are the key/value
+# pair that would normally be printed by callgrind; $map is a map from
+# value to number.
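+#
+# For illustration (hypothetical file name): the first time a value is seen
+# this returns e.g. "fl=(1) /src/foo.c"; later occurrences of the same value
+# return just "fl=(1)", and values of three characters or fewer are returned
+# uncompressed, e.g. "fl=foo".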
+sub CompressedCGName {
+ my($key, $val, $map) = @_;
+ my $idx = $map->{$val};
+ # For very short keys, providing an index hurts rather than helps.
+ if (length($val) <= 3) {
+ return "$key=$val\n";
+ } elsif (defined($idx)) {
+ return "$key=($idx)\n";
+ } else {
+    # scalar(keys(%{$map})) gives the number of items in the map.
+ $idx = scalar(keys(%{$map})) + 1;
+ $map->{$val} = $idx;
+ return "$key=($idx) $val\n";
+ }
+}
+
+# Print the call graph in a way that's suitable for callgrind.
+sub PrintCallgrind {
+ my $calls = shift;
+ my $filename;
+ my %filename_to_index_map;
+ my %fnname_to_index_map;
+
+ if ($main::opt_interactive) {
+ $filename = shift;
+ print STDERR "Writing callgrind file to '$filename'.\n"
+ } else {
+ $filename = "&STDOUT";
+ }
+ open(CG, ">$filename");
+ printf CG ("events: Hits\n\n");
+ foreach my $call ( map { $_->[0] }
+ sort { $a->[1] cmp $b ->[1] ||
+ $a->[2] <=> $b->[2] }
+ map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+ [$_, $1, $2] }
+ keys %$calls ) {
+ my $count = int($calls->{$call});
+ $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+ my ( $caller_file, $caller_line, $caller_function,
+ $callee_file, $callee_line, $callee_function ) =
+ ( $1, $2, $3, $5, $6, $7 );
+
+ # TODO(csilvers): for better compression, collect all the
+ # caller/callee_files and functions first, before printing
+ # anything, and only compress those referenced more than once.
+ printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
+ printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
+ if (defined $6) {
+ printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
+ printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
+ printf CG ("calls=$count $callee_line\n");
+ }
+ printf CG ("$caller_line $count\n\n");
+ }
+}
+
+# Print disassembly for all routines that match $main::opt_disasm
+sub PrintDisassembly {
+ my $libs = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $disasm_opts = shift;
+
+ my $total = TotalProfile($flat);
+
+ foreach my $lib (@{$libs}) {
+ my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
+ my $offset = AddressSub($lib->[1], $lib->[3]);
+ foreach my $routine (sort ByName keys(%{$symbol_table})) {
+ my $start_addr = $symbol_table->{$routine}->[0];
+ my $end_addr = $symbol_table->{$routine}->[1];
+ # See if there are any samples in this routine
+ my $length = hex(AddressSub($end_addr, $start_addr));
+ my $addr = AddressAdd($start_addr, $offset);
+ for (my $i = 0; $i < $length; $i++) {
+ if (defined($cumulative->{$addr})) {
+ PrintDisassembledFunction($lib->[0], $offset,
+ $routine, $flat, $cumulative,
+ $start_addr, $end_addr, $total);
+ last;
+ }
+ $addr = AddressInc($addr);
+ }
+ }
+ }
+}
+
+# Return reference to array of tuples of the form:
+# [start_address, filename, linenumber, instruction, limit_address]
+# E.g.,
+# ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
+sub Disassemble {
+ my $prog = shift;
+ my $offset = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+
+ my $objdump = $obj_tool_map{"objdump"};
+ my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
+ "--start-address=0x$start_addr",
+ "--stop-address=0x$end_addr", $prog);
+ open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+ my @result = ();
+ my $filename = "";
+ my $linenumber = -1;
+ my $last = ["", "", "", ""];
+ while (<OBJDUMP>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ chop;
+ if (m|\s*([^:\s]+):(\d+)\s*$|) {
+ # Location line of the form:
+ # <filename>:<linenumber>
+ $filename = $1;
+ $linenumber = $2;
+ } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
+ # Disassembly line -- zero-extend address to full length
+ my $addr = HexExtend($1);
+ my $k = AddressAdd($addr, $offset);
+ $last->[4] = $k; # Store ending address for previous instruction
+ $last = [$k, $filename, $linenumber, $2, $end_addr];
+ push(@result, $last);
+ }
+ }
+ close(OBJDUMP);
+ return @result;
+}
+
+# The input file should contain lines that look like /proc/maps output
+# (the same format as expected in the profiles) or lines that look
+# like hex addresses (like "0xDEADBEEF").  We will parse all
+# /proc/maps output, and for all the hex addresses, we will output
+# "short" symbol names, one per line, in the same order as the input.
+sub PrintSymbols {
+ my $maps_and_symbols_file = shift;
+
+ # ParseLibraries expects pcs to be in a set. Fine by us...
+ my @pclist = (); # pcs in sorted order
+ my $pcs = {};
+ my $map = "";
+ foreach my $line (<$maps_and_symbols_file>) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
+ push(@pclist, HexExtend($1));
+ $pcs->{$pclist[-1]} = 1;
+ } else {
+ $map .= $line;
+ }
+ }
+
+ my $libs = ParseLibraries($main::prog, $map, $pcs);
+ my $symbols = ExtractSymbols($libs, $pcs);
+
+ foreach my $pc (@pclist) {
+ # ->[0] is the shortname, ->[2] is the full name
+ print(($symbols->{$pc}->[0] || "??") . "\n");
+ }
+}
+
+
+# For sorting functions by name
+sub ByName {
+ return ShortFunctionName($a) cmp ShortFunctionName($b);
+}
+
+# Print source-listing for all routines that match $list_opts
+sub PrintListing {
+ my $total = shift;
+ my $libs = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $list_opts = shift;
+ my $html = shift;
+
+ my $output = \*STDOUT;
+ my $fname = "";
+
+ if ($html) {
+ # Arrange to write the output to a temporary file
+ $fname = TempName($main::next_tmpfile, "html");
+ $main::next_tmpfile++;
+ if (!open(TEMP, ">$fname")) {
+ print STDERR "$fname: $!\n";
+ return;
+ }
+ $output = \*TEMP;
+ print $output HtmlListingHeader();
+ printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
+ $main::prog, Unparse($total), Units());
+ }
+
+ my $listed = 0;
+ foreach my $lib (@{$libs}) {
+ my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
+ my $offset = AddressSub($lib->[1], $lib->[3]);
+ foreach my $routine (sort ByName keys(%{$symbol_table})) {
+ # Print if there are any samples in this routine
+ my $start_addr = $symbol_table->{$routine}->[0];
+ my $end_addr = $symbol_table->{$routine}->[1];
+ my $length = hex(AddressSub($end_addr, $start_addr));
+ my $addr = AddressAdd($start_addr, $offset);
+ for (my $i = 0; $i < $length; $i++) {
+ if (defined($cumulative->{$addr})) {
+ $listed += PrintSource(
+ $lib->[0], $offset,
+ $routine, $flat, $cumulative,
+ $start_addr, $end_addr,
+ $html,
+ $output);
+ last;
+ }
+ $addr = AddressInc($addr);
+ }
+ }
+ }
+
+ if ($html) {
+ if ($listed > 0) {
+ print $output HtmlListingFooter();
+ close($output);
+ RunWeb($fname);
+ } else {
+ close($output);
+ unlink($fname);
+ }
+ }
+}
+
+sub HtmlListingHeader {
+ return <<'EOF';
+<!DOCTYPE html>
+<html>
+<head>
+<title>Pprof listing</title>
+<style type="text/css">
+body {
+ font-family: sans-serif;
+}
+h1 {
+ font-size: 1.5em;
+ margin-bottom: 4px;
+}
+.legend {
+ font-size: 1.25em;
+}
+.line {
+ color: #aaaaaa;
+}
+.nop {
+ color: #aaaaaa;
+}
+.unimportant {
+ color: #cccccc;
+}
+.disasmloc {
+ color: #000000;
+}
+.deadsrc {
+ cursor: pointer;
+}
+.deadsrc:hover {
+ background-color: #eeeeee;
+}
+.livesrc {
+ color: #0000ff;
+ cursor: pointer;
+}
+.livesrc:hover {
+ background-color: #eeeeee;
+}
+.asm {
+ color: #008800;
+ display: none;
+}
+</style>
+<script type="text/javascript">
+function jeprof_toggle_asm(e) {
+ var target;
+ if (!e) e = window.event;
+ if (e.target) target = e.target;
+ else if (e.srcElement) target = e.srcElement;
+
+ if (target) {
+ var asm = target.nextSibling;
+ if (asm && asm.className == "asm") {
+ asm.style.display = (asm.style.display == "block" ? "" : "block");
+ e.preventDefault();
+ return false;
+ }
+ }
+}
+</script>
+</head>
+<body>
+EOF
+}
+
+sub HtmlListingFooter {
+ return <<'EOF';
+</body>
+</html>
+EOF
+}
+
+sub HtmlEscape {
+ my $text = shift;
+ $text =~ s/&/&amp;/g;
+ $text =~ s/</&lt;/g;
+ $text =~ s/>/&gt;/g;
+ return $text;
+}
+
+# Returns the indentation of the line, if it has any non-whitespace
+# characters. Otherwise, returns -1.
+sub Indentation {
+ my $line = shift;
+  if ($line =~ m/^(\s*)\S/) {
+ return length($1);
+ } else {
+ return -1;
+ }
+}
+
+# If the symbol table contains inlining info, Disassemble() may tag an
+# instruction with a location inside an inlined function. But for
+# source listings, we prefer to use the location in the function we
+# are listing. So use MapToSymbols() to fetch full location
+# information for each instruction and then pick out the first
+# location from a location list (location list contains callers before
+# callees in case of inlining).
+#
+# After this routine has run, each entry in $instructions contains:
+# [0] start address
+# [1] filename for function we are listing
+# [2] line number for function we are listing
+# [3] disassembly
+# [4] limit address
+# [5] most specific filename (may be different from [1] due to inlining)
+# [6] most specific line number (may be different from [2] due to inlining)
+sub GetTopLevelLineNumbers {
+ my ($lib, $offset, $instructions) = @_;
+ my $pcs = [];
+ for (my $i = 0; $i <= $#{$instructions}; $i++) {
+ push(@{$pcs}, $instructions->[$i]->[0]);
+ }
+ my $symbols = {};
+ MapToSymbols($lib, $offset, $pcs, $symbols);
+ for (my $i = 0; $i <= $#{$instructions}; $i++) {
+ my $e = $instructions->[$i];
+ push(@{$e}, $e->[1]);
+ push(@{$e}, $e->[2]);
+ my $addr = $e->[0];
+ my $sym = $symbols->{$addr};
+ if (defined($sym)) {
+ if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
+ $e->[1] = $1; # File name
+ $e->[2] = $2; # Line number
+ }
+ }
+ }
+}
+
+# Print source-listing for one routine
+sub PrintSource {
+ my $prog = shift;
+ my $offset = shift;
+ my $routine = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+ my $html = shift;
+ my $output = shift;
+
+ # Disassemble all instructions (just to get line numbers)
+ my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+ GetTopLevelLineNumbers($prog, $offset, \@instructions);
+
+ # Hack 1: assume that the first source file encountered in the
+ # disassembly contains the routine
+ my $filename = undef;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ if ($instructions[$i]->[2] >= 0) {
+ $filename = $instructions[$i]->[1];
+ last;
+ }
+ }
+ if (!defined($filename)) {
+ print STDERR "no filename found in $routine\n";
+ return 0;
+ }
+
+ # Hack 2: assume that the largest line number from $filename is the
+ # end of the procedure. This is typically safe since if P1 contains
+ # an inlined call to P2, then P2 usually occurs earlier in the
+ # source file. If this does not work, we might have to compute a
+ # density profile or just print all regions we find.
+ my $lastline = 0;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ my $f = $instructions[$i]->[1];
+ my $l = $instructions[$i]->[2];
+ if (($f eq $filename) && ($l > $lastline)) {
+ $lastline = $l;
+ }
+ }
+
+ # Hack 3: assume the first source location from "filename" is the start of
+ # the source code.
+ my $firstline = 1;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ if ($instructions[$i]->[1] eq $filename) {
+ $firstline = $instructions[$i]->[2];
+ last;
+ }
+ }
+
+ # Hack 4: Extend last line forward until its indentation is less than
+ # the indentation we saw on $firstline
+ my $oldlastline = $lastline;
+ {
+ if (!open(FILE, "<$filename")) {
+ print STDERR "$filename: $!\n";
+ return 0;
+ }
+ my $l = 0;
+ my $first_indentation = -1;
+ while (<FILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $l++;
+ my $indent = Indentation($_);
+ if ($l >= $firstline) {
+ if ($first_indentation < 0 && $indent >= 0) {
+ $first_indentation = $indent;
+ last if ($first_indentation == 0);
+ }
+ }
+ if ($l >= $lastline && $indent >= 0) {
+ if ($indent >= $first_indentation) {
+ $lastline = $l+1;
+ } else {
+ last;
+ }
+ }
+ }
+ close(FILE);
+ }
+
+  # Assign all samples to the range $firstline,$lastline.
+  # Hack 5: If an instruction does not occur in the range, its samples
+ # are moved to the next instruction that occurs in the range.
+ my $samples1 = {}; # Map from line number to flat count
+ my $samples2 = {}; # Map from line number to cumulative count
+ my $running1 = 0; # Unassigned flat counts
+ my $running2 = 0; # Unassigned cumulative counts
+ my $total1 = 0; # Total flat counts
+ my $total2 = 0; # Total cumulative counts
+ my %disasm = (); # Map from line number to disassembly
+ my $running_disasm = ""; # Unassigned disassembly
+ my $skip_marker = "---\n";
+ if ($html) {
+ $skip_marker = "";
+ for (my $l = $firstline; $l <= $lastline; $l++) {
+ $disasm{$l} = "";
+ }
+ }
+ my $last_dis_filename = '';
+ my $last_dis_linenum = -1;
+ my $last_touched_line = -1; # To detect gaps in disassembly for a line
+ foreach my $e (@instructions) {
+ # Add up counts for all address that fall inside this instruction
+ my $c1 = 0;
+ my $c2 = 0;
+ for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+ $c1 += GetEntry($flat, $a);
+ $c2 += GetEntry($cumulative, $a);
+ }
+
+ if ($html) {
+ my $dis = sprintf(" %6s %6s \t\t%8s: %s ",
+ HtmlPrintNumber($c1),
+ HtmlPrintNumber($c2),
+ UnparseAddress($offset, $e->[0]),
+ CleanDisassembly($e->[3]));
+
+ # Append the most specific source line associated with this instruction
+ if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
+ $dis = HtmlEscape($dis);
+ my $f = $e->[5];
+ my $l = $e->[6];
+ if ($f ne $last_dis_filename) {
+ $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ } elsif ($l ne $last_dis_linenum) {
+ # De-emphasize the unchanged file name portion
+ $dis .= sprintf("<span class=unimportant>%s</span>" .
+ "<span class=disasmloc>:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ } else {
+ # De-emphasize the entire location
+ $dis .= sprintf("<span class=unimportant>%s:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ }
+ $last_dis_filename = $f;
+ $last_dis_linenum = $l;
+ $running_disasm .= $dis;
+ $running_disasm .= "\n";
+ }
+
+ $running1 += $c1;
+ $running2 += $c2;
+ $total1 += $c1;
+ $total2 += $c2;
+ my $file = $e->[1];
+ my $line = $e->[2];
+ if (($file eq $filename) &&
+ ($line >= $firstline) &&
+ ($line <= $lastline)) {
+ # Assign all accumulated samples to this line
+ AddEntry($samples1, $line, $running1);
+ AddEntry($samples2, $line, $running2);
+ $running1 = 0;
+ $running2 = 0;
+ if ($html) {
+ if ($line != $last_touched_line && $disasm{$line} ne '') {
+ $disasm{$line} .= "\n";
+ }
+ $disasm{$line} .= $running_disasm;
+ $running_disasm = '';
+ $last_touched_line = $line;
+ }
+ }
+ }
+
+ # Assign any leftover samples to $lastline
+ AddEntry($samples1, $lastline, $running1);
+ AddEntry($samples2, $lastline, $running2);
+ if ($html) {
+ if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
+ $disasm{$lastline} .= "\n";
+ }
+ $disasm{$lastline} .= $running_disasm;
+ }
+
+ if ($html) {
+ printf $output (
+ "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
+ "Total:%6s %6s (flat / cumulative %s)\n",
+ HtmlEscape(ShortFunctionName($routine)),
+ HtmlEscape(CleanFileName($filename)),
+ Unparse($total1),
+ Unparse($total2),
+ Units());
+ } else {
+ printf $output (
+ "ROUTINE ====================== %s in %s\n" .
+ "%6s %6s Total %s (flat / cumulative)\n",
+ ShortFunctionName($routine),
+ CleanFileName($filename),
+ Unparse($total1),
+ Unparse($total2),
+ Units());
+ }
+ if (!open(FILE, "<$filename")) {
+ print STDERR "$filename: $!\n";
+ return 0;
+ }
+ my $l = 0;
+ while (<FILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $l++;
+ if ($l >= $firstline - 5 &&
+ (($l <= $oldlastline + 5) || ($l <= $lastline))) {
+ chop;
+ my $text = $_;
+ if ($l == $firstline) { print $output $skip_marker; }
+ my $n1 = GetEntry($samples1, $l);
+ my $n2 = GetEntry($samples2, $l);
+ if ($html) {
+ # Emit a span that has one of the following classes:
+ # livesrc -- has samples
+ # deadsrc -- has disassembly, but with no samples
+        #    nop      -- has no matching disassembly
+ # Also emit an optional span containing disassembly.
+ my $dis = $disasm{$l};
+ my $asm = "";
+ if (defined($dis) && $dis ne '') {
+ $asm = "<span class=\"asm\">" . $dis . "</span>";
+ }
+ my $source_class = (($n1 + $n2 > 0)
+ ? "livesrc"
+ : (($asm ne "") ? "deadsrc" : "nop"));
+ printf $output (
+ "<span class=\"line\">%5d</span> " .
+ "<span class=\"%s\">%6s %6s %s</span>%s\n",
+ $l, $source_class,
+ HtmlPrintNumber($n1),
+ HtmlPrintNumber($n2),
+ HtmlEscape($text),
+ $asm);
+ } else {
+ printf $output(
+ "%6s %6s %4d: %s\n",
+ UnparseAlt($n1),
+ UnparseAlt($n2),
+ $l,
+ $text);
+ }
+ if ($l == $lastline) { print $output $skip_marker; }
+ };
+ }
+ close(FILE);
+ if ($html) {
+ print $output "</pre>\n";
+ }
+ return 1;
+}
+
+# Return the source line for the specified file/linenumber.
+# Returns undef if not found.
+sub SourceLine {
+ my $file = shift;
+ my $line = shift;
+
+ # Look in cache
+ if (!defined($main::source_cache{$file})) {
+ if (100 < scalar keys(%main::source_cache)) {
+ # Clear the cache when it gets too big
+      %main::source_cache = ();
+ }
+
+ # Read all lines from the file
+ if (!open(FILE, "<$file")) {
+ print STDERR "$file: $!\n";
+ $main::source_cache{$file} = []; # Cache the negative result
+ return undef;
+ }
+ my $lines = [];
+ push(@{$lines}, ""); # So we can use 1-based line numbers as indices
+ while (<FILE>) {
+ push(@{$lines}, $_);
+ }
+ close(FILE);
+
+ # Save the lines in the cache
+ $main::source_cache{$file} = $lines;
+ }
+
+ my $lines = $main::source_cache{$file};
+ if (($line < 0) || ($line > $#{$lines})) {
+ return undef;
+ } else {
+ return $lines->[$line];
+ }
+}
+
+# Print disassembly for one routine with interspersed source if available
+sub PrintDisassembledFunction {
+ my $prog = shift;
+ my $offset = shift;
+ my $routine = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+ my $total = shift;
+
+ # Disassemble all instructions
+ my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+
+ # Make array of counts per instruction
+ my @flat_count = ();
+ my @cum_count = ();
+ my $flat_total = 0;
+ my $cum_total = 0;
+ foreach my $e (@instructions) {
+    # Add up counts for all addresses that fall inside this instruction
+ my $c1 = 0;
+ my $c2 = 0;
+ for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+ $c1 += GetEntry($flat, $a);
+ $c2 += GetEntry($cumulative, $a);
+ }
+ push(@flat_count, $c1);
+ push(@cum_count, $c2);
+ $flat_total += $c1;
+ $cum_total += $c2;
+ }
+
+ # Print header with total counts
+ printf("ROUTINE ====================== %s\n" .
+ "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
+ ShortFunctionName($routine),
+ Unparse($flat_total),
+ Unparse($cum_total),
+ Units(),
+ ($cum_total * 100.0) / $total);
+
+ # Process instructions in order
+ my $current_file = "";
+ for (my $i = 0; $i <= $#instructions; ) {
+ my $e = $instructions[$i];
+
+ # Print the new file name whenever we switch files
+ if ($e->[1] ne $current_file) {
+ $current_file = $e->[1];
+ my $fname = $current_file;
+ $fname =~ s|^\./||; # Trim leading "./"
+
+ # Shorten long file names
+ if (length($fname) >= 58) {
+ $fname = "..." . substr($fname, -55);
+ }
+ printf("-------------------- %s\n", $fname);
+ }
+
+ # TODO: Compute range of lines to print together to deal with
+ # small reorderings.
+ my $first_line = $e->[2];
+ my $last_line = $first_line;
+ my %flat_sum = ();
+ my %cum_sum = ();
+ for (my $l = $first_line; $l <= $last_line; $l++) {
+ $flat_sum{$l} = 0;
+ $cum_sum{$l} = 0;
+ }
+
+ # Find run of instructions for this range of source lines
+ my $first_inst = $i;
+ while (($i <= $#instructions) &&
+ ($instructions[$i]->[2] >= $first_line) &&
+ ($instructions[$i]->[2] <= $last_line)) {
+ $e = $instructions[$i];
+ $flat_sum{$e->[2]} += $flat_count[$i];
+ $cum_sum{$e->[2]} += $cum_count[$i];
+ $i++;
+ }
+ my $last_inst = $i - 1;
+
+ # Print source lines
+ for (my $l = $first_line; $l <= $last_line; $l++) {
+ my $line = SourceLine($current_file, $l);
+ if (!defined($line)) {
+ $line = "?\n";
+ next;
+ } else {
+ $line =~ s/^\s+//;
+ }
+ printf("%6s %6s %5d: %s",
+ UnparseAlt($flat_sum{$l}),
+ UnparseAlt($cum_sum{$l}),
+ $l,
+ $line);
+ }
+
+ # Print disassembly
+ for (my $x = $first_inst; $x <= $last_inst; $x++) {
+ my $e = $instructions[$x];
+ printf("%6s %6s %8s: %6s\n",
+ UnparseAlt($flat_count[$x]),
+ UnparseAlt($cum_count[$x]),
+ UnparseAddress($offset, $e->[0]),
+ CleanDisassembly($e->[3]));
+ }
+ }
+}
+
+# Print DOT graph
+sub PrintDot {
+ my $prog = shift;
+ my $symbols = shift;
+ my $raw = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $overall_total = shift;
+
+ # Get total
+ my $local_total = TotalProfile($flat);
+ my $nodelimit = int($main::opt_nodefraction * $local_total);
+ my $edgelimit = int($main::opt_edgefraction * $local_total);
+ my $nodecount = $main::opt_nodecount;
+
+ # Find nodes to include
+ my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
+ abs(GetEntry($cumulative, $a))
+ || $a cmp $b }
+ keys(%{$cumulative}));
+ my $last = $nodecount - 1;
+ if ($last > $#list) {
+ $last = $#list;
+ }
+ while (($last >= 0) &&
+ (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
+ $last--;
+ }
+ if ($last < 0) {
+ print STDERR "No nodes to print\n";
+ return 0;
+ }
+
+ if ($nodelimit > 0 || $edgelimit > 0) {
+ printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
+ Unparse($nodelimit), Units(),
+ Unparse($edgelimit), Units());
+ }
+
+ # Open DOT output file
+ my $output;
+ my $escaped_dot = ShellEscape(@DOT);
+ my $escaped_ps2pdf = ShellEscape(@PS2PDF);
+ if ($main::opt_gv) {
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
+ $output = "| $escaped_dot -Tps2 >$escaped_outfile";
+ } elsif ($main::opt_evince) {
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
+ $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
+ } elsif ($main::opt_ps) {
+ $output = "| $escaped_dot -Tps2";
+ } elsif ($main::opt_pdf) {
+ $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
+ } elsif ($main::opt_web || $main::opt_svg) {
+ # We need to post-process the SVG, so write to a temporary file always.
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
+ $output = "| $escaped_dot -Tsvg >$escaped_outfile";
+ } elsif ($main::opt_gif) {
+ $output = "| $escaped_dot -Tgif";
+ } else {
+ $output = ">&STDOUT";
+ }
+ open(DOT, $output) || error("$output: $!\n");
+
+ # Title
+ printf DOT ("digraph \"%s; %s %s\" {\n",
+ $prog,
+ Unparse($overall_total),
+ Units());
+ if ($main::opt_pdf) {
+ # The output is more printable if we set the page size for dot.
+ printf DOT ("size=\"8,11\"\n");
+ }
+ printf DOT ("node [width=0.375,height=0.25];\n");
+
+ # Print legend
+ printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
+ "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
+ $prog,
+ sprintf("Total %s: %s", Units(), Unparse($overall_total)),
+ sprintf("Focusing on: %s", Unparse($local_total)),
+ sprintf("Dropped nodes with <= %s abs(%s)",
+ Unparse($nodelimit), Units()),
+ sprintf("Dropped edges with <= %s %s",
+ Unparse($edgelimit), Units())
+ );
+
+ # Print nodes
+ my %node = ();
+ my $nextnode = 1;
+ foreach my $a (@list[0..$last]) {
+ # Pick font size
+ my $f = GetEntry($flat, $a);
+ my $c = GetEntry($cumulative, $a);
+
+ my $fs = 8;
+ if ($local_total > 0) {
+ $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
+ }
+
+ $node{$a} = $nextnode++;
+ my $sym = $a;
+ $sym =~ s/\s+/\\n/g;
+ $sym =~ s/::/\\n/g;
+
+ # Extra cumulative info to print for non-leaves
+ my $extra = "";
+ if ($f != $c) {
+ $extra = sprintf("\\rof %s (%s)",
+ Unparse($c),
+ Percent($c, $local_total));
+ }
+ my $style = "";
+ if ($main::opt_heapcheck) {
+ if ($f > 0) {
+ # make leak-causing nodes more visible (add a background)
+ $style = ",style=filled,fillcolor=gray"
+ } elsif ($f < 0) {
+ # make anti-leak-causing nodes (which almost never occur)
+ # stand out as well (triple border)
+ $style = ",peripheries=3"
+ }
+ }
+
+ printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
+ "\",shape=box,fontsize=%.1f%s];\n",
+ $node{$a},
+ $sym,
+ Unparse($f),
+ Percent($f, $local_total),
+ $extra,
+ $fs,
+ $style,
+ );
+ }
+
+ # Get edges and counts per edge
+ my %edge = ();
+ my $n;
+ my $fullname_to_shortname_map = {};
+ FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+ foreach my $k (keys(%{$raw})) {
+ # TODO: omit low %age edges
+ $n = $raw->{$k};
+ my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+ for (my $i = 1; $i <= $#translated; $i++) {
+ my $src = $translated[$i];
+ my $dst = $translated[$i-1];
+ #next if ($src eq $dst); # Avoid self-edges?
+ if (exists($node{$src}) && exists($node{$dst})) {
+ my $edge_label = "$src\001$dst";
+ if (!exists($edge{$edge_label})) {
+ $edge{$edge_label} = 0;
+ }
+ $edge{$edge_label} += $n;
+ }
+ }
+ }
+
+ # Print edges (process in order of decreasing counts)
+ my %indegree = (); # Number of incoming edges added per node so far
+ my %outdegree = (); # Number of outgoing edges added per node so far
+ foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
+ my @x = split(/\001/, $e);
+ $n = $edge{$e};
+
+ # Initialize degree of kept incoming and outgoing edges if necessary
+ my $src = $x[0];
+ my $dst = $x[1];
+ if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
+ if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
+
+ my $keep;
+ if ($indegree{$dst} == 0) {
+ # Keep edge if needed for reachability
+ $keep = 1;
+ } elsif (abs($n) <= $edgelimit) {
+ # Drop if we are below --edgefraction
+ $keep = 0;
+ } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
+ $indegree{$dst} >= $main::opt_maxdegree) {
+ # Keep limited number of in/out edges per node
+ $keep = 0;
+ } else {
+ $keep = 1;
+ }
+
+ if ($keep) {
+ $outdegree{$src}++;
+ $indegree{$dst}++;
+
+ # Compute line width based on edge count
+ my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
+ if ($fraction > 1) { $fraction = 1; }
+ my $w = $fraction * 2;
+ if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
+ # SVG output treats line widths < 1 poorly.
+ $w = 1;
+ }
+
+ # Dot sometimes segfaults if given edge weights that are too large, so
+ # we cap the weights at a large value
+ my $edgeweight = abs($n) ** 0.7;
+ if ($edgeweight > 100000) { $edgeweight = 100000; }
+ $edgeweight = int($edgeweight);
+
+ my $style = sprintf("setlinewidth(%f)", $w);
+ if ($x[1] =~ m/\(inline\)/) {
+ $style .= ",dashed";
+ }
+
+ # Use a slightly squashed function of the edge count as the weight
+ printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
+ $node{$x[0]},
+ $node{$x[1]},
+ Unparse($n),
+ $edgeweight,
+ $style);
+ }
+ }
+
+ print DOT ("}\n");
+ close(DOT);
+
+ if ($main::opt_web || $main::opt_svg) {
+ # Rewrite SVG to be more usable inside web browser.
+ RewriteSvg(TempName($main::next_tmpfile, "svg"));
+ }
+
+ return 1;
+}
+
+sub RewriteSvg {
+ my $svgfile = shift;
+
+ open(SVG, $svgfile) || die "open temp svg: $!";
+ my @svg = <SVG>;
+ close(SVG);
+ unlink $svgfile;
+ my $svg = join('', @svg);
+
+ # Dot's SVG output is
+ #
+ # <svg width="___" height="___"
+ # viewBox="___" xmlns=...>
+ # <g id="graph0" transform="...">
+ # ...
+ # </g>
+ # </svg>
+ #
+ # Change it to
+ #
+ # <svg width="100%" height="100%"
+ # xmlns=...>
+ # $svg_javascript
+ # <g id="viewport" transform="translate(0,0)">
+ # <g id="graph0" transform="...">
+ # ...
+ # </g>
+ # </g>
+ # </svg>
+
+ # Fix width, height; drop viewBox.
+ $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
+
+ # Insert script, viewport <g> above first <g>
+ my $svg_javascript = SvgJavascript();
+ my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
+ $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
+
+ # Insert final </g> above </svg>.
+ $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
+ $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
+
+ if ($main::opt_svg) {
+ # --svg: write to standard output.
+ print $svg;
+ } else {
+ # Write back to temporary file.
+ open(SVG, ">$svgfile") || die "open $svgfile: $!";
+ print SVG $svg;
+ close(SVG);
+ }
+}
+
+sub SvgJavascript {
+ return <<'EOF';
+<script type="text/ecmascript"><![CDATA[
+// SVGPan
+// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
+// Local modification: if(true || ...) below to force panning, never moving.
+
+/**
+ * SVGPan library 1.2
+ * ====================
+ *
+ * Given a unique existing element with id "viewport", including
+ * the library into any SVG adds the following capabilities:
+ *
+ * - Mouse panning
+ * - Mouse zooming (using the wheel)
+ *  - Object dragging
+ *
+ * Known issues:
+ *
+ *  - Zooming (while panning) on Safari still has some issues
+ *
+ * Releases:
+ *
+ * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
+ * Fixed a bug with browser mouse handler interaction
+ *
+ * 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui
+ * Updated the zoom code to support the mouse wheel on Safari/Chrome
+ *
+ * 1.0, Andrea Leofreddi
+ * First release
+ *
+ * This code is licensed under the following BSD license:
+ *
+ * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are those of the
+ * authors and should not be interpreted as representing official policies, either expressed
+ * or implied, of Andrea Leofreddi.
+ */
+
+var root = document.documentElement;
+
+var state = 'none', stateTarget, stateOrigin, stateTf;
+
+setupHandlers(root);
+
+/**
+ * Register handlers
+ */
+function setupHandlers(root){
+ setAttributes(root, {
+ "onmouseup" : "add(evt)",
+ "onmousedown" : "handleMouseDown(evt)",
+ "onmousemove" : "handleMouseMove(evt)",
+ "onmouseup" : "handleMouseUp(evt)",
+ //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
+ });
+
+ if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
+ window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
+ else
+ window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
+
+ var g = svgDoc.getElementById("svg");
+ g.width = "100%";
+ g.height = "100%";
+}
+
+/**
+ * Instance an SVGPoint object with given event coordinates.
+ */
+function getEventPoint(evt) {
+ var p = root.createSVGPoint();
+
+ p.x = evt.clientX;
+ p.y = evt.clientY;
+
+ return p;
+}
+
+/**
+ * Sets the current transform matrix of an element.
+ */
+function setCTM(element, matrix) {
+ var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
+
+ element.setAttribute("transform", s);
+}
+
+/**
+ * Dumps a matrix to a string (useful for debug).
+ */
+function dumpMatrix(matrix) {
+ var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n 0, 0, 1 ]";
+
+ return s;
+}
+
+/**
+ * Sets attributes of an element.
+ */
+function setAttributes(element, attributes){
+ for (i in attributes)
+ element.setAttributeNS(null, i, attributes[i]);
+}
+
+/**
+ * Handle mouse wheel event.
+ */
+function handleMouseWheel(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var delta;
+
+ if(evt.wheelDelta)
+ delta = evt.wheelDelta / 3600; // Chrome/Safari
+ else
+ delta = evt.detail / -90; // Mozilla
+
+ var z = 1 + delta; // Zoom factor: 0.9/1.1
+
+ var g = svgDoc.getElementById("viewport");
+
+ var p = getEventPoint(evt);
+
+ p = p.matrixTransform(g.getCTM().inverse());
+
+ // Compute new scale matrix in current mouse position
+ var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
+
+ setCTM(g, g.getCTM().multiply(k));
+
+ stateTf = stateTf.multiply(k.inverse());
+}
+
+/**
+ * Handle mouse move event.
+ */
+function handleMouseMove(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var g = svgDoc.getElementById("viewport");
+
+ if(state == 'pan') {
+ // Pan mode
+ var p = getEventPoint(evt).matrixTransform(stateTf);
+
+ setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
+ } else if(state == 'move') {
+ // Move mode
+ var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
+
+ setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
+
+ stateOrigin = p;
+ }
+}
+
+/**
+ * Handle mouse button press event.
+ */
+function handleMouseDown(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var g = svgDoc.getElementById("viewport");
+
+ if(true || evt.target.tagName == "svg") {
+ // Pan mode
+ state = 'pan';
+
+ stateTf = g.getCTM().inverse();
+
+ stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+ } else {
+ // Move mode
+ state = 'move';
+
+ stateTarget = evt.target;
+
+ stateTf = g.getCTM().inverse();
+
+ stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+ }
+}
+
+/**
+ * Handle mouse button release event.
+ */
+function handleMouseUp(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ if(state == 'pan' || state == 'move') {
+ // Quit pan mode
+ state = '';
+ }
+}
+
+]]></script>
+EOF
+}
+
+# Provides a map from fullname to shortname for cases where the
+# shortname is ambiguous. The symlist has both the fullname and
+# shortname for all symbols, which is usually fine, but sometimes --
+# such as overloaded functions -- two different fullnames can map to
+# the same shortname. In that case, we use the address of the
+# function to disambiguate the two. This function fills in a map that
+# maps fullnames to modified shortnames in such cases. If a fullname
+# is not present in the map, the 'normal' shortname provided by the
+# symlist is the appropriate one to use.
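+# For example (hypothetical symbols): if "Foo::bar<00001234>" and
+# "Foo::bar<00005678>" both shorten to "Foo::bar", the map would gain the
+# entries "Foo::bar<00001234>" => "Foo::bar@1234" and
+# "Foo::bar<00005678>" => "Foo::bar@5678".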
+sub FillFullnameToShortnameMap {
+ my $symbols = shift;
+ my $fullname_to_shortname_map = shift;
+ my $shortnames_seen_once = {};
+ my $shortnames_seen_more_than_once = {};
+
+ foreach my $symlist (values(%{$symbols})) {
+ # TODO(csilvers): deal with inlined symbols too.
+ my $shortname = $symlist->[0];
+ my $fullname = $symlist->[2];
+ if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address
+ next; # the only collisions we care about are when addresses differ
+ }
+ if (defined($shortnames_seen_once->{$shortname}) &&
+ $shortnames_seen_once->{$shortname} ne $fullname) {
+ $shortnames_seen_more_than_once->{$shortname} = 1;
+ } else {
+ $shortnames_seen_once->{$shortname} = $fullname;
+ }
+ }
+
+ foreach my $symlist (values(%{$symbols})) {
+ my $shortname = $symlist->[0];
+ my $fullname = $symlist->[2];
+ # TODO(csilvers): take in a list of addresses we care about, and only
+ # store in the map if $symlist->[1] is in that list. Saves space.
+ next if defined($fullname_to_shortname_map->{$fullname});
+ if (defined($shortnames_seen_more_than_once->{$shortname})) {
+ if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it
+ $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
+ }
+ }
+ }
+}
+
+# Return a small number that identifies the argument.
+# Multiple calls with the same argument will return the same number.
+# Calls with different arguments will return different numbers.
+sub ShortIdFor {
+ my $key = shift;
+ my $id = $main::uniqueid{$key};
+ if (!defined($id)) {
+ $id = keys(%main::uniqueid) + 1;
+ $main::uniqueid{$key} = $id;
+ }
+ return $id;
+}
+
+# Translate a stack of addresses into a stack of symbols
+sub TranslateStack {
+ my $symbols = shift;
+ my $fullname_to_shortname_map = shift;
+ my $k = shift;
+
+ my @addrs = split(/\n/, $k);
+ my @result = ();
+ for (my $i = 0; $i <= $#addrs; $i++) {
+ my $a = $addrs[$i];
+
+ # Skip large addresses since they sometimes show up as fake entries on RH9
+ if (length($a) > 8 && $a gt "7fffffffffffffff") {
+ next;
+ }
+
+ if ($main::opt_disasm || $main::opt_list) {
+ # We want just the address for the key
+ push(@result, $a);
+ next;
+ }
+
+ my $symlist = $symbols->{$a};
+ if (!defined($symlist)) {
+ $symlist = [$a, "", $a];
+ }
+
+ # We can have a sequence of symbols for a particular entry
+ # (more than one symbol in the case of inlining). Callers
+ # come before callees in symlist, so walk backwards since
+ # the translated stack should contain callees before callers.
+ for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
+ my $func = $symlist->[$j-2];
+ my $fileline = $symlist->[$j-1];
+ my $fullfunc = $symlist->[$j];
+ if (defined($fullname_to_shortname_map->{$fullfunc})) {
+ $func = $fullname_to_shortname_map->{$fullfunc};
+ }
+ if ($j > 2) {
+ $func = "$func (inline)";
+ }
+
+ # Do not merge nodes corresponding to Callback::Run since that
+ # causes confusing cycles in dot display. Instead, we synthesize
+ # a unique name for this frame per caller.
+ if ($func =~ m/Callback.*::Run$/) {
+ my $caller = ($i > 0) ? $addrs[$i-1] : 0;
+ $func = "Run#" . ShortIdFor($caller);
+ }
+
+ if ($main::opt_addresses) {
+ push(@result, "$a $func $fileline");
+ } elsif ($main::opt_lines) {
+ if ($func eq '??' && $fileline eq '??:0') {
+ push(@result, "$a");
+ } else {
+ push(@result, "$func $fileline");
+ }
+ } elsif ($main::opt_functions) {
+ if ($func eq '??') {
+ push(@result, "$a");
+ } else {
+ push(@result, $func);
+ }
+ } elsif ($main::opt_files) {
+ if ($fileline eq '??:0' || $fileline eq '') {
+ push(@result, "$a");
+ } else {
+ my $f = $fileline;
+ $f =~ s/:\d+$//;
+ push(@result, $f);
+ }
+ } else {
+ push(@result, $a);
+ last; # Do not print inlined info
+ }
+ }
+ }
+
+ # print join(",", @addrs), " => ", join(",", @result), "\n";
+ return @result;
+}
+
+# Generate percent string for a number and a total
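+# (e.g. Percent(1, 8) yields "12.5%"; with a zero total, Percent(3, 0)
+# yields "+inf")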
+sub Percent {
+ my $num = shift;
+ my $tot = shift;
+ if ($tot != 0) {
+ return sprintf("%.1f%%", $num * 100.0 / $tot);
+ } else {
+ return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
+ }
+}
+
+# Generate pretty-printed form of number
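+# (e.g. for a heap profile in the default MB units, Unparse(1048576)
+# yields "1.0"; with --show_bytes it yields "1048576")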
+sub Unparse {
+ my $num = shift;
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+ return sprintf("%d", $num);
+ } else {
+ if ($main::opt_show_bytes) {
+ return sprintf("%d", $num);
+ } else {
+ return sprintf("%.1f", $num / 1048576.0);
+ }
+ }
+ } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+ return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
+ } else {
+ return sprintf("%d", $num);
+ }
+}
+
+# Alternate pretty-printed form: 0 maps to "."
+sub UnparseAlt {
+ my $num = shift;
+ if ($num == 0) {
+ return ".";
+ } else {
+ return Unparse($num);
+ }
+}
+
+# Alternate pretty-printed form: 0 maps to ""
+sub HtmlPrintNumber {
+ my $num = shift;
+ if ($num == 0) {
+ return "";
+ } else {
+ return Unparse($num);
+ }
+}
+
+# Return output units
+sub Units {
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+ return "objects";
+ } else {
+ if ($main::opt_show_bytes) {
+ return "B";
+ } else {
+ return "MB";
+ }
+ }
+ } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+ return "seconds";
+ } else {
+ return "samples";
+ }
+}
+
+##### Profile manipulation code #####
+
+# Generate flattened profile:
+# If count is charged to stack [a,b,c,d], in generated profile,
+# it will be charged to [a]
+sub FlatProfile {
+ my $profile = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ if ($#addrs >= 0) {
+ AddEntry($result, $addrs[0], $count);
+ }
+ }
+ return $result;
+}
+
+# Generate cumulative profile:
+# If count is charged to stack [a,b,c,d], in generated profile,
+# it will be charged to [a], [b], [c], [d]
+sub CumulativeProfile {
+ my $profile = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ foreach my $a (@addrs) {
+ AddEntry($result, $a, $count);
+ }
+ }
+ return $result;
+}
+
+# If the second-youngest PC on the stack is always the same, returns
+# that pc. Otherwise, returns undef.
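+# (The second-youngest PC is $addrs[1], i.e. the direct caller of the
+# leaf frame; see RemoveUninterestingFrames for how this is used.)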
+sub IsSecondPcAlwaysTheSame {
+ my $profile = shift;
+
+ my $second_pc = undef;
+ foreach my $k (keys(%{$profile})) {
+ my @addrs = split(/\n/, $k);
+ if ($#addrs < 1) {
+ return undef;
+ }
+ if (not defined $second_pc) {
+ $second_pc = $addrs[1];
+ } else {
+ if ($second_pc ne $addrs[1]) {
+ return undef;
+ }
+ }
+ }
+ return $second_pc;
+}
+
+sub ExtractSymbolLocation {
+ my $symbols = shift;
+ my $address = shift;
+ # 'addr2line' outputs "??:0" for unknown locations; we do the
+ # same to be consistent.
+ my $location = "??:0:unknown";
+ if (exists $symbols->{$address}) {
+ my $file = $symbols->{$address}->[1];
+ if ($file eq "?") {
+ $file = "??:0"
+ }
+ $location = $file . ":" . $symbols->{$address}->[0];
+ }
+ return $location;
+}
+
+# Extracts a graph of calls.
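+# Each stack charges its count to the leaf's "file:line:function" location
+# and to every "caller -> callee" edge along the stack.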
+sub ExtractCalls {
+ my $symbols = shift;
+ my $profile = shift;
+
+ my $calls = {};
+ while( my ($stack_trace, $count) = each %$profile ) {
+ my @address = split(/\n/, $stack_trace);
+ my $destination = ExtractSymbolLocation($symbols, $address[0]);
+ AddEntry($calls, $destination, $count);
+ for (my $i = 1; $i <= $#address; $i++) {
+ my $source = ExtractSymbolLocation($symbols, $address[$i]);
+ my $call = "$source -> $destination";
+ AddEntry($calls, $call, $count);
+ $destination = $source;
+ }
+ }
+
+ return $calls;
+}
+
+sub FilterFrames {
+ my $symbols = shift;
+ my $profile = shift;
+
+ if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
+ return $profile;
+ }
+
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my @path = ();
+ foreach my $a (@addrs) {
+ my $sym;
+ if (exists($symbols->{$a})) {
+ $sym = $symbols->{$a}->[0];
+ } else {
+ $sym = $a;
+ }
+ if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
+ next;
+ }
+ if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
+ next;
+ }
+ push(@path, $a);
+ }
+ if (scalar(@path) > 0) {
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+ }
+
+ return $result;
+}
+
+sub RemoveUninterestingFrames {
+ my $symbols = shift;
+ my $profile = shift;
+
+ # List of function names to skip
+ my %skip = ();
+ my $skip_regexp = 'NOMATCH';
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ foreach my $name ('calloc',
+ 'cfree',
+ 'malloc',
+ 'free',
+ 'memalign',
+ 'posix_memalign',
+ 'aligned_alloc',
+ 'pvalloc',
+ 'valloc',
+ 'realloc',
+ 'mallocx', # jemalloc
+ 'rallocx', # jemalloc
+ 'xallocx', # jemalloc
+ 'dallocx', # jemalloc
+ 'sdallocx', # jemalloc
+ 'tc_calloc',
+ 'tc_cfree',
+ 'tc_malloc',
+ 'tc_free',
+ 'tc_memalign',
+ 'tc_posix_memalign',
+ 'tc_pvalloc',
+ 'tc_valloc',
+ 'tc_realloc',
+ 'tc_new',
+ 'tc_delete',
+ 'tc_newarray',
+ 'tc_deletearray',
+ 'tc_new_nothrow',
+ 'tc_newarray_nothrow',
+ 'do_malloc',
+ '::do_malloc', # new name -- got moved to an unnamed ns
+ '::do_malloc_or_cpp_alloc',
+ 'DoSampledAllocation',
+ 'simple_alloc::allocate',
+ '__malloc_alloc_template::allocate',
+ '__builtin_delete',
+ '__builtin_new',
+ '__builtin_vec_delete',
+ '__builtin_vec_new',
+ 'operator new',
+ 'operator new[]',
+ # The entry to our memory-allocation routines on OS X
+ 'malloc_zone_malloc',
+ 'malloc_zone_calloc',
+ 'malloc_zone_valloc',
+ 'malloc_zone_realloc',
+ 'malloc_zone_memalign',
+ 'malloc_zone_free',
+ # These mark the beginning/end of our custom sections
+ '__start_google_malloc',
+ '__stop_google_malloc',
+ '__start_malloc_hook',
+ '__stop_malloc_hook') {
+ $skip{$name} = 1;
+ $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything
+ }
+ # TODO: Remove TCMalloc once everything has been
+ # moved into the tcmalloc:: namespace and we have flushed
+ # old code out of the system.
+ $skip_regexp = "TCMalloc|^tcmalloc::";
+ } elsif ($main::profile_type eq 'contention') {
+ foreach my $vname ('base::RecordLockProfileData',
+ 'base::SubmitMutexProfileData',
+ 'base::SubmitSpinLockProfileData',
+ 'Mutex::Unlock',
+ 'Mutex::UnlockSlow',
+ 'Mutex::ReaderUnlock',
+ 'MutexLock::~MutexLock',
+ 'SpinLock::Unlock',
+ 'SpinLock::SlowUnlock',
+ 'SpinLockHolder::~SpinLockHolder') {
+ $skip{$vname} = 1;
+ }
+ } elsif ($main::profile_type eq 'cpu') {
+ # Drop signal handlers used for CPU profile collection
+ # TODO(dpeng): this should not be necessary; it's taken
+ # care of by the general 2nd-pc mechanism below.
+ foreach my $name ('ProfileData::Add', # historical
+ 'ProfileData::prof_handler', # historical
+ 'CpuProfiler::prof_handler',
+ '__FRAME_END__',
+ '__pthread_sighandler',
+ '__restore') {
+ $skip{$name} = 1;
+ }
+ } else {
+ # Nothing skipped for unknown types
+ }
+
+ if ($main::profile_type eq 'cpu') {
+ # If all the second-youngest program counters are the same,
+ # this STRONGLY suggests that it is an artifact of measurement,
+ # i.e., stack frames pushed by the CPU profiler signal handler.
+ # Hence, we delete them.
+ # (The topmost PC is read from the signal structure, not from
+ # the stack, so it does not get involved.)
+ while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
+ my $result = {};
+ my $func = '';
+ if (exists($symbols->{$second_pc})) {
+ $second_pc = $symbols->{$second_pc}->[0];
+ }
+ print STDERR "Removing $second_pc from all stack traces.\n";
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ splice @addrs, 1, 1;
+ my $reduced_path = join("\n", @addrs);
+ AddEntry($result, $reduced_path, $count);
+ }
+ $profile = $result;
+ }
+ }
+
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my @path = ();
+ foreach my $a (@addrs) {
+ if (exists($symbols->{$a})) {
+ my $func = $symbols->{$a}->[0];
+ if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
+ # Throw away the portion of the backtrace seen so far, under the
+ # assumption that previous frames were for functions internal to the
+ # allocator.
+ @path = ();
+ next;
+ }
+ }
+ push(@path, $a);
+ }
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+
+ $result = FilterFrames($symbols, $result);
+
+ return $result;
+}
+
+# Reduce profile to granularity given by user
+sub ReduceProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $result = {};
+ my $fullname_to_shortname_map = {};
+ FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+ my @path = ();
+ my %seen = ();
+ $seen{''} = 1; # So that empty keys are skipped
+ foreach my $e (@translated) {
+ # To avoid double-counting due to recursion, skip a stack-trace
+ # entry if it has already been seen
+ if (!$seen{$e}) {
+ $seen{$e} = 1;
+ push(@path, $e);
+ }
+ }
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+ return $result;
+}
+
+# Does the specified symbol array match the regexp?
+sub SymbolMatches {
+ my $sym = shift;
+ my $re = shift;
+ if (defined($sym)) {
+ for (my $i = 0; $i < $#{$sym}; $i += 3) {
+ if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+# Focus only on paths involving specified regexps
+sub FocusProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $focus = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ foreach my $a (@addrs) {
+      # Keep this stack if any frame matches the address/shortname/fileline
+ if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
+ AddEntry($result, $k, $count);
+ last;
+ }
+ }
+ }
+ return $result;
+}
+
+# Focus only on paths not involving specified regexps
+sub IgnoreProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $ignore = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my $matched = 0;
+ foreach my $a (@addrs) {
+      # Mark this stack as matched if any frame matches the address/shortname/fileline
+ if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
+ $matched = 1;
+ last;
+ }
+ }
+ if (!$matched) {
+ AddEntry($result, $k, $count);
+ }
+ }
+ return $result;
+}
+
+# Get total count in profile
+sub TotalProfile {
+ my $profile = shift;
+ my $result = 0;
+ foreach my $k (keys(%{$profile})) {
+ $result += $profile->{$k};
+ }
+ return $result;
+}
+
+# Return a new profile containing the sum of profiles A and B
+sub AddProfile {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ # add all keys in A
+ foreach my $k (keys(%{$A})) {
+ my $v = $A->{$k};
+ AddEntry($R, $k, $v);
+ }
+ # add all keys in B
+ foreach my $k (keys(%{$B})) {
+ my $v = $B->{$k};
+ AddEntry($R, $k, $v);
+ }
+ return $R;
+}
+
+# Merges symbol maps
+sub MergeSymbols {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ foreach my $k (keys(%{$A})) {
+ $R->{$k} = $A->{$k};
+ }
+ if (defined($B)) {
+ foreach my $k (keys(%{$B})) {
+ $R->{$k} = $B->{$k};
+ }
+ }
+ return $R;
+}
+
+
+# Return the union of the PC sets A and B
+sub AddPcs {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ # add all keys in A
+ foreach my $k (keys(%{$A})) {
+ $R->{$k} = 1
+ }
+ # add all keys in B
+ foreach my $k (keys(%{$B})) {
+ $R->{$k} = 1
+ }
+ return $R;
+}
+
+# Subtract B from A
+sub SubtractProfile {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ foreach my $k (keys(%{$A})) {
+ my $v = $A->{$k} - GetEntry($B, $k);
+ if ($v < 0 && $main::opt_drop_negative) {
+ $v = 0;
+ }
+ AddEntry($R, $k, $v);
+ }
+ if (!$main::opt_drop_negative) {
+ # Take care of when subtracted profile has more entries
+ foreach my $k (keys(%{$B})) {
+ if (!exists($A->{$k})) {
+ AddEntry($R, $k, 0 - $B->{$k});
+ }
+ }
+ }
+ return $R;
+}
+
+# Get entry from profile; zero if not present
+sub GetEntry {
+ my $profile = shift;
+ my $k = shift;
+ if (exists($profile->{$k})) {
+ return $profile->{$k};
+ } else {
+ return 0;
+ }
+}
+
+# Add entry to specified profile
+sub AddEntry {
+ my $profile = shift;
+ my $k = shift;
+ my $n = shift;
+ if (!exists($profile->{$k})) {
+ $profile->{$k} = 0;
+ }
+ $profile->{$k} += $n;
+}
+
+# Add a stack of entries to specified profile, and add them to the $pcs
+# list.
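+# For example (hypothetical addresses), AddEntries($prof, $pcs, "4004f0 4003a2", 2)
+# records both PCs in $pcs and charges 2 to the stack key formed by joining
+# the HexExtend()ed addresses with newlines.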
+sub AddEntries {
+ my $profile = shift;
+ my $pcs = shift;
+ my $stack = shift;
+ my $count = shift;
+ my @k = ();
+
+ foreach my $e (split(/\s+/, $stack)) {
+ my $pc = HexExtend($e);
+ $pcs->{$pc} = 1;
+ push @k, $pc;
+ }
+ AddEntry($profile, (join "\n", @k), $count);
+}
+
+##### Code to profile a server dynamically #####
+
+sub CheckSymbolPage {
+ my $url = SymbolPageURL();
+ my $command = ShellEscape(@URL_FETCHER, $url);
+ open(SYMBOL, "$command |") or error($command);
+ my $line = <SYMBOL>;
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ close(SYMBOL);
+ unless (defined($line)) {
+ error("$url doesn't exist\n");
+ }
+
+ if ($line =~ /^num_symbols:\s+(\d+)$/) {
+ if ($1 == 0) {
+ error("Stripped binary. No symbols available.\n");
+ }
+ } else {
+ error("Failed to get the number of symbols from $url\n");
+ }
+}
+
+sub IsProfileURL {
+ my $profile_name = shift;
+ if (-f $profile_name) {
+ printf STDERR "Using local file $profile_name.\n";
+ return 0;
+ }
+ return 1;
+}
+
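+# Split a profile argument of the form [http://]host[:port][/path] into
+# (host, baseurl, profile path).  For example (hypothetical input),
+# "http://myhost:8000" parses to ("myhost", "http://myhost:8000", "/").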
+sub ParseProfileURL {
+ my $profile_name = shift;
+
+ if (!defined($profile_name) || $profile_name eq "") {
+ return ();
+ }
+
+ # Split profile URL - matches all non-empty strings, so no test.
+ $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
+
+ my $proto = $1 || "http://";
+ my $hostport = $2;
+ my $prefix = $3;
+ my $profile = $4 || "/";
+
+ my $host = $hostport;
+ $host =~ s/:.*//;
+
+ my $baseurl = "$proto$hostport$prefix";
+ return ($host, $baseurl, $profile);
+}
+
+# We fetch symbols from the first profile argument.
+sub SymbolPageURL {
+ my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+ return "$baseURL$SYMBOL_PAGE";
+}
+
+sub FetchProgramName() {
+ my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+ my $url = "$baseURL$PROGRAM_NAME_PAGE";
+ my $command_line = ShellEscape(@URL_FETCHER, $url);
+ open(CMDLINE, "$command_line |") or error($command_line);
+ my $cmdline = <CMDLINE>;
+ $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ close(CMDLINE);
+ error("Failed to get program name from $url\n") unless defined($cmdline);
+  $cmdline =~ s/\x00.+//;  # Remove argv[1] and later arguments.
+ $cmdline =~ s!\n!!g; # Remove LFs.
+ return $cmdline;
+}
+
+# curl's -L (--location) option isn't reliable, at least as of
+# version 7.12.3: curl forgets to post data when it follows a
+# redirection. This function works around that by resolving the
+# redirection ourselves. Redirection happens on borg hosts.
+sub ResolveRedirectionForCurl {
+ my $url = shift;
+ my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
+ open(CMDLINE, "$command_line |") or error($command_line);
+ while (<CMDLINE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (/^Location: (.*)/) {
+ $url = $1;
+ }
+ }
+ close(CMDLINE);
+ return $url;
+}
+
+# Add a timeout flag to URL_FETCHER. Returns a new list.
+sub AddFetchTimeout {
+ my $timeout = shift;
+ my @fetcher = @_;
+ if (defined($timeout)) {
+ if (join(" ", @fetcher) =~ m/\bcurl -s/) {
+ push(@fetcher, "--max-time", sprintf("%d", $timeout));
+ } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
+ push(@fetcher, sprintf("--deadline=%d", $timeout));
+ }
+ }
+ return @fetcher;
+}
+
+# Reads a symbol map from the file handle passed as the first argument,
+# returning the resulting map. Also processes variables relating to symbols.
+# Currently, the only variable processed is 'binary=<value>' which updates
+# $main::prog to have the correct program name.
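+# A hypothetical input illustrating the accepted format:
+#   0x00000000004004f0 my_function
+#   binary=/path/to/prog
+#   ---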
+sub ReadSymbols {
+ my $in = shift;
+ my $map = {};
+ while (<$in>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Removes all the leading zeroes from the symbols, see comment below.
+ if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
+ $map->{$1} = $2;
+ } elsif (m/^---/) {
+ last;
+ } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
+ my ($variable, $value) = ($1, $2);
+ for ($variable, $value) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+ if ($variable eq "binary") {
+ if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
+ printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
+ $main::prog, $value);
+ }
+ $main::prog = $value;
+ } else {
+ printf STDERR ("Ignoring unknown variable in symbols list: " .
+ "'%s' = '%s'\n", $variable, $value);
+ }
+ }
+ }
+ return $map;
+}
+
+sub URLEncode {
+ my $str = shift;
+ $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
+ return $str;
+}
+
+sub AppendSymbolFilterParams {
+ my $url = shift;
+ my @params = ();
+ if ($main::opt_retain ne '') {
+ push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
+ }
+ if ($main::opt_exclude ne '') {
+ push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
+ }
+ if (scalar @params > 0) {
+ $url = sprintf("%s?%s", $url, join("&", @params));
+ }
+ return $url;
+}
+
+# Fetches and processes symbols to prepare them for use in the profile output
+# code. If the optional 'symbol_map' arg is not given, fetches symbols from
+# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols
+# are assumed to have already been fetched into 'symbol_map' and are simply
+# extracted and processed.
+sub FetchSymbols {
+ my $pcset = shift;
+ my $symbol_map = shift;
+
+ my %seen = ();
+ my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq
+
+ if (!defined($symbol_map)) {
+ my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
+
+ open(POSTFILE, ">$main::tmpfile_sym");
+ print POSTFILE $post_data;
+ close(POSTFILE);
+
+ my $url = SymbolPageURL();
+
+ my $command_line;
+ if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
+ $url = ResolveRedirectionForCurl($url);
+ $url = AppendSymbolFilterParams($url);
+ $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
+ $url);
+ } else {
+ $url = AppendSymbolFilterParams($url);
+ $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
+ . " < " . ShellEscape($main::tmpfile_sym));
+ }
+ # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
+ my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
+ open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
+ $symbol_map = ReadSymbols(*SYMBOL{IO});
+ close(SYMBOL);
+ }
+
+ my $symbols = {};
+ foreach my $pc (@pcs) {
+ my $fullname;
+ # For 64 bits binaries, symbols are extracted with 8 leading zeroes.
+ # Then /symbol reads the long symbols in as uint64, and outputs
+ # the result with a "0x%08llx" format which get rid of the zeroes.
+ # By removing all the leading zeroes in both $pc and the symbols from
+ # /symbol, the symbols match and are retrievable from the map.
+ my $shortpc = $pc;
+ $shortpc =~ s/^0*//;
+ # Each line may have a list of names, which includes the function
+ # and also other functions it has inlined. They are separated (in
+    # PrintSymbolizedProfile) by '--', which is illegal in function names.
+ my $fullnames;
+ if (defined($symbol_map->{$shortpc})) {
+ $fullnames = $symbol_map->{$shortpc};
+ } else {
+ $fullnames = "0x" . $pc; # Just use addresses
+ }
+ my $sym = [];
+ $symbols->{$pc} = $sym;
+ foreach my $fullname (split("--", $fullnames)) {
+ my $name = ShortFunctionName($fullname);
+ push(@{$sym}, $name, "?", $fullname);
+ }
+ }
+ return $symbols;
+}
+
+sub BaseName {
+ my $file_name = shift;
+ $file_name =~ s!^.*/!!; # Remove directory name
+ return $file_name;
+}
+
+sub MakeProfileBaseName {
+ my ($binary_name, $profile_name) = @_;
+ my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+ my $binary_shortname = BaseName($binary_name);
+ return sprintf("%s.%s.%s",
+ $binary_shortname, $main::op_time, $host);
+}
+
+sub FetchDynamicProfile {
+ my $binary_name = shift;
+ my $profile_name = shift;
+ my $fetch_name_only = shift;
+ my $encourage_patience = shift;
+
+ if (!IsProfileURL($profile_name)) {
+ return $profile_name;
+ } else {
+ my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+ if ($path eq "" || $path eq "/") {
+ # Missing type specifier defaults to cpu-profile
+ $path = $PROFILE_PAGE;
+ }
+
+ my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
+
+ my $url = "$baseURL$path";
+ my $fetch_timeout = undef;
+ if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
+ if ($path =~ m/[?]/) {
+ $url .= "&";
+ } else {
+ $url .= "?";
+ }
+ $url .= sprintf("seconds=%d", $main::opt_seconds);
+ $fetch_timeout = $main::opt_seconds * 1.01 + 60;
+ # Set $profile_type for consumption by PrintSymbolizedProfile.
+ $main::profile_type = 'cpu';
+ } else {
+ # For non-CPU profiles, we add a type-extension to
+ # the target profile file name.
+ my $suffix = $path;
+ $suffix =~ s,/,.,g;
+ $profile_file .= $suffix;
+ # Set $profile_type for consumption by PrintSymbolizedProfile.
+ if ($path =~ m/$HEAP_PAGE/) {
+ $main::profile_type = 'heap';
+ } elsif ($path =~ m/$GROWTH_PAGE/) {
+ $main::profile_type = 'growth';
+ } elsif ($path =~ m/$CONTENTION_PAGE/) {
+ $main::profile_type = 'contention';
+ }
+ }
+
+ my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
+ if (! -d $profile_dir) {
+ mkdir($profile_dir)
+ || die("Unable to create profile directory $profile_dir: $!\n");
+ }
+ my $tmp_profile = "$profile_dir/.tmp.$profile_file";
+ my $real_profile = "$profile_dir/$profile_file";
+
+ if ($fetch_name_only > 0) {
+ return $real_profile;
+ }
+
+ my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
+ my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
+ if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
+ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n";
+ if ($encourage_patience) {
+ print STDERR "Be patient...\n";
+ }
+ } else {
+ print STDERR "Fetching $path profile from $url to\n ${real_profile}\n";
+ }
+
+ (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
+ (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
+ print STDERR "Wrote profile to $real_profile\n";
+ $main::collected_profile = $real_profile;
+ return $main::collected_profile;
+ }
+}
+
+# Collect profiles in parallel
+sub FetchDynamicProfiles {
+ my $items = scalar(@main::pfile_args);
+ my $levels = log($items) / log(2);
+
+ if ($items == 1) {
+ $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
+ } else {
+    # Compensate for floating-point rounding in the log() computation above
+ if ((2 ** $levels) < $items) {
+ $levels++;
+ }
+ my $count = scalar(@main::pfile_args);
+ for (my $i = 0; $i < $count; $i++) {
+ $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
+ }
+ print STDERR "Fetching $count profiles, Be patient...\n";
+ FetchDynamicProfilesRecurse($levels, 0, 0);
+ $main::collected_profile = join(" \\\n ", @main::profile_files);
+ }
+}
+
+# Recursively fork a process to get enough processes
+# collecting profiles
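+# For example, with 4 profiles ($levels == 2) the recursion forks into 4
+# processes whose positions are 0..3, so each profile is fetched in parallel.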
+sub FetchDynamicProfilesRecurse {
+ my $maxlevel = shift;
+ my $level = shift;
+ my $position = shift;
+
+ if (my $pid = fork()) {
+ $position = 0 | ($position << 1);
+ TryCollectProfile($maxlevel, $level, $position);
+ wait;
+ } else {
+ $position = 1 | ($position << 1);
+ TryCollectProfile($maxlevel, $level, $position);
+ cleanup();
+ exit(0);
+ }
+}
+
+# Collect a single profile
+sub TryCollectProfile {
+ my $maxlevel = shift;
+ my $level = shift;
+ my $position = shift;
+
+ if ($level >= ($maxlevel - 1)) {
+ if ($position < scalar(@main::pfile_args)) {
+ FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
+ }
+ } else {
+ FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
+ }
+}
+
+##### Parsing code #####
+
+# Provide a small streaming-read module to handle very large
+# cpu-profile files. Stream in chunks along a sliding window.
+# Provides an interface to get one 'slot', correctly handling
+# endian-ness differences. A slot is one 32-bit or 64-bit word
+# (depending on the input profile). We tell endianness and bit-size
+# for the profile by looking at the first 8 bytes: in cpu profiles,
+# the second slot is always 3 (we'll accept anything that's not 0).
+BEGIN {
+ package CpuProfileStream;
+
+ sub new {
+ my ($class, $file, $fname) = @_;
+ my $self = { file => $file,
+ base => 0,
+ stride => 512 * 1024, # must be a multiple of bitsize/8
+ slots => [],
+ unpack_code => "", # N for big-endian, V for little
+ perl_is_64bit => 1, # matters if profile is 64-bit
+ };
+ bless $self, $class;
+ # Let unittests adjust the stride
+ if ($main::opt_test_stride > 0) {
+ $self->{stride} = $main::opt_test_stride;
+ }
+ # Read the first two slots to figure out bitsize and endianness.
+ my $slots = $self->{slots};
+ my $str;
+ read($self->{file}, $str, 8);
+ # Set the global $address_length based on what we see here.
+ # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
+ $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
+ if ($address_length == 8) {
+ if (substr($str, 6, 2) eq chr(0)x2) {
+ $self->{unpack_code} = 'V'; # Little-endian.
+ } elsif (substr($str, 4, 2) eq chr(0)x2) {
+ $self->{unpack_code} = 'N'; # Big-endian
+ } else {
+ ::error("$fname: header size >= 2**16\n");
+ }
+ @$slots = unpack($self->{unpack_code} . "*", $str);
+ } else {
+ # If we're a 64-bit profile, check if we're a 64-bit-capable
+ # perl. Otherwise, each slot will be represented as a float
+ # instead of an int64, losing precision and making all the
+ # 64-bit addresses wrong. We won't complain yet, but will
+ # later if we ever see a value that doesn't fit in 32 bits.
+ my $has_q = 0;
+ eval { $has_q = pack("Q", "1") ? 1 : 1; };
+ if (!$has_q) {
+ $self->{perl_is_64bit} = 0;
+ }
+ read($self->{file}, $str, 8);
+ if (substr($str, 4, 4) eq chr(0)x4) {
+ # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
+ $self->{unpack_code} = 'V'; # Little-endian.
+ } elsif (substr($str, 0, 4) eq chr(0)x4) {
+ $self->{unpack_code} = 'N'; # Big-endian
+ } else {
+ ::error("$fname: header size >= 2**32\n");
+ }
+ my @pair = unpack($self->{unpack_code} . "*", $str);
+ # Since we know one of the pair is 0, it's fine to just add them.
+ @$slots = (0, $pair[0] + $pair[1]);
+ }
+ return $self;
+ }
+
+ # Load more data when we access slots->get(X) which is not yet in memory.
+ sub overflow {
+ my ($self) = @_;
+ my $slots = $self->{slots};
+ $self->{base} += $#$slots + 1; # skip over data we're replacing
+ my $str;
+ read($self->{file}, $str, $self->{stride});
+ if ($address_length == 8) { # the 32-bit case
+ # This is the easy case: unpack provides 32-bit unpacking primitives.
+ @$slots = unpack($self->{unpack_code} . "*", $str);
+ } else {
+ # We need to unpack 32 bits at a time and combine.
+ my @b32_values = unpack($self->{unpack_code} . "*", $str);
+ my @b64_values = ();
+ for (my $i = 0; $i < $#b32_values; $i += 2) {
+ # TODO(csilvers): if this is a 32-bit perl, the math below
+ # could end up in a too-large int, which perl will promote
+ # to a double, losing necessary precision. Deal with that.
+ # Right now, we just die.
+ my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
+ if ($self->{unpack_code} eq 'N') { # big-endian
+ ($lo, $hi) = ($hi, $lo);
+ }
+ my $value = $lo + $hi * (2**32);
+ if (!$self->{perl_is_64bit} && # check value is exactly represented
+ (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
+ ::error("Need a 64-bit perl to process this 64-bit profile.\n");
+ }
+ push(@b64_values, $value);
+ }
+ @$slots = @b64_values;
+ }
+ }
+
+ # Access the i-th long in the file (logically), or -1 at EOF.
+ sub get {
+ my ($self, $idx) = @_;
+ my $slots = $self->{slots};
+ while ($#$slots >= 0) {
+ if ($idx < $self->{base}) {
+ # The only time we expect a reference to $slots[$i - something]
+ # after referencing $slots[$i] is reading the very first header.
+ # Since $stride > |header|, that shouldn't cause any lookback
+ # errors. And everything after the header is sequential.
+ print STDERR "Unexpected look-back reading CPU profile";
+ return -1; # shrug, don't know what better to return
+ } elsif ($idx > $self->{base} + $#$slots) {
+ $self->overflow();
+ } else {
+ return $slots->[$idx - $self->{base}];
+ }
+ }
+ # If we get here, $slots is [], which means we've reached EOF
+ return -1; # unique since slots is supposed to hold unsigned numbers
+ }
+}
+
+# Reads the top, 'header' section of a profile, and returns the last
+# line of the header, commonly called a 'header line'. The header
+# section of a profile consists of zero or more 'command' lines that
+# are instructions to jeprof, which jeprof executes when reading the
+# header. All 'command' lines start with a %. After the command
+# lines is the 'header line', which is a profile-specific line that
+# indicates what type of profile it is, and perhaps other global
+# information about the profile. For instance, here's a header line
+# for a heap profile:
+# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile
+# For historical reasons, the CPU profile does not contain a text-
+# readable header line. If the profile looks like a CPU profile,
+# this function returns "". If no header line could be found, this
+# function returns undef.
+#
+# The following commands are recognized:
+# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
+#
+# The input file should be in binmode.
+sub ReadProfileHeader {
+ local *PROFILE = shift;
+ my $firstchar = "";
+ my $line = "";
+ read(PROFILE, $firstchar, 1);
+ seek(PROFILE, -1, 1); # unread the firstchar
+ if ($firstchar !~ /[[:print:]]/) { # is not a text character
+ return "";
+ }
+ while (defined($line = <PROFILE>)) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ($line =~ /^%warn\s+(.*)/) { # 'warn' command
+ # Note this matches both '%warn blah\n' and '%warn\n'.
+ print STDERR "WARNING: $1\n"; # print the rest of the line
+ } elsif ($line =~ /^%/) {
+ print STDERR "Ignoring unknown command from profile header: $line";
+ } else {
+ # End of commands, must be the header line.
+ return $line;
+ }
+ }
+ return undef; # got to EOF without seeing a header line
+}
+
+sub IsSymbolizedProfileFile {
+ my $file_name = shift;
+ if (!(-e $file_name) || !(-r $file_name)) {
+ return 0;
+ }
+ # Check if the file contains a symbol-section marker.
+ open(TFILE, "<$file_name");
+ binmode TFILE;
+ my $firstline = ReadProfileHeader(*TFILE);
+ close(TFILE);
+ if (!$firstline) {
+ return 0;
+ }
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+ return $firstline =~ /^--- *$symbol_marker/;
+}
+
+# Parse profile generated by common/profiler.cc and return a reference
+# to a map:
+# $result->{version} Version number of profile file
+# $result->{period} Sampling period (in microseconds)
+# $result->{profile} Profile object
+# $result->{threads} Map of thread IDs to profile objects
+# $result->{map} Memory map info from profile
+# $result->{pcs} Hash of all PC values seen, key is hex address
+sub ReadProfile {
+ my $prog = shift;
+ my $fname = shift;
+ my $result; # return value
+
+ $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $contention_marker = $&;
+ $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $growth_marker = $&;
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $profile_marker = $&;
+ $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $heap_marker = $&;
+
+ # Look at first line to see if it is a heap or a CPU profile.
+ # CPU profile may start with no header at all, and just binary data
+ # (starting with \0\0\0\0) -- in that case, don't try to read the
+ # whole firstline, since it may be gigabytes(!) of data.
+ open(PROFILE, "<$fname") || error("$fname: $!\n");
+ binmode PROFILE; # New perls do UTF-8 processing
+ my $header = ReadProfileHeader(*PROFILE);
+ if (!defined($header)) { # means "at EOF"
+ error("Profile is empty.\n");
+ }
+
+ my $symbols;
+ if ($header =~ m/^--- *$symbol_marker/o) {
+ # Verify that the user asked for a symbolized profile
+ if (!$main::use_symbolized_profile) {
+ # we have both a binary and a symbolized profile; abort
+ error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " .
+ "a binary arg. Try again without passing\n $prog\n");
+ }
+ # Read the symbol section of the symbolized profile file.
+ $symbols = ReadSymbols(*PROFILE{IO});
+ # Read the next line to get the header for the remaining profile.
+ $header = ReadProfileHeader(*PROFILE) || "";
+ }
+
+ if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
+ # Skip "--- ..." line for profile types that have their own headers.
+ $header = ReadProfileHeader(*PROFILE) || "";
+ }
+
+ $main::profile_type = '';
+
+ if ($header =~ m/^heap profile:.*$growth_marker/o) {
+ $main::profile_type = 'growth';
+ $result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap profile:/) {
+ $main::profile_type = 'heap';
+ $result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap/) {
+ $main::profile_type = 'heap';
+ $result = ReadThreadedHeapProfile($prog, $fname, $header);
+ } elsif ($header =~ m/^--- *$contention_marker/o) {
+ $main::profile_type = 'contention';
+ $result = ReadSynchProfile($prog, *PROFILE);
+ } elsif ($header =~ m/^--- *Stacks:/) {
+ print STDERR
+ "Old format contention profile: mistakenly reports " .
+ "condition variable signals as lock contentions.\n";
+ $main::profile_type = 'contention';
+ $result = ReadSynchProfile($prog, *PROFILE);
+ } elsif ($header =~ m/^--- *$profile_marker/) {
+ # the binary cpu profile data starts immediately after this line
+ $main::profile_type = 'cpu';
+ $result = ReadCPUProfile($prog, $fname, *PROFILE);
+ } else {
+ if (defined($symbols)) {
+ # a symbolized profile contains a format we don't recognize, bail out
+ error("$fname: Cannot recognize profile section after symbols.\n");
+ }
+ # no ascii header present -- must be a CPU profile
+ $main::profile_type = 'cpu';
+ $result = ReadCPUProfile($prog, $fname, *PROFILE);
+ }
+
+ close(PROFILE);
+
+ # if we got symbols along with the profile, return those as well
+ if (defined($symbols)) {
+ $result->{symbols} = $symbols;
+ }
+
+ return $result;
+}
+
+# Subtract one from caller pc so we map back to call instr.
+# However, don't do this if we're reading a symbolized profile
+# file, in which case the subtract-one was done when the file
+# was written.
+#
+# We apply the same logic to all readers, though ReadCPUProfile uses an
+# independent implementation.
+sub FixCallerAddresses {
+ my $stack = shift;
+ # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
+ # dumps unadjusted profiles.
+ {
+ $stack =~ /(\s)/;
+ my $delimiter = $1;
+ my @addrs = split(' ', $stack);
+ my @fixedaddrs;
+ $#fixedaddrs = $#addrs;
+ if ($#addrs >= 0) {
+ $fixedaddrs[0] = $addrs[0];
+ }
+ for (my $i = 1; $i <= $#addrs; $i++) {
+ $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
+ }
+ return join $delimiter, @fixedaddrs;
+ }
+}
+
+# CPU profile reader
+sub ReadCPUProfile {
+ my $prog = shift;
+ my $fname = shift; # just used for logging
+ local *PROFILE = shift;
+ my $version;
+ my $period;
+ my $i;
+ my $profile = {};
+ my $pcs = {};
+
+ # Parse string into array of slots.
+ my $slots = CpuProfileStream->new(*PROFILE, $fname);
+
+ # Read header. The current header version is a 5-element structure
+ # containing:
+ # 0: header count (always 0)
+ # 1: header "words" (after this one: 3)
+ # 2: format version (0)
+ # 3: sampling period (usec)
+ # 4: unused padding (always 0)
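+ # For example (illustrative values only), a profile recorded with a 10ms
+ # sampling period would begin with the five slots:
+ #   0, 3, 0, 10000, 0
+ # i.e. header count 0, three more header words, format version 0, a
+ # 10000-usec period, and a padding word of 0.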
+ if ($slots->get(0) != 0 ) {
+ error("$fname: not a profile file, or old format profile file\n");
+ }
+ $i = 2 + $slots->get(1);
+ $version = $slots->get(2);
+ $period = $slots->get(3);
+ # Do some sanity checking on these header values.
+ if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
+ error("$fname: not a profile file, or corrupted profile file\n");
+ }
+
+ # Parse profile
+ while ($slots->get($i) != -1) {
+ my $n = $slots->get($i++);
+ my $d = $slots->get($i++);
+ if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth?
+ my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
+ print STDERR "At index $i (address $addr):\n";
+ error("$fname: stack trace depth >= 2**32\n");
+ }
+ if ($slots->get($i) == 0) {
+ # End of profile data marker
+ $i += $d;
+ last;
+ }
+
+ # Make key out of the stack entries
+ my @k = ();
+ for (my $j = 0; $j < $d; $j++) {
+ my $pc = $slots->get($i+$j);
+ # Subtract one from caller pc so we map back to call instr.
+ $pc--;
+ $pc = sprintf("%0*x", $address_length, $pc);
+ $pcs->{$pc} = 1;
+ push @k, $pc;
+ }
+
+ AddEntry($profile, (join "\n", @k), $n);
+ $i += $d;
+ }
+
+ # Parse map
+ my $map = '';
+ seek(PROFILE, $i * 4, 0);
+ read(PROFILE, $map, (stat PROFILE)[7]);
+
+ my $r = {};
+ $r->{version} = $version;
+ $r->{period} = $period;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+
+ return $r;
+}
+
+sub HeapProfileIndex {
+ my $index = 1;
+ if ($main::opt_inuse_space) {
+ $index = 1;
+ } elsif ($main::opt_inuse_objects) {
+ $index = 0;
+ } elsif ($main::opt_alloc_space) {
+ $index = 3;
+ } elsif ($main::opt_alloc_objects) {
+ $index = 2;
+ }
+ return $index;
+}
+
+sub ReadMappedLibraries {
+ my $fh = shift;
+ my $map = "";
+ # Read the /proc/self/maps data
+ while (<$fh>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub ReadMemoryMap {
+ my $fh = shift;
+ my $map = "";
+ # Read /proc/self/maps data as formatted by DumpAddressMap()
+ my $buildvar = "";
+ while (<$fh>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Parse "build=<dir>" specification if supplied
+ if (m/^\s*build=(.*)\n/) {
+ $buildvar = $1;
+ }
+
+ # Expand "$build" variable if available
+ $_ =~ s/\$build\b/$buildvar/g;
+
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub AdjustSamples {
+ my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
+ if ($sample_adjustment) {
+ if ($sampling_algorithm == 2) {
+ # Remote-heap version 2
+ # The sampling frequency is the rate of a Poisson process.
+ # This means that the probability of sampling an allocation of
+ # size X with sampling rate Y is 1 - exp(-X/Y)
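+ # Illustrative arithmetic (hypothetical numbers): with a v2 sampling rate
+ # of Y = 524288 bytes and a mean sampled allocation size of
+ # X = s1/n1 = 4096 bytes, X/Y = 0.0078125 and the scale factor is
+ # 1/(1 - exp(-0.0078125)), roughly 128.5, so each sampled object stands in
+ # for about 128.5 actual allocations.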
+ if ($n1 != 0) {
+ my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n1 *= $scale_factor;
+ $s1 *= $scale_factor;
+ }
+ if ($n2 != 0) {
+ my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n2 *= $scale_factor;
+ $s2 *= $scale_factor;
+ }
+ } else {
+ # Remote-heap version 1
+ my $ratio;
+ $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n1 /= $ratio;
+ $s1 /= $ratio;
+ }
+ $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n2 /= $ratio;
+ $s2 /= $ratio;
+ }
+ }
+ }
+ return ($n1, $s1, $n2, $s2);
+}
+
+sub ReadHeapProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $index = HeapProfileIndex();
+
+ # Find the type of this profile. The header line looks like:
+ # heap profile: 1246: 8800744 [ 1246: 8800744] @ <heap-url>/266053
+ # There are two pairs <count: size>, the first inuse objects/space, and the
+ # second allocated objects/space. This is followed optionally by a profile
+ # type, and if that is present, optionally by a sampling frequency.
+ # For remote heap profiles (v1):
+ # The interpretation of the sampling frequency is that the profiler, for
+ # each sample, calculates a uniformly distributed random integer less than
+ # the given value, and records the next sample after that many bytes have
+ # been allocated. Therefore, the expected sample interval is half of the
+ # given frequency. By default, if not specified, the expected sample
+ # interval is 128KB. Only remote-heap-page profiles are adjusted for
+ # sample size.
+ # For remote heap profiles (v2):
+ # The sampling frequency is the rate of a Poisson process. This means that
+ # the probability of sampling an allocation of size X with sampling rate Y
+ # is 1 - exp(-X/Y)
+ # For version 2, a typical header line might look like this:
+ # heap profile: 1922: 127792360 [ 1922: 127792360] @ <heap-url>_v2/524288
+ # the trailing number (524288) is the sampling rate. (Version 1 showed
+ # double the 'rate' here)
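+ # As an illustrative trace of the regexp below against a v2 header such as
+ # the one above: $6 captures the type string after the '@' (e.g. "heap_v2")
+ # and $8 captures the trailing rate ("524288"), so $sampling_algorithm is
+ # set to 2 and $sample_adjustment to 524288.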
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
+ if (defined($6) && ($6 ne '')) {
+ $type = $6;
+ my $sample_period = $8;
+ # $type is "heapprofile" for profiles generated by the
+ # heap-profiler, and either "heap" or "heap_v2" for profiles
+ # generated by sampling directly within tcmalloc. It can also
+ # be "growth" for heap-growth profiles. The first is typically
+ # found for profiles generated locally, and the others for
+ # remote profiles.
+ if (($type eq "heapprofile") || ($type !~ /heap/) ) {
+ # No need to adjust for the sampling rate with heap-profiler-derived data
+ $sampling_algorithm = 0;
+ } elsif ($type =~ /_v2/) {
+ $sampling_algorithm = 2; # version 2 sampling
+ if (defined($sample_period) && ($sample_period ne '')) {
+ $sample_adjustment = int($sample_period);
+ }
+ } else {
+ $sampling_algorithm = 1; # version 1 sampling
+ if (defined($sample_period) && ($sample_period ne '')) {
+ $sample_adjustment = int($sample_period)/2;
+ }
+ }
+ } else {
+ # We detect whether or not this is a remote-heap profile by checking
+ # that the total-allocated stats ($n2,$s2) are exactly the
+ # same as the in-use stats ($n1,$s1). It is remotely conceivable
+ # that a non-remote-heap profile may pass this check, but it is hard
+ # to imagine how that could happen.
+ # In this case it's so old it's guaranteed to be remote-heap version 1.
+ my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ if (($n1 == $n2) && ($s1 == $s2)) {
+ # This is likely to be a remote-heap based sample profile
+ $sampling_algorithm = 1;
+ }
+ }
+ }
+
+ if ($sampling_algorithm > 0) {
+ # For remote-heap generated profiles, adjust the counts and sizes to
+ # account for the sample rate (we sample once every 128KB by default).
+ if ($sample_adjustment == 0) {
+ # Turn on profile adjustment.
+ $sample_adjustment = 128*1024;
+ print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
+ } else {
+ printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
+ $sample_adjustment);
+ }
+ if ($sampling_algorithm > 1) {
+ # We don't bother printing anything for the original version (version 1)
+ printf STDERR "Heap version $sampling_algorithm\n";
+ }
+ }
+
+ my $profile = {};
+ my $pcs = {};
+ my $map = "";
+
+ while (<PROFILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
+ my $stack = $5;
+ my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
+
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadThreadedHeapProfile {
+ my ($prog, $fname, $header) = @_;
+
+ my $index = HeapProfileIndex();
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ # Assuming a very specific type of header for now.
+ if ($header =~ m"^heap_v2/(\d+)") {
+ $type = "_v2";
+ $sampling_algorithm = 2;
+ $sample_adjustment = int($1);
+ }
+ if ($type ne "_v2" || !defined($sample_adjustment)) {
+ die "Threaded heap profiles require v2 sampling with a sample rate\n";
+ }
+
+ my $profile = {};
+ my $thread_profiles = {};
+ my $pcs = {};
+ my $map = "";
+ my $stack = "";
+
+ while (<PROFILE>) {
+ s/\r//g;
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # @ a1 a2 ... an
+ # t*: <count1>: <bytes1> [<count2>: <bytes2>]
+ # t1: <count1>: <bytes1> [<count2>: <bytes2>]
+ # ...
+ # tn: <count1>: <bytes1> [<count2>: <bytes2>]
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^@\s+(.*)$/) {
+ $stack = $1;
+ } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
+ if ($stack eq "") {
+ # Still in the header, so this is just a per-thread summary.
+ next;
+ }
+ my $thread = $2;
+ my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ if ($thread eq "*") {
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ } else {
+ if (!exists($thread_profiles->{$thread})) {
+ $thread_profiles->{$thread} = {};
+ }
+ AddEntries($thread_profiles->{$thread}, $pcs,
+ FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
+ }
+
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{threads} = $thread_profiles;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadSynchProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $map = '';
+ my $profile = {};
+ my $pcs = {};
+ my $sampling_period = 1;
+ my $cyclespernanosec = 2.8; # Default assumption for old binaries
+ my $seen_clockrate = 0;
+ my $line;
+
+ my $index = 0;
+ if ($main::opt_total_delay) {
+ $index = 0;
+ } elsif ($main::opt_contentions) {
+ $index = 1;
+ } elsif ($main::opt_mean_delay) {
+ $index = 2;
+ }
+
+ while ( $line = <PROFILE> ) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
+ my ($cycles, $count, $stack) = ($1, $2, $3);
+
+ # Convert cycles to nanoseconds
+ $cycles /= $cyclespernanosec;
+
+ # Adjust for sampling done by application
+ $cycles *= $sampling_period;
+ $count *= $sampling_period;
+
+ my @values = ($cycles, $count, $cycles / $count);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
+
+ } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ ||
+ $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
+ my ($cycles, $stack) = ($1, $2);
+ if ($cycles !~ /^\d+$/) {
+ next;
+ }
+
+ # Convert cycles to nanoseconds
+ $cycles /= $cyclespernanosec;
+
+ # Adjust for sampling done by application
+ $cycles *= $sampling_period;
+
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
+
+ } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
+ my ($variable, $value) = ($1,$2);
+ for ($variable, $value) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+ if ($variable eq "cycles/second") {
+ $cyclespernanosec = $value / 1e9;
+ $seen_clockrate = 1;
+ } elsif ($variable eq "sampling period") {
+ $sampling_period = $value;
+ } elsif ($variable eq "ms since reset") {
+ # Currently nothing is done with this value in jeprof
+ # So we just silently ignore it for now
+ } elsif ($variable eq "discarded samples") {
+ # Currently nothing is done with this value in jeprof
+ # So we just silently ignore it for now
+ } else {
+ printf STDERR ("Ignoring unnknown variable in /contention output: " .
+ "'%s' = '%s'\n",$variable,$value);
+ }
+ } else {
+ # Memory map entry
+ $map .= $line;
+ }
+ }
+
+ if (!$seen_clockrate) {
+ printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
+ $cyclespernanosec);
+ }
+
+ my $r = {};
+ $r->{version} = 0;
+ $r->{period} = $sampling_period;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+# Given a hex value in the form "0x1abcd" or "1abcd", return either
+# "0001abcd" or "000000000001abcd", depending on the current (global)
+# address length.
+sub HexExtend {
+ my $addr = shift;
+
+ $addr =~ s/^(0x)?0*//;
+ my $zeros_needed = $address_length - length($addr);
+ if ($zeros_needed < 0) {
+ printf STDERR "Warning: address $addr is longer than address length $address_length\n";
+ return $addr;
+ }
+ return ("0" x $zeros_needed) . $addr;
+}
+
+##### Symbol extraction #####
+
+# Aggressively search the lib_prefix values for the given library
+# If all else fails, just return the name of the library unmodified.
+# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
+# it will search the following locations in this order, until it finds a file:
+# /my/path/lib/dir/mylib.so
+# /other/path/lib/dir/mylib.so
+# /my/path/dir/mylib.so
+# /other/path/dir/mylib.so
+# /my/path/mylib.so
+# /other/path/mylib.so
+# /lib/dir/mylib.so (returned as last resort)
+sub FindLibrary {
+ my $file = shift;
+ my $suffix = $file;
+
+ # Search for the library as described above
+ do {
+ foreach my $prefix (@prefix_list) {
+ my $fullpath = $prefix . $suffix;
+ if (-e $fullpath) {
+ return $fullpath;
+ }
+ }
+ } while ($suffix =~ s|^/[^/]+/|/|);
+ return $file;
+}
+
+# Return path to library with debugging symbols.
+# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+sub DebuggingLibrary {
+ my $file = shift;
+ if ($file =~ m|^/|) {
+ if (-f "/usr/lib/debug$file") {
+ return "/usr/lib/debug$file";
+ } elsif (-f "/usr/lib/debug$file.debug") {
+ return "/usr/lib/debug$file.debug";
+ }
+ }
+ return undef;
+}
+
+# Parse text section header of a library using objdump
+sub ParseTextSectionHeaderFromObjdump {
+ my $lib = shift;
+
+ my $size = undef;
+ my $vma;
+ my $file_offset;
+ # Get objdump output from the library file to figure out how to
+ # map between mapped addresses and addresses in the library.
+ my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
+ open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+ while (<OBJDUMP>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Idx Name Size VMA LMA File off Algn
+ # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4
+ # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
+ # offset may still be 8. But AddressSub below will still handle that.
+ my @x = split;
+ if (($#x >= 6) && ($x[1] eq '.text')) {
+ $size = $x[2];
+ $vma = $x[3];
+ $file_offset = $x[5];
+ last;
+ }
+ }
+ close(OBJDUMP);
+
+ if (!defined($size)) {
+ return undef;
+ }
+
+ my $r = {};
+ $r->{size} = $size;
+ $r->{vma} = $vma;
+ $r->{file_offset} = $file_offset;
+
+ return $r;
+}
+
+# Parse text section header of a library using otool (on OS X)
+sub ParseTextSectionHeaderFromOtool {
+ my $lib = shift;
+
+ my $size = undef;
+ my $vma = undef;
+ my $file_offset = undef;
+ # Get otool output from the library file to figure out how to
+ # map between mapped addresses and addresses in the library.
+ my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
+ open(OTOOL, "$command |") || error("$command: $!\n");
+ my $cmd = "";
+ my $sectname = "";
+ my $segname = "";
+ foreach my $line (<OTOOL>) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Load command <#>
+ # cmd LC_SEGMENT
+ # [...]
+ # Section
+ # sectname __text
+ # segname __TEXT
+ # addr 0x000009f8
+ # size 0x00018b9e
+ # offset 2552
+ # align 2^2 (4)
+ # We will need to strip off the leading 0x from the hex addresses,
+ # and convert the offset into hex.
+ if ($line =~ /Load command/) {
+ $cmd = "";
+ $sectname = "";
+ $segname = "";
+ } elsif ($line =~ /Section/) {
+ $sectname = "";
+ $segname = "";
+ } elsif ($line =~ /cmd (\w+)/) {
+ $cmd = $1;
+ } elsif ($line =~ /sectname (\w+)/) {
+ $sectname = $1;
+ } elsif ($line =~ /segname (\w+)/) {
+ $segname = $1;
+ } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
+ $sectname eq "__text" &&
+ $segname eq "__TEXT")) {
+ next;
+ } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
+ $vma = $1;
+ } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
+ $size = $1;
+ } elsif ($line =~ /\boffset ([0-9]+)/) {
+ $file_offset = sprintf("%016x", $1);
+ }
+ if (defined($vma) && defined($size) && defined($file_offset)) {
+ last;
+ }
+ }
+ close(OTOOL);
+
+ if (!defined($vma) || !defined($size) || !defined($file_offset)) {
+ return undef;
+ }
+
+ my $r = {};
+ $r->{size} = $size;
+ $r->{vma} = $vma;
+ $r->{file_offset} = $file_offset;
+
+ return $r;
+}
+
+sub ParseTextSectionHeader {
+ # obj_tool_map("otool") is only defined if we're in a Mach-O environment
+ if (defined($obj_tool_map{"otool"})) {
+ my $r = ParseTextSectionHeaderFromOtool(@_);
+ if (defined($r)){
+ return $r;
+ }
+ }
+ # If otool doesn't work, or we don't have it, fall back to objdump
+ return ParseTextSectionHeaderFromObjdump(@_);
+}
+
+# Split /proc/pid/maps dump into a list of libraries
+sub ParseLibraries {
+ return if $main::use_symbol_page; # We don't need libraries info.
+ my $prog = shift;
+ my $map = shift;
+ my $pcs = shift;
+
+ my $result = [];
+ my $h = "[a-f0-9]+";
+ my $zero_offset = HexExtend("0");
+
+ my $buildvar = "";
+ foreach my $l (split("\n", $map)) {
+ if ($l =~ m/^\s*build=(.*)$/) {
+ $buildvar = $1;
+ }
+
+ my $start;
+ my $finish;
+ my $offset;
+ my $lib;
+ if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
+ # Full line from /proc/self/maps. Example:
+ # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = HexExtend($3);
+ $lib = $4;
+ $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+ } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
+ # Cooked line from DumpAddressMap. Example:
+ # 40000000-40015000: /lib/ld-2.3.2.so
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = $3;
+ }
+ # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
+ # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
+ #
+ # Example:
+ # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
+ # o.1 NCH -1
+ elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = FindLibrary($5);
+
+ } else {
+ next;
+ }
+
+ # Expand "$build" variable if available
+ $lib =~ s/\$build\b/$buildvar/g;
+
+ $lib = FindLibrary($lib);
+
+ # Check for pre-relocated libraries, which use pre-relocated symbol tables
+ # and thus require adjusting the offset that we'll use to translate
+ # VM addresses into symbol table addresses.
+ # Only do this if we're not going to fetch the symbol table from a
+ # debugging copy of the library.
+ if (!DebuggingLibrary($lib)) {
+ my $text = ParseTextSectionHeader($lib);
+ if (defined($text)) {
+ my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
+ $offset = AddressAdd($offset, $vma_offset);
+ }
+ }
+
+ if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
+ push(@{$result}, [$lib, $start, $finish, $offset]);
+ }
+
+ # Append special entry for additional library (not relocated)
+ if ($main::opt_lib ne "") {
+ my $text = ParseTextSectionHeader($main::opt_lib);
+ if (defined($text)) {
+ my $start = $text->{vma};
+ my $finish = AddressAdd($start, $text->{size});
+
+ push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
+ }
+ }
+
+ # Append special entry for the main program. This covers
+ # 0..max_pc_value_seen, so that we assume pc values not found in one
+ # of the library ranges will be treated as coming from the main
+ # program binary.
+ my $min_pc = HexExtend("0");
+ my $max_pc = $min_pc; # find the maximal PC value in any sample
+ foreach my $pc (keys(%{$pcs})) {
+ if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
+ }
+ push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
+
+ return $result;
+}
+
+# Add two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressAdd {
+ my $addr1 = shift;
+ my $addr2 = shift;
+ my $sum;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
+ return sprintf("%08x", $sum);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize carry handling.
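+ # Worked example with 16-nibble inputs: adding 00000000ffffffff and
+ # 0000000000000001 handles the low chunks first (fffffff + 0000001
+ # overflows, leaving 0000000 with a carry), then the middle chunks
+ # (0000000 + 0000000 + carry = 0000001), then the top bytes (00 + 00),
+ # giving 0000000100000000.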
+
+ if ($main::opt_debug and $main::opt_test) {
+ print STDERR "AddressAdd $addr1 + $addr2 = ";
+ }
+
+ my $a1 = substr($addr1,-7);
+ $addr1 = substr($addr1,0,-7);
+ my $a2 = substr($addr2,-7);
+ $addr2 = substr($addr2,0,-7);
+ $sum = hex($a1) + hex($a2);
+ my $c = 0;
+ if ($sum > 0xfffffff) {
+ $c = 1;
+ $sum -= 0x10000000;
+ }
+ my $r = sprintf("%07x", $sum);
+
+ $a1 = substr($addr1,-7);
+ $addr1 = substr($addr1,0,-7);
+ $a2 = substr($addr2,-7);
+ $addr2 = substr($addr2,0,-7);
+ $sum = hex($a1) + hex($a2) + $c;
+ $c = 0;
+ if ($sum > 0xfffffff) {
+ $c = 1;
+ $sum -= 0x10000000;
+ }
+ $r = sprintf("%07x", $sum) . $r;
+
+ $sum = hex($addr1) + hex($addr2) + $c;
+ if ($sum > 0xff) { $sum -= 0x100; }
+ $r = sprintf("%02x", $sum) . $r;
+
+ if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
+
+ return $r;
+ }
+}
+
+
+# Subtract two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressSub {
+ my $addr1 = shift;
+ my $addr2 = shift;
+ my $diff;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
+ return sprintf("%08x", $diff);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize borrow handling.
+ # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
+
+ my $a1 = hex(substr($addr1,-7));
+ $addr1 = substr($addr1,0,-7);
+ my $a2 = hex(substr($addr2,-7));
+ $addr2 = substr($addr2,0,-7);
+ my $b = 0;
+ if ($a2 > $a1) {
+ $b = 1;
+ $a1 += 0x10000000;
+ }
+ $diff = $a1 - $a2;
+ my $r = sprintf("%07x", $diff);
+
+ $a1 = hex(substr($addr1,-7));
+ $addr1 = substr($addr1,0,-7);
+ $a2 = hex(substr($addr2,-7)) + $b;
+ $addr2 = substr($addr2,0,-7);
+ $b = 0;
+ if ($a2 > $a1) {
+ $b = 1;
+ $a1 += 0x10000000;
+ }
+ $diff = $a1 - $a2;
+ $r = sprintf("%07x", $diff) . $r;
+
+ $a1 = hex($addr1);
+ $a2 = hex($addr2) + $b;
+ if ($a2 > $a1) { $a1 += 0x100; }
+ $diff = $a1 - $a2;
+ $r = sprintf("%02x", $diff) . $r;
+
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+
+ return $r;
+ }
+}
+
+# Increment a hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressInc {
+ my $addr = shift;
+ my $sum;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $sum = (hex($addr)+1) % (0x10000000 * 16);
+ return sprintf("%08x", $sum);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize carry handling.
+ # We are always doing this to step through the addresses in a function,
+ # and will almost never overflow the first chunk, so we check for this
+ # case and exit early.
+
+ # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
+
+ my $a1 = substr($addr,-7);
+ $addr = substr($addr,0,-7);
+ $sum = hex($a1) + 1;
+ my $r = sprintf("%07x", $sum);
+ if ($sum <= 0xfffffff) {
+ $r = $addr . $r;
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return HexExtend($r);
+ } else {
+ $r = "0000000";
+ }
+
+ $a1 = substr($addr,-7);
+ $addr = substr($addr,0,-7);
+ $sum = hex($a1) + 1;
+ $r = sprintf("%07x", $sum) . $r;
+ if ($sum <= 0xfffffff) {
+ $r = $addr . $r;
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return HexExtend($r);
+ } else {
+ $r = "00000000000000";
+ }
+
+ $sum = hex($addr) + 1;
+ if ($sum > 0xff) { $sum -= 0x100; }
+ $r = sprintf("%02x", $sum) . $r;
+
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return $r;
+ }
+}
+
+# Extract symbols for all PC values found in profile
+sub ExtractSymbols {
+ my $libs = shift;
+ my $pcset = shift;
+
+ my $symbols = {};
+
+ # Map each PC value to the containing library. To make this faster,
+ # we sort libraries by their starting pc value (highest first), and
+ # advance through the libraries as we advance the pc. Sometimes the
+ # addresses of libraries may overlap with the addresses of the main
+ # binary, so to make sure the libraries 'win', we iterate over the
+ # libraries in reverse order (which assumes the binary doesn't start
+ # in the middle of a library, which seems a fair assumption).
+ my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings
+ foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
+ my $libname = $lib->[0];
+ my $start = $lib->[1];
+ my $finish = $lib->[2];
+ my $offset = $lib->[3];
+
+ # Use debug library if it exists
+ my $debug_libname = DebuggingLibrary($libname);
+ if ($debug_libname) {
+ $libname = $debug_libname;
+ }
+
+ # Get list of pcs that belong in this library.
+ my $contained = [];
+ my ($start_pc_index, $finish_pc_index);
+ # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
+ for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
+ $finish_pc_index--) {
+ last if $pcs[$finish_pc_index - 1] le $finish;
+ }
+ # Find smallest start_pc_index such that $start <= $pc[$start_pc_index].
+ for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
+ $start_pc_index--) {
+ last if $pcs[$start_pc_index - 1] lt $start;
+ }
+ # This keeps PC values higher than $pc[$finish_pc_index] in @pcs,
+ # in case there are overlaps in libraries and the main binary.
+ @{$contained} = splice(@pcs, $start_pc_index,
+ $finish_pc_index - $start_pc_index);
+ # Map to symbols
+ MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
+ }
+
+ return $symbols;
+}
+
+# Map list of PC values to symbols for a given image
+sub MapToSymbols {
+ my $image = shift;
+ my $offset = shift;
+ my $pclist = shift;
+ my $symbols = shift;
+
+ my $debug = 0;
+
+ # Ignore empty binaries
+ if ($#{$pclist} < 0) { return; }
+
+ # Figure out the addr2line command to use
+ my $addr2line = $obj_tool_map{"addr2line"};
+ my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
+ if (exists $obj_tool_map{"addr2line_pdb"}) {
+ $addr2line = $obj_tool_map{"addr2line_pdb"};
+ $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
+ }
+
+ # If "addr2line" isn't installed on the system at all, just use
+ # nm to get what info we can (function names, but not line numbers).
+ if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
+ MapSymbolsWithNM($image, $offset, $pclist, $symbols);
+ return;
+ }
+
+ # "addr2line -i" can produce a variable number of lines per input
+ # address, with no separator that allows us to tell when data for
+ # the next address starts. So we find the address for a special
+ # symbol (_fini) and interleave this address between all real
+ # addresses passed to addr2line. The name of this special symbol
+ # can then be used as a separator.
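+ # As a sketch of what this produces: if the PC list holds p1 and p2 and
+ # _fini resolves to address f (names here are purely illustrative), the
+ # temp file written below contains the lines p1, f, p2, f, and each run of
+ # addr2line output that ends at the _fini symbol is attributed to the
+ # preceding real PC.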
+ $sep_address = undef; # May be filled in by MapSymbolsWithNM()
+ my $nm_symbols = {};
+ MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
+ if (defined($sep_address)) {
+ # Only add " -i" to addr2line if the binary supports it.
+ # addr2line --help returns 0, but not if it sees an unknown flag first.
+ if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
+ $cmd .= " -i";
+ } else {
+ $sep_address = undef; # no need for sep_address if we don't support -i
+ }
+ }
+
+ # Make file with all PC values with intervening 'sep_address' so
+ # that we can reliably detect the end of inlined function list
+ open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
+ if ($debug) { print("---- $image ---\n"); }
+ for (my $i = 0; $i <= $#{$pclist}; $i++) {
+ # addr2line always reads hex addresses, and does not need '0x' prefix.
+ if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
+ printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
+ if (defined($sep_address)) {
+ printf ADDRESSES ("%s\n", $sep_address);
+ }
+ }
+ close(ADDRESSES);
+ if ($debug) {
+ print("----\n");
+ system("cat", $main::tmpfile_sym);
+ print("----\n");
+ system("$cmd < " . ShellEscape($main::tmpfile_sym));
+ print("----\n");
+ }
+
+ open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
+ || error("$cmd: $!\n");
+ my $count = 0; # Index in pclist
+ while (<SYMBOLS>) {
+ # Read fullfunction and filelineinfo from next pair of lines
+ s/\r?\n$//g;
+ my $fullfunction = $_;
+ $_ = <SYMBOLS>;
+ s/\r?\n$//g;
+ my $filelinenum = $_;
+
+ if (defined($sep_address) && $fullfunction eq $sep_symbol) {
+ # Terminating marker for data for this address
+ $count++;
+ next;
+ }
+
+ $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+
+ my $pcstr = $pclist->[$count];
+ my $function = ShortFunctionName($fullfunction);
+ my $nms = $nm_symbols->{$pcstr};
+ if (defined($nms)) {
+ if ($fullfunction eq '??') {
+ # nm found a symbol for us.
+ $function = $nms->[0];
+ $fullfunction = $nms->[2];
+ } else {
+ # MapSymbolsWithNM tags each routine with its starting address,
+ # useful in case the image has multiple occurrences of this
+ # routine.  (It uses a syntax that resembles template parameters,
+ # which are automatically stripped out by ShortFunctionName().)
+ # addr2line does not provide the same information. So we check
+ # if nm disambiguated our symbol, and if so take the annotated
+ # (nm) version of the routine-name. TODO(csilvers): this won't
+ # catch overloaded, inlined symbols, which nm doesn't see.
+ # Better would be to do a check similar to nm's, in this fn.
+ if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn
+ $function = $nms->[0];
+ $fullfunction = $nms->[2];
+ }
+ }
+ }
+
+ # Prepend to accumulated symbols for pcstr
+ # (so that caller comes before callee)
+ my $sym = $symbols->{$pcstr};
+ if (!defined($sym)) {
+ $sym = [];
+ $symbols->{$pcstr} = $sym;
+ }
+ unshift(@{$sym}, $function, $filelinenum, $fullfunction);
+ if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
+ if (!defined($sep_address)) {
+ # Inlining is off, so this entry ends immediately
+ $count++;
+ }
+ }
+ close(SYMBOLS);
+}
+
+# Use nm to map the list of referenced PCs to symbols. Return true iff we
+# are able to read procedure information via nm.
+sub MapSymbolsWithNM {
+ my $image = shift;
+ my $offset = shift;
+ my $pclist = shift;
+ my $symbols = shift;
+
+ # Get nm output sorted by increasing address
+ my $symbol_table = GetProcedureBoundaries($image, ".");
+ if (!%{$symbol_table}) {
+ return 0;
+ }
+ # Start addresses are already the right length (8 or 16 hex digits).
+ my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
+ keys(%{$symbol_table});
+
+ if ($#names < 0) {
+ # No symbols: just use addresses
+ foreach my $pc (@{$pclist}) {
+ my $pcstr = "0x" . $pc;
+ $symbols->{$pc} = [$pcstr, "?", $pcstr];
+ }
+ return 0;
+ }
+
+ # Sort addresses so we can do a join against nm output
+ my $index = 0;
+ my $fullname = $names[0];
+ my $name = ShortFunctionName($fullname);
+ foreach my $pc (sort { $a cmp $b } @{$pclist}) {
+ # Adjust for mapped offset
+ my $mpc = AddressSub($pc, $offset);
+ while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
+ $index++;
+ $fullname = $names[$index];
+ $name = ShortFunctionName($fullname);
+ }
+ if ($mpc lt $symbol_table->{$fullname}->[1]) {
+ $symbols->{$pc} = [$name, "?", $fullname];
+ } else {
+ my $pcstr = "0x" . $pc;
+ $symbols->{$pc} = [$pcstr, "?", $pcstr];
+ }
+ }
+ return 1;
+}
+
+sub ShortFunctionName {
+ my $function = shift;
+ while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types
+ while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments
+ $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type
+ return $function;
+}
+
+# Trim overly long symbols found in disassembler output
+sub CleanDisassembly {
+ my $d = shift;
+ while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
+ while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments
+ return $d;
+}
+
+# Clean file name for display
+sub CleanFileName {
+ my ($f) = @_;
+ $f =~ s|^/proc/self/cwd/||;
+ $f =~ s|^\./||;
+ return $f;
+}
+
+# Make address relative to section and clean up for display
+sub UnparseAddress {
+ my ($offset, $address) = @_;
+ $address = AddressSub($address, $offset);
+ $address =~ s/^0x//;
+ $address =~ s/^0*//;
+ return $address;
+}
+
+##### Miscellaneous #####
+
+# Find the right versions of the above object tools to use. The
+# argument is the program file being analyzed, and should be an ELF
+# 32-bit or ELF 64-bit executable file. The location of the tools
+# is determined by considering the following options in this order:
+# 1) --tools option, if set
+# 2) JEPROF_TOOLS environment variable, if set
+# 3) the environment
+sub ConfigureObjTools {
+ my $prog_file = shift;
+
+ # Check for the existence of $prog_file because /usr/bin/file does not
+ # predictably return error status in prod.
+ (-e $prog_file) || error("$prog_file does not exist.\n");
+
+ my $file_type = undef;
+ if (-e "/usr/bin/file") {
+ # Follow symlinks (at least for systems where "file" supports that).
+ my $escaped_prog_file = ShellEscape($prog_file);
+ $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
+ /usr/bin/file $escaped_prog_file`;
+ } elsif ($^O eq "MSWin32") {
+ $file_type = "MS Windows";
+ } else {
+ print STDERR "WARNING: Can't determine the file type of $prog_file";
+ }
+
+ if ($file_type =~ /64-bit/) {
+ # Change $address_length to 16 if the program file is ELF 64-bit.
+ # We can't detect this from many (most?) heap or lock contention
+ # profiles, since the actual addresses referenced are generally in low
+ # memory even for 64-bit programs.
+ $address_length = 16;
+ }
+
+ if ($file_type =~ /MS Windows/) {
+ # For windows, we provide a version of nm and addr2line as part of
+ # the opensource release, which is capable of parsing
+ # Windows-style PDB executables. It should live in the path, or
+ # in the same directory as jeprof.
+ $obj_tool_map{"nm_pdb"} = "nm-pdb";
+ $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
+ }
+
+ if ($file_type =~ /Mach-O/) {
+ # OS X uses otool to examine Mach-O files, rather than objdump.
+ $obj_tool_map{"otool"} = "otool";
+ $obj_tool_map{"addr2line"} = "false"; # no addr2line
+ $obj_tool_map{"objdump"} = "false"; # no objdump
+ }
+
+ # Go fill in %obj_tool_map with the pathnames to use:
+ foreach my $tool (keys %obj_tool_map) {
+ $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
+ }
+}
+
+# Returns the path of a caller-specified object tool. If --tools or
+# JEPROF_TOOLS are specified, then returns the full path to the tool
+# with that prefix. Otherwise, returns the path unmodified (which
+# means we will look for it on PATH).
+sub ConfigureTool {
+ my $tool = shift;
+ my $path;
+
+ # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
+ # item is either a) a pathname prefix, or b) a map of the form
+ # <tool>:<path>. First we look for an entry of type (b) for our
+ # tool. If one is found, we use it. Otherwise, we consider all the
+ # pathname prefixes in turn, until one yields an existing file. If
+ # none does, we use a default path.
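+ # For example (hypothetical paths): --tools=/opt/cross/bin/arm-linux- makes
+ # us look for /opt/cross/bin/arm-linux-nm, arm-linux-addr2line, and so on,
+ # while --tools=nm:/opt/cross/bin/custom-nm overrides only the nm entry and
+ # leaves the other tools to the usual search.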
+ my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
+ if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
+ $path = $2;
+ # TODO(csilvers): sanity-check that $path exists? Hard if it's relative.
+ } elsif ($tools ne '') {
+ foreach my $prefix (split(',', $tools)) {
+ next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list
+ if (-x $prefix . $tool) {
+ $path = $prefix . $tool;
+ last;
+ }
+ }
+ if (!$path) {
+ error("No '$tool' found with prefix specified by " .
+ "--tools (or \$JEPROF_TOOLS) '$tools'\n");
+ }
+ } else {
+ # ... otherwise use the version that exists in the same directory as
+ # jeprof. If there's nothing there, use $PATH.
+ $0 =~ m,[^/]*$,; # this is everything after the last slash
+ my $dirname = $`; # this is everything up to and including the last slash
+ if (-x "$dirname$tool") {
+ $path = "$dirname$tool";
+ } else {
+ $path = $tool;
+ }
+ }
+ if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
+ return $path;
+}
+
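+# Quote arguments so they survive the shell; words made only of safe
+# characters are left untouched. For example (illustrative):
+#   ShellEscape("nm", "-n", "/tmp/my lib.so")  =>  nm -n '/tmp/my lib.so'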
+sub ShellEscape {
+ my @escaped_words = ();
+ foreach my $word (@_) {
+ my $escaped_word = $word;
+ if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist
+ $escaped_word =~ s/'/'\\''/g;
+ $escaped_word = "'$escaped_word'";
+ }
+ push(@escaped_words, $escaped_word);
+ }
+ return join(" ", @escaped_words);
+}
+
+sub cleanup {
+ unlink($main::tmpfile_sym);
+ unlink(keys %main::tempnames);
+
+ # We leave any collected profiles in $HOME/jeprof in case the user wants
+ # to look at them later. We print a message informing them of this.
+ if ((scalar(@main::profile_files) > 0) &&
+ defined($main::collected_profile)) {
+ if (scalar(@main::profile_files) == 1) {
+ print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
+ }
+ print STDERR "If you want to investigate this profile further, you can do:\n";
+ print STDERR "\n";
+ print STDERR " jeprof \\\n";
+ print STDERR " $main::prog \\\n";
+ print STDERR " $main::collected_profile\n";
+ print STDERR "\n";
+ }
+}
+
+sub sighandler {
+ cleanup();
+ exit(1);
+}
+
+sub error {
+ my $msg = shift;
+ print STDERR $msg;
+ cleanup();
+ exit(1);
+}
+
+
+# Run $nm_command and get all the resulting procedure boundaries whose
+# names match "$regexp" and returns them in a hashtable mapping from
+# procedure name to a two-element vector of [start address, end address]
+sub GetProcedureBoundariesViaNm {
+ my $escaped_nm_command = shift; # shell-escaped
+ my $regexp = shift;
+
+ my $symbol_table = {};
+ open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
+ my $last_start = "0";
+ my $routine = "";
+ while (<NM>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (m/^\s*([0-9a-f]+) (.) (..*)/) {
+ my $start_val = $1;
+ my $type = $2;
+ my $this_routine = $3;
+
+ # It's possible for two symbols to share the same address, if
+ # one is a zero-length variable (like __start_google_malloc) or
+ # one symbol is a weak alias to another (like __libc_malloc).
+ # In such cases, we want to ignore all values except for the
+ # actual symbol, which in nm-speak has type "T". The logic
+ # below does this, though it's a bit tricky: when we have a
+ # series of lines with the same address, the first one gets
+ # queued up to be processed. However, it won't
+ # *actually* be processed until later, when we read a line with
+ # a different address. That means that as long as we're reading
+ # lines with the same address, we have a chance to replace that
+ # item in the queue, which we do whenever we see a 'T' entry --
+ # that is, a line with type 'T'. If we never see a 'T' entry,
+ # we'll just go ahead and process the first entry (which never
+ # got touched in the queue), and ignore the others.
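+ # A hypothetical nm excerpt showing the case described above:
+ #   0000000000401000 W __libc_malloc
+ #   0000000000401000 T malloc
+ # Both lines share an address, and only the 'T' entry survives as
+ # $routine.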
+ if ($start_val eq $last_start && $type =~ /t/i) {
+ # We are the 'T' symbol at this address, replace previous symbol.
+ $routine = $this_routine;
+ next;
+ } elsif ($start_val eq $last_start) {
+ # We're not the 'T' symbol at this address, so ignore us.
+ next;
+ }
+
+ if ($this_routine eq $sep_symbol) {
+ $sep_address = HexExtend($start_val);
+ }
+
+ # Tag this routine with the starting address in case the image
+ # has multiple occurrences of this routine. We use a syntax
+ # that resembles template parameters that are automatically
+ # stripped out by ShortFunctionName()
+ $this_routine .= "<$start_val>";
+
+ if (defined($routine) && $routine =~ m/$regexp/) {
+ $symbol_table->{$routine} = [HexExtend($last_start),
+ HexExtend($start_val)];
+ }
+ $last_start = $start_val;
+ $routine = $this_routine;
+ } elsif (m/^Loaded image name: (.+)/) {
+ # The win32 nm workalike emits information about the binary it is using.
+ if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
+ } elsif (m/^PDB file name: (.+)/) {
+ # The win32 nm workalike emits information about the pdb it is using.
+ if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
+ }
+ }
+ close(NM);
+ # Handle the last line in the nm output. Unfortunately, we don't know
+ # how big this last symbol is, because we don't know how big the file
+ # is. For now, we just give it a size of 0.
+ # TODO(csilvers): do better here.
+ if (defined($routine) && $routine =~ m/$regexp/) {
+ $symbol_table->{$routine} = [HexExtend($last_start),
+ HexExtend($last_start)];
+ }
+ return $symbol_table;
+}
+
+# Gets the procedure boundaries for all routines in "$image" whose names
+# match "$regexp" and returns them in a hashtable mapping from procedure
+# name to a two-element vector of [start address, end address].
+# Will return an empty map if nm is not installed or not working properly.
+sub GetProcedureBoundaries {
+ my $image = shift;
+ my $regexp = shift;
+
+ # If $image doesn't start with /, then put ./ in front of it. This works
+ # around an obnoxious bug in our probing of nm -f behavior.
+ # "nm -f $image" is supposed to fail on GNU nm, but if:
+ #
+ # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
+ # b. you have a.out in your current directory (a not uncommon occurrence)
+ #
+ # then "nm -f $image" succeeds because -f only looks at the first letter of
+ # the argument, which looks valid because it's [BbSsPp], and then since
+ # there's no image provided, it looks for a.out and finds it.
+ #
+ # This regex makes sure that $image starts with . or /, forcing the -f
+ # parsing to fail since . and / are not valid formats.
+ $image =~ s#^[^/]#./$&#;
+
+ # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+ my $debugging = DebuggingLibrary($image);
+ if ($debugging) {
+ $image = $debugging;
+ }
+
+ my $nm = $obj_tool_map{"nm"};
+ my $cppfilt = $obj_tool_map{"c++filt"};
+
+ # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
+ # binary doesn't support --demangle. In addition, for OS X we need
+ # to use the -f flag to get 'flat' nm output (otherwise we don't sort
+ # properly and get incorrect results). Unfortunately, GNU nm uses -f
+ # in an incompatible way. So first we test whether our nm supports
+ # --demangle and -f.
+ my $demangle_flag = "";
+ my $cppfilt_flag = "";
+ my $to_devnull = ">$dev_null 2>&1";
+ if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
+ # In this mode, we do "nm --demangle <foo>"
+ $demangle_flag = "--demangle";
+ $cppfilt_flag = "";
+ } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
+ # In this mode, we do "nm <foo> | c++filt"
+ $cppfilt_flag = " | " . ShellEscape($cppfilt);
+ }
+ my $flatten_flag = "";
+ if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
+ $flatten_flag = "-f";
+ }
+
+ # Finally, in case $image isn't a debug library, we try again with
+ # -D to at least get *exported* symbols. If we can't use --demangle,
+ # we use c++filt instead, if it exists on this system.
+ my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
+ $image) . " 2>$dev_null $cppfilt_flag",
+ ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
+ $image) . " 2>$dev_null $cppfilt_flag",
+ # 6nm is for Go binaries
+ ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
+ );
+
+ # If the executable is an MS Windows PDB-format executable, we'll
+ # have set up obj_tool_map("nm_pdb"). In this case, we actually
+ # want to use both unix nm and windows-specific nm_pdb, since
+ # PDB-format executables can apparently include dwarf .o files.
+ if (exists $obj_tool_map{"nm_pdb"}) {
+ push(@nm_commands,
+ ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
+ . " 2>$dev_null");
+ }
+
+ foreach my $nm_command (@nm_commands) {
+ my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
+ return $symbol_table if (%{$symbol_table});
+ }
+ my $symbol_table = {};
+ return $symbol_table;
+}
+
+
+# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
+# To make them more readable, we add underscores at interesting places.
+# This routine removes the underscores, producing the canonical representation
+# used by jeprof to represent addresses, particularly in the tested routines.
+sub CanonicalHex {
+ my $arg = shift;
+ return join '', (split '_',$arg);
+}
+
+
+# Unit test for AddressAdd:
+sub AddressAddUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressAdd ($row->[0], $row->[1]);
+ if ($sum ne $row->[2]) {
+ printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[2];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+ if ($sum ne CanonicalHex($row->[2])) {
+ printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[2];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Unit test for AddressSub:
+sub AddressSubUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressSub ($row->[0], $row->[1]);
+ if ($sum ne $row->[3]) {
+ printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[3];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+ if ($sum ne CanonicalHex($row->[3])) {
+ printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[3];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Unit test for AddressInc:
+sub AddressIncUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressInc ($row->[0]);
+ if ($sum ne $row->[4]) {
+ printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+ $row->[0], $row->[4];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressInc (CanonicalHex($row->[0]));
+ if ($sum ne CanonicalHex($row->[4])) {
+ printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+ $row->[0], $row->[4];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Driver for unit tests.
+# Currently just the address add/subtract/increment routines for 64-bit.
+sub RunUnitTests {
+ my $error_count = 0;
+
+ # This is a list of tuples [a, b, a+b, a-b, a+1]
+ my $unit_test_data_8 = [
+ [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
+ [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
+ [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
+ [qw(00000001 ffffffff 00000000 00000002 00000002)],
+ [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
+ ];
+ my $unit_test_data_16 = [
+ # The implementation handles data in 7-nibble chunks, so those are the
+ # interesting boundaries.
+ [qw(aaaaaaaa 50505050
+ 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
+ [qw(50505050 aaaaaaaa
+ 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
+ [qw(ffffffff aaaaaaaa
+ 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
+ [qw(00000001 ffffffff
+ 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
+ [qw(00000001 fffffff0
+ 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
+
+ [qw(00_a00000a_aaaaaaa 50505050
+ 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
+ [qw(0f_fff0005_0505050 aaaaaaaa
+ 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
+ [qw(00_000000f_fffffff 01_800000a_aaaaaaa
+ 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
+ [qw(00_0000000_0000001 ff_fffffff_fffffff
+ 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
+ [qw(00_0000000_0000001 ff_fffffff_ffffff0
+ ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
+ ];
+
+ $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
+ $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
+ $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
+ if ($error_count > 0) {
+ print STDERR $error_count, " errors: FAILED\n";
+ } else {
+ print STDERR "PASS\n";
+ }
+ exit ($error_count);
+}
diff --git a/memory/jemalloc/src/build-aux/config.guess b/memory/jemalloc/src/build-aux/config.guess
new file mode 100755
index 000000000..1f5c50c0d
--- /dev/null
+++ b/memory/jemalloc/src/build-aux/config.guess
@@ -0,0 +1,1420 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright 1992-2014 Free Software Foundation, Inc.
+
+timestamp='2014-03-23'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2014 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval $set_cc_for_build
+ cat <<-EOF > $dummy.c
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
+ ;;
+esac
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+	# The situation for MiNT is a little confusing.  The machine name
+	# can be virtually anything (anything which is not
+	# "atarist" or "atariste" at least should have a processor
+	# > m68000).  The system name ranges from "MiNT" through "FreeMiNT"
+	# to the lowercase versions "mint" and "freemint".  Finally,
+	# the system name "TOS" denotes a system which is actually not
+	# MiNT.  But MiNT is backward compatible with TOS, so this should
+	# be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ *:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+	# It also conflicts with pre-2.0 versions of AT&T UWIN.  Should we set
+	# UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ cris:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+ ;;
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-${LIBC}
+ exit ;;
+ or32:Linux:*:* | or1k*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-${LIBC}
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-${LIBC}
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-${LIBC}
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+ exit ;;
+ x86_64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # Avoid executing cc on OS X 10.9, as it ships with a stub
+ # that puts up a graphical alert prompting to install
+ # developer tools. Any system running Mac OS X 10.7 or
+ # later (Darwin 11 and later) is required to have a 64-bit
+ # processor. This is not true of the ARM version of Darwin
+ # that Apple uses in portable devices.
+ UNAME_PROCESSOR=x86_64
+ fi
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
+esac
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
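config.guess reports the GNU triplet of the machine it runs on, while config.sub (added next) canonicalizes user-supplied aliases into the CPU-VENDOR-OS form; autoconf-generated configure scripts such as jemalloc's call both when detecting the host. The following small Perl sketch is only an illustration of driving the two helpers by hand; the paths follow this commit's tree layout, and the printed build-host triplet will vary from machine to machine.

  #!/usr/bin/perl -w
  use strict;

  # Illustration only: shell out to the two helper scripts the way a build
  # driver might.  The path assumes the tree layout added in this commit.
  my $helpers = 'memory/jemalloc/src/build-aux';

  my $host = `sh $helpers/config.guess`;        # triplet of the build machine
  chomp $host;
  print "build host: $host\n";                  # e.g. x86_64-unknown-linux-gnu

  my $canon = `sh $helpers/config.sub x86_64-linux`;  # canonicalize an alias
  chomp $canon;
  print "canonical:  $canon\n";                 # -> x86_64-pc-linux-gnu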
diff --git a/memory/jemalloc/src/build-aux/config.sub b/memory/jemalloc/src/build-aux/config.sub
new file mode 100755
index 000000000..0ccff7706
--- /dev/null
+++ b/memory/jemalloc/src/build-aux/config.sub
@@ -0,0 +1,1797 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright 1992-2014 Free Software Foundation, Inc.
+
+timestamp='2014-05-01'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2014 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+	   echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray | -microblaze*)
+ os=
+ basic_machine=$1
+ ;;
+ -bluegene*)
+ os=-cnk
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | be32 | be64 \
+ | bfin \
+ | c4x | c8051 | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | epiphany \
+ | fido | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | k1om \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 | nios2eb | nios2el \
+ | ns16k | ns32k \
+ | open8 | or1k | or1knd | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
+ | pyramid \
+ | rl78 | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+ | we32k \
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | aarch64-* | aarch64_be-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | be32-* | be64-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | k1om-* \
+ | le32-* | le64-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa32r6-* | mipsisa32r6el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64r6-* | mipsisa64r6el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | or1k*-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+ | pyramid-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
+ | tron-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ microblaze*)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
+ mingw32)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ msys)
+ basic_machine=i686-pc
+ os=-msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc | ppcbe) basic_machine=powerpc-unknown
+ ;;
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tile*)
+ basic_machine=$basic_machine-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+	# The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* | -plan9* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ # Apple iOS
+ -ios*)
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -nacl*)
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -cnk*|-aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
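
The case arms above are the heart of GNU config.sub: they expand shorthand machine aliases into canonical cpu-vendor-os triples, supply a default vendor or OS when one is missing, and reject anything unrecognized. A minimal sketch of the behaviour that follows from the arms shown above (invocation from memory/jemalloc/src/build-aux is assumed; the expected outputs are traced from the case statements, not taken from a build log):

    sh ./config.sub amd64       # -> x86_64-pc-none         ("amd64" arm sets x86_64-pc; no default-OS arm matches, so "-none")
    sh ./config.sub sun4sol2    # -> sparc-sun-solaris2     (alias arm sets sparc-sun and -solaris2, which the OS whitelist accepts)
    sh ./config.sub blackfin    # -> bfin-unknown-linux-gnu (alias arm sets bfin-unknown and -linux; -linux is rewritten to -linux-gnu)

configure runs this script on the values given for --build/--host/--target, and the triples it echoes are what end up split into the build_cpu/build_vendor/build_os and host_cpu/host_vendor/host_os substitution variables seen later in this commit.
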
diff --git a/memory/jemalloc/src/build-aux/install-sh b/memory/jemalloc/src/build-aux/install-sh
new file mode 100755
index 000000000..ebc66913e
--- /dev/null
+++ b/memory/jemalloc/src/build-aux/install-sh
@@ -0,0 +1,250 @@
+#! /bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transformarg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ else
+ instcmd=mkdir
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+'
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
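
install-sh above is the classic X11-derived fallback used when a BSD-compatible install(1) is not available: it moves (or, with -c, copies) a single file into place via a temporary name, applies chown/chgrp/strip/chmod in that order, then renames to the final destination; with -d it creates a directory instead. A small usage sketch based on the options parsed above (the file and directory names are illustrative only):

    # copy rather than move, with an explicit mode; missing components of the
    # destination directory are created one at a time before the copy
    sh ./install-sh -c -m 644 jemalloc.h /usr/local/include/jemalloc/jemalloc.h

    # create a directory (missing parents are made first), default mode 0755
    sh ./install-sh -d /usr/local/lib/pkgconfig

Note the one-file-at-a-time restriction called out in the header comment: unlike modern install(1), multiple source files per invocation are not supported.
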
diff --git a/memory/jemalloc/src/config.stamp.in b/memory/jemalloc/src/config.stamp.in
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/memory/jemalloc/src/config.stamp.in
diff --git a/memory/jemalloc/src/configure b/memory/jemalloc/src/configure
new file mode 100755
index 000000000..2ad9b5a87
--- /dev/null
+++ b/memory/jemalloc/src/configure
@@ -0,0 +1,10773 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.69.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+ exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+ if (eval "$as_required") 2>/dev/null; then :
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ as_found=:
+ case $as_dir in #(
+ /*)
+ for as_base in sh bash ksh sh5; do
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir/$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ break 2
+fi
+fi
+ done;;
+ esac
+ as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+ if test "x$CONFIG_SHELL" != x; then :
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+ if test x$as_have_required = xno; then :
+ $as_echo "$0: This script requires a shell more modern than all"
+ $as_echo "$0: the shells that I found on your system."
+ if test x${ZSH_VERSION+set} = xset ; then
+ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
+ fi
+ exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME=
+PACKAGE_TARNAME=
+PACKAGE_VERSION=
+PACKAGE_STRING=
+PACKAGE_BUGREPORT=
+PACKAGE_URL=
+
+ac_unique_file="Makefile.in"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='LTLIBOBJS
+LIBOBJS
+cfgoutputs_out
+cfgoutputs_in
+cfghdrs_out
+cfghdrs_in
+enable_zone_allocator
+enable_tls
+enable_lazy_lock
+jemalloc_version_gid
+jemalloc_version_nrev
+jemalloc_version_bugfix
+jemalloc_version_minor
+jemalloc_version_major
+jemalloc_version
+enable_cache_oblivious
+enable_xmalloc
+enable_valgrind
+enable_utrace
+enable_fill
+enable_munmap
+enable_tcache
+enable_prof
+enable_stats
+enable_debug
+je_
+install_suffix
+private_namespace
+JEMALLOC_CPREFIX
+enable_code_coverage
+AUTOCONF
+LD
+RANLIB
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+enable_autogen
+RPATH_EXTRA
+LM
+CC_MM
+AROUT
+ARFLAGS
+MKLIB
+TEST_LD_MODE
+LDTARGET
+CTARGET
+PIC_CFLAGS
+SOREV
+EXTRA_LDFLAGS
+DSO_LDFLAGS
+link_whole_archive
+libprefix
+exe
+a
+o
+importlib
+so
+LD_PRELOAD_VAR
+RPATH
+abi
+AR
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+EGREP
+GREP
+CPP
+EXTRA_CFLAGS
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+XSLROOT
+XSLTPROC
+MANDIR
+DATADIR
+LIBDIR
+INCLUDEDIR
+BINDIR
+PREFIX
+abs_objroot
+objroot
+abs_srcroot
+srcroot
+rev
+CONFIG
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+with_xslroot
+with_rpath
+enable_autogen
+enable_code_coverage
+with_mangling
+with_jemalloc_prefix
+with_export
+with_private_namespace
+with_install_suffix
+with_malloc_conf
+enable_cc_silence
+enable_debug
+enable_ivsalloc
+enable_stats
+enable_prof
+enable_prof_libunwind
+with_static_libunwind
+enable_prof_libgcc
+enable_prof_gcc
+enable_tcache
+enable_munmap
+enable_fill
+enable_utrace
+enable_valgrind
+enable_xmalloc
+enable_cache_oblivious
+with_lg_tiny_min
+with_lg_quantum
+with_lg_page
+with_lg_page_sizes
+with_lg_size_class_group
+with_version
+enable_lazy_lock
+enable_tls
+enable_zone_allocator
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -runstatedir | --runstatedir | --runstatedi | --runstated \
+ | --runstate | --runstat | --runsta | --runst | --runs \
+ | --run | --ru | --r)
+ ac_prev=runstatedir ;;
+ -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+ | --run=* | --ru=* | --r=*)
+ runstatedir=$ac_optarg ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir runstatedir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures this package to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking ...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-autogen Automatically regenerate configure output
+ --enable-code-coverage Enable code coverage
+ --disable-cc-silence Do not silence irrelevant compiler warnings
+ --enable-debug Build debugging code (implies --enable-ivsalloc)
+ --enable-ivsalloc Validate pointers passed through the public API
+ --disable-stats Disable statistics calculation/reporting
+ --enable-prof Enable allocation profiling
+ --enable-prof-libunwind Use libunwind for backtracing
+ --disable-prof-libgcc Do not use libgcc for backtracing
+ --disable-prof-gcc Do not use gcc intrinsics for backtracing
+ --disable-tcache Disable per thread caches
+ --disable-munmap Disable VM deallocation via munmap(2)
+ --disable-fill Disable support for junk/zero filling, quarantine,
+ and redzones
+ --enable-utrace Enable utrace(2)-based tracing
+ --disable-valgrind Disable support for Valgrind
+ --enable-xmalloc Support xmalloc option
+ --disable-cache-oblivious
+ Disable support for cache-oblivious allocation
+ alignment
+ --enable-lazy-lock Enable lazy locking (only lock when multi-threaded)
+ --disable-tls Disable thread-local storage (__thread keyword)
+ --disable-zone-allocator
+ Disable zone allocator for Darwin
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-xslroot=<path> XSL stylesheet root path
+ --with-rpath=<rpath> Colon-separated rpath (ELF systems only)
+ --with-mangling=<map> Mangle symbols in <map>
+ --with-jemalloc-prefix=<prefix>
+ Prefix to prepend to all public APIs
+ --without-export disable exporting jemalloc public APIs
+ --with-private-namespace=<prefix>
+ Prefix to prepend to all library-private APIs
+ --with-install-suffix=<suffix>
+ Suffix to append to all installed files
+ --with-malloc-conf=<malloc_conf>
+ config.malloc_conf options string
+ --with-static-libunwind=<libunwind.a>
+ Path to static libunwind library; use rather than
+ dynamically linking
+ --with-lg-tiny-min=<lg-tiny-min>
+ Base 2 log of minimum tiny size class to support
+ --with-lg-quantum=<lg-quantum>
+ Base 2 log of minimum allocation alignment
+ --with-lg-page=<lg-page>
+ Base 2 log of system page size
+ --with-lg-page-sizes=<lg-page-sizes>
+ Base 2 logs of system page sizes to support
+ --with-lg-size-class-group=<lg-size-class-group>
+ Base 2 log of size classes per doubling
+ --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
+ Version string
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CPP C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+configure
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_c_try_cpp LINENO
+# ----------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_cpp ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } > conftest.i && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_cpp
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_c_try_run ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=$ac_status
+fi
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
+
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
+
+# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES
+# --------------------------------------------
+# Tries to find the compile-time value of EXPR in a program that includes
+# INCLUDES, setting VAR accordingly. Returns whether the value could be
+# computed
+ac_fn_c_compute_int ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) >= 0)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_hi=$ac_mid; break
+else
+ as_fn_arith $ac_mid + 1 && ac_lo=$as_val
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) < 0)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) >= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_lo=$ac_mid; break
+else
+ as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ ac_lo= ac_hi=
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_hi=$ac_mid
+else
+ as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in #((
+?*) eval "$3=\$ac_lo"; ac_retval=0 ;;
+'') ac_retval=1 ;;
+esac
+ else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+static long int longval () { return $2; }
+static unsigned long int ulongval () { return $2; }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ return 1;
+ if (($2) < 0)
+ {
+ long int i = longval ();
+ if (i != ($2))
+ return 1;
+ fprintf (f, "%ld", i);
+ }
+ else
+ {
+ unsigned long int i = ulongval ();
+ if (i != ($2))
+ return 1;
+ fprintf (f, "%lu", i);
+ }
+ /* Do not output a trailing newline, as this causes \r\n confusion
+ on some platforms. */
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ echo >>conftest.val; read $3 <conftest.val; ac_retval=0
+else
+ ac_retval=1
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+rm -f conftest.val
+
+ fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_compute_int
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ test -x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
+# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_c_check_header_mongrel ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if eval \${$3+:} false; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_header_compiler=yes
+else
+ ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ ac_header_preproc=yes
+else
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
+ yes:no: )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_mongrel
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $2 (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
+
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_type
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by $as_me, which was
+generated by GNU Autoconf 2.69. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ $as_echo "PATH: $as_dir"
+ done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+ 2)
+ as_fn_append ac_configure_args1 " '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ as_fn_append ac_configure_args " '$ac_arg'"
+ ;;
+ esac
+ done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ $as_echo "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ $as_echo "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ $as_echo "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+ac_aux_dir=
+for ac_dir in build-aux "$srcdir"/build-aux; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+
+
+
+
+
+
+
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+
+
+rev=2
+
+
+srcroot=$srcdir
+if test "x${srcroot}" = "x." ; then
+ srcroot=""
+else
+ srcroot="${srcroot}/"
+fi
+
+abs_srcroot="`cd \"${srcdir}\"; pwd`/"
+
+
+objroot=""
+
+abs_objroot="`pwd`/"
+
+
+if test "x$prefix" = "xNONE" ; then
+ prefix="/usr/local"
+fi
+if test "x$exec_prefix" = "xNONE" ; then
+ exec_prefix=$prefix
+fi
+PREFIX=$prefix
+
+BINDIR=`eval echo $bindir`
+BINDIR=`eval echo $BINDIR`
+
+INCLUDEDIR=`eval echo $includedir`
+INCLUDEDIR=`eval echo $INCLUDEDIR`
+
+LIBDIR=`eval echo $libdir`
+LIBDIR=`eval echo $LIBDIR`
+
+DATADIR=`eval echo $datadir`
+DATADIR=`eval echo $DATADIR`
+
+MANDIR=`eval echo $mandir`
+MANDIR=`eval echo $MANDIR`
+
+
+# Extract the first word of "xsltproc", so it can be a program name with args.
+set dummy xsltproc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_XSLTPROC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $XSLTPROC in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false"
+ ;;
+esac
+fi
+XSLTPROC=$ac_cv_path_XSLTPROC
+if test -n "$XSLTPROC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5
+$as_echo "$XSLTPROC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
+ DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
+elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
+ DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
+else
+ DEFAULT_XSLROOT=""
+fi
+
+# Check whether --with-xslroot was given.
+if test "${with_xslroot+set}" = set; then :
+ withval=$with_xslroot;
+if test "x$with_xslroot" = "xno" ; then
+ XSLROOT="${DEFAULT_XSLROOT}"
+else
+ XSLROOT="${with_xslroot}"
+fi
+
+else
+ XSLROOT="${DEFAULT_XSLROOT}"
+
+fi
+
+
+
+CFLAGS=$CFLAGS
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers and give us an intuition
+# of what exeext should be.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+  # Even if this section looks crufty, it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+if test -z "$ac_file"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+  # If both `conftest.exe' and `conftest' are `present' (well, observable),
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if { ac_try='./conftest$ac_cv_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+   Unfortunately these don't provoke an error; instead they are silently
+   treated as 'x'.  The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+if test "x$GCC" != "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5
+$as_echo_n "checking whether compiler is MSVC... " >&6; }
+if ${je_cv_msvc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#ifndef _MSC_VER
+ int fail-1;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_msvc=yes
+else
+ je_cv_msvc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5
+$as_echo "$je_cv_msvc" >&6; }
+fi
+
+je_cv_cray_prgenv_wrapper=""
+if test "x${PE_ENV}" != "x" ; then
+ case "${CC}" in
+ CC|cc)
+ je_cv_cray_prgenv_wrapper="yes"
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5
+$as_echo_n "checking whether compiler is cray... " >&6; }
+if ${je_cv_cray+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#ifndef _CRAYC
+ int fail-1;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cray=yes
+else
+ je_cv_cray=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5
+$as_echo "$je_cv_cray" >&6; }
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5
+$as_echo_n "checking whether cray compiler version is 8.4... " >&6; }
+if ${je_cv_cray_84+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
+ int fail-1;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cray_84=yes
+else
+ je_cv_cray_84=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5
+$as_echo "$je_cv_cray_84" >&6; }
+fi
+
+if test "x$CFLAGS" = "x" ; then
+ no_CFLAGS="yes"
+ if test "x$GCC" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5
+$as_echo_n "checking whether compiler supports -std=gnu11... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-std=gnu11"
+else
+ CFLAGS="${CFLAGS} -std=gnu11"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-std=gnu11
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_HAS_RESTRICT 1
+_ACEOF
+
+ else
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5
+$as_echo_n "checking whether compiler supports -std=gnu99... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-std=gnu99"
+else
+ CFLAGS="${CFLAGS} -std=gnu99"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-std=gnu99
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_HAS_RESTRICT 1
+_ACEOF
+
+ fi
+ fi
+
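+# The stanza above (and each similar stanza below) is the generated expansion
+# of jemalloc's CFLAGS-probing macro in configure.ac: save the current CFLAGS,
+# append a single candidate flag, try to compile an empty program, and restore
+# the previous CFLAGS if the compiler rejects the flag.  A minimal stand-alone
+# sketch of the same idiom, for illustration only (probe_cflag is not part of
+# this script):
+#
+#   probe_cflag () {
+#     saved_CFLAGS=$CFLAGS
+#     CFLAGS="${CFLAGS:+$CFLAGS }$1"
+#     echo 'int main(void) { return 0; }' >conftest.c
+#     if $CC $CFLAGS -c conftest.c -o conftest.o >/dev/null 2>&1; then
+#       :                        # flag accepted; keep the augmented CFLAGS
+#     else
+#       CFLAGS=$saved_CFLAGS     # flag rejected; restore the previous CFLAGS
+#     fi
+#     rm -f conftest.c conftest.o
+#   }
+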
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5
+$as_echo_n "checking whether compiler supports -Wall... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Wall"
+else
+ CFLAGS="${CFLAGS} -Wall"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Wall
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror=declaration-after-statement" >&5
+$as_echo_n "checking whether compiler supports -Werror=declaration-after-statement... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror=declaration-after-statement"
+else
+ CFLAGS="${CFLAGS} -Werror=declaration-after-statement"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror=declaration-after-statement
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5
+$as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Wshorten-64-to-32"
+else
+ CFLAGS="${CFLAGS} -Wshorten-64-to-32"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Wshorten-64-to-32
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5
+$as_echo_n "checking whether compiler supports -Wsign-compare... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Wsign-compare"
+else
+ CFLAGS="${CFLAGS} -Wsign-compare"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Wsign-compare
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5
+$as_echo_n "checking whether compiler supports -pipe... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-pipe"
+else
+ CFLAGS="${CFLAGS} -pipe"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-pipe
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5
+$as_echo_n "checking whether compiler supports -g3... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-g3"
+else
+ CFLAGS="${CFLAGS} -g3"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-g3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ elif test "x$je_cv_msvc" = "xyes" ; then
+ CC="$CC -nologo"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5
+$as_echo_n "checking whether compiler supports -Zi... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Zi"
+else
+ CFLAGS="${CFLAGS} -Zi"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Zi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5
+$as_echo_n "checking whether compiler supports -MT... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-MT"
+else
+ CFLAGS="${CFLAGS} -MT"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-MT
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5
+$as_echo_n "checking whether compiler supports -W3... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-W3"
+else
+ CFLAGS="${CFLAGS} -W3"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-W3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5
+$as_echo_n "checking whether compiler supports -FS... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-FS"
+else
+ CFLAGS="${CFLAGS} -FS"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-FS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
+ fi
+ if test "x$je_cv_cray" = "xyes" ; then
+ if test "x$je_cv_cray_84" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5
+$as_echo_n "checking whether compiler supports -hipa2... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-hipa2"
+else
+ CFLAGS="${CFLAGS} -hipa2"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-hipa2
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5
+$as_echo_n "checking whether compiler supports -hnognu... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-hnognu"
+else
+ CFLAGS="${CFLAGS} -hnognu"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-hnognu
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ fi
+ if test "x$enable_cc_silence" != "xno" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5
+$as_echo_n "checking whether compiler supports -hnomessage=128... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-hnomessage=128"
+else
+ CFLAGS="${CFLAGS} -hnomessage=128"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-hnomessage=128
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5
+$as_echo_n "checking whether compiler supports -hnomessage=1357... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-hnomessage=1357"
+else
+ CFLAGS="${CFLAGS} -hnomessage=1357"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-hnomessage=1357
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ fi
+ fi
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if ${ac_cv_prog_CPP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if ${ac_cv_path_EGREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
+$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
+if ${ac_cv_c_bigendian+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_bigendian=unknown
+ # See if we're dealing with a universal compiler.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifndef __APPLE_CC__
+ not a universal capable compiler
+ #endif
+ typedef int dummy;
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ # Check for potential -arch flags. It is not universal unless
+ # there are at least two -arch flags with different values.
+ ac_arch=
+ ac_prev=
+ for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
+ if test -n "$ac_prev"; then
+ case $ac_word in
+ i?86 | x86_64 | ppc | ppc64)
+ if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
+ ac_arch=$ac_word
+ else
+ ac_cv_c_bigendian=universal
+ break
+ fi
+ ;;
+ esac
+ ac_prev=
+ elif test "x$ac_word" = "x-arch"; then
+ ac_prev=arch
+ fi
+ done
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if sys/param.h defines the BYTE_ORDER macro.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+ && LITTLE_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to _BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # Compile a test program.
+ if test "$cross_compiling" = yes; then :
+ # Try to guess by grepping values from an object file.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+short int ascii_mm[] =
+ { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+ short int ascii_ii[] =
+ { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+ int use_ascii (int i) {
+ return ascii_mm[i] + ascii_ii[i];
+ }
+ short int ebcdic_ii[] =
+ { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+ short int ebcdic_mm[] =
+ { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+ int use_ebcdic (int i) {
+ return ebcdic_mm[i] + ebcdic_ii[i];
+ }
+ extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+ ac_cv_c_bigendian=yes
+ fi
+ if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+ if test "$ac_cv_c_bigendian" = unknown; then
+ ac_cv_c_bigendian=no
+ else
+ # finding both strings is unlikely to happen, but who knows?
+ ac_cv_c_bigendian=unknown
+ fi
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+
+ /* Are we little or big endian? From Harbison&Steele. */
+ union
+ {
+ long int l;
+ char c[sizeof (long int)];
+ } u;
+ u.l = 1;
+ return u.c[sizeof (long int) - 1] == 1;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ac_cv_c_bigendian=no
+else
+ ac_cv_c_bigendian=yes
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5
+$as_echo "$ac_cv_c_bigendian" >&6; }
+ case $ac_cv_c_bigendian in #(
+ yes)
+ ac_cv_big_endian=1;; #(
+ no)
+ ac_cv_big_endian=0 ;; #(
+ universal)
+
+$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
+
+ ;; #(
+ *)
+ as_fn_error $? "unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
+ esac
+
+if test "x${ac_cv_big_endian}" = "x1" ; then
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_BIG_ENDIAN
+_ACEOF
+
+fi
+
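+# A note on the cross-compiling branch of the endianness probe above: the
+# ascii_mm/ascii_ii (and ebcdic_*) short arrays are chosen so that their bytes
+# spell a magic string in exactly one byte order.  The first element of
+# ascii_mm, 0x4249, is stored as the bytes 'B' 'I' on a big-endian target and
+# 'I' 'B' on a little-endian one, so the array reads "BIGenDianSyS" only when
+# the target is big-endian, while ascii_ii reads "LiTTleEnDian" only when it
+# is little-endian.  Grepping the compiled object file for those strings
+# therefore reveals the target's byte order without running any code.
+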
+if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
+fi
+
+if test "x${je_cv_msvc}" = "xyes" ; then
+ LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5
+$as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; }
+else
+ # The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5
+$as_echo_n "checking size of void *... " >&6; }
+if ${ac_cv_sizeof_void_p+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_void_p" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (void *)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_void_p=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5
+$as_echo "$ac_cv_sizeof_void_p" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_VOID_P $ac_cv_sizeof_void_p
+_ACEOF
+
+
+ if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+ LG_SIZEOF_PTR=3
+ elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+ LG_SIZEOF_PTR=2
+ else
+ as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5
+ fi
+fi
+cat >>confdefs.h <<_ACEOF
+#define LG_SIZEOF_PTR $LG_SIZEOF_PTR
+_ACEOF
+
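+# Each LG_SIZEOF_* value defined here and below is the base-2 logarithm of the
+# byte size probed above (4 bytes -> 2, 8 bytes -> 3, 16 bytes -> 4), so the
+# allocator can use bit shifts instead of multiplying or dividing by the
+# type's size.  An illustrative sanity check for the non-MSVC branch (not part
+# of the real script):
+#
+#   test "$((1 << LG_SIZEOF_PTR))" = "$ac_cv_sizeof_void_p" || echo mismatch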
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5
+$as_echo_n "checking size of int... " >&6; }
+if ${ac_cv_sizeof_int+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_int" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (int)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_int=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5
+$as_echo "$ac_cv_sizeof_int" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_INT $ac_cv_sizeof_int
+_ACEOF
+
+
+if test "x${ac_cv_sizeof_int}" = "x8" ; then
+ LG_SIZEOF_INT=3
+elif test "x${ac_cv_sizeof_int}" = "x4" ; then
+ LG_SIZEOF_INT=2
+else
+ as_fn_error $? "Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5
+fi
+cat >>confdefs.h <<_ACEOF
+#define LG_SIZEOF_INT $LG_SIZEOF_INT
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5
+$as_echo_n "checking size of long... " >&6; }
+if ${ac_cv_sizeof_long+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_long" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_long=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5
+$as_echo "$ac_cv_sizeof_long" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG $ac_cv_sizeof_long
+_ACEOF
+
+
+if test "x${ac_cv_sizeof_long}" = "x8" ; then
+ LG_SIZEOF_LONG=3
+elif test "x${ac_cv_sizeof_long}" = "x4" ; then
+ LG_SIZEOF_LONG=2
+else
+ as_fn_error $? "Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5
+fi
+cat >>confdefs.h <<_ACEOF
+#define LG_SIZEOF_LONG $LG_SIZEOF_LONG
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5
+$as_echo_n "checking size of long long... " >&6; }
+if ${ac_cv_sizeof_long_long+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_long_long" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long long)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_long_long=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5
+$as_echo "$ac_cv_sizeof_long_long" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long
+_ACEOF
+
+
+if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
+ LG_SIZEOF_LONG_LONG=3
+elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
+ LG_SIZEOF_LONG_LONG=2
+else
+ as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5
+fi
+cat >>confdefs.h <<_ACEOF
+#define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5
+$as_echo_n "checking size of intmax_t... " >&6; }
+if ${ac_cv_sizeof_intmax_t+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_intmax_t" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (intmax_t)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_intmax_t=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5
+$as_echo "$ac_cv_sizeof_intmax_t" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t
+_ACEOF
+
+
+if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
+ LG_SIZEOF_INTMAX_T=4
+elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
+ LG_SIZEOF_INTMAX_T=3
+elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
+ LG_SIZEOF_INTMAX_T=2
+else
+ as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5
+fi
+cat >>confdefs.h <<_ACEOF
+#define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T
+_ACEOF
+
+
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if ${ac_cv_build+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if ${ac_cv_host+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+CPU_SPINWAIT=""
+case "${host_cpu}" in
+ i686|x86_64)
+ if test "x${je_cv_msvc}" = "xyes" ; then
+ if ${je_cv_pause_msvc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5
+$as_echo_n "checking whether pause instruction MSVC is compilable... " >&6; }
+if ${je_cv_pause_msvc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+_mm_pause(); return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_pause_msvc=yes
+else
+ je_cv_pause_msvc=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5
+$as_echo "$je_cv_pause_msvc" >&6; }
+
+fi
+
+ if test "x${je_cv_pause_msvc}" = "xyes" ; then
+ CPU_SPINWAIT='_mm_pause()'
+ fi
+ else
+ if ${je_cv_pause+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5
+$as_echo_n "checking whether pause instruction is compilable... " >&6; }
+if ${je_cv_pause+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+__asm__ volatile("pause"); return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_pause=yes
+else
+ je_cv_pause=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5
+$as_echo "$je_cv_pause" >&6; }
+
+fi
+
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
+ fi
+ fi
+ ;;
+ powerpc)
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_ALTIVEC
+_ACEOF
+
+ ;;
+ *)
+ ;;
+esac
+cat >>confdefs.h <<_ACEOF
+#define CPU_SPINWAIT $CPU_SPINWAIT
+_ACEOF
+
+
+LD_PRELOAD_VAR="LD_PRELOAD"
+so="so"
+importlib="${so}"
+o="$ac_objext"
+a="a"
+exe="$ac_exeext"
+libprefix="lib"
+link_whole_archive="0"
+DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
+RPATH='-Wl,-rpath,$(1)'
+SOREV="${so}.${rev}"
+PIC_CFLAGS='-fPIC -DPIC'
+CTARGET='-o $@'
+LDTARGET='-o $@'
+TEST_LD_MODE=
+EXTRA_LDFLAGS=
+ARFLAGS='crus'
+AROUT=' $@'
+CC_MM=1
+
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ TEST_LD_MODE='-dynamic'
+fi
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ CC_MM=
+fi
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ar; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AR"; then
+ ac_cv_prog_AR="$AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AR="${ac_tool_prefix}ar"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AR=$ac_cv_prog_AR
+if test -n "$AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
+$as_echo "$AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_AR"; then
+ ac_ct_AR=$AR
+ # Extract the first word of "ar", so it can be a program name with args.
+set dummy ar; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_AR"; then
+ ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_AR="ar"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+$as_echo "$ac_ct_AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_AR" = x; then
+ AR=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ AR=$ac_ct_AR
+ fi
+else
+ AR="$ac_cv_prog_AR"
+fi
+
+
+CFLAGS="$CFLAGS"
+default_munmap="1"
+maps_coalesce="1"
+case "${host}" in
+ *-*-darwin* | *-*-ios*)
+ abi="macho"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ RPATH=""
+ LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
+ so="dylib"
+ importlib="${so}"
+ force_tls="0"
+ DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
+ SOREV="${rev}.${so}"
+ sbrk_deprecated="1"
+ ;;
+ *-*-freebsd*)
+ abi="elf"
+ $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h
+
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ force_lazy_lock="1"
+ ;;
+ *-*-dragonfly*)
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ ;;
+ *-*-openbsd*)
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ force_tls="0"
+ ;;
+ *-*-bitrig*)
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ ;;
+ *-*-linux*)
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+ abi="elf"
+ $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h
+
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h
+
+ $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h
+
+ $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h
+
+ default_munmap="0"
+ ;;
+ *-*-netbsd*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5
+$as_echo_n "checking ABI... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __ELF__
+/* ELF */
+#else
+#error aout
+#endif
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ abi="elf"
+else
+ abi="aout"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5
+$as_echo "$abi" >&6; }
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ ;;
+ *-*-solaris2*)
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ RPATH='-Wl,-R,$(1)'
+ CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
+ LIBS="$LIBS -lposix4 -lsocket -lnsl"
+ ;;
+ *-ibm-aix*)
+	if test "$LG_SIZEOF_PTR" = "8"; then
+ LD_PRELOAD_VAR="LDR_PRELOAD64"
+ else
+ LD_PRELOAD_VAR="LDR_PRELOAD"
+ fi
+ abi="xcoff"
+ ;;
+ *-*-mingw* | *-*-cygwin*)
+ abi="pecoff"
+ force_tls="0"
+ maps_coalesce="0"
+ RPATH=""
+ so="dll"
+ if test "x$je_cv_msvc" = "xyes" ; then
+ importlib="lib"
+ DSO_LDFLAGS="-LD"
+ EXTRA_LDFLAGS="-link -DEBUG"
+ CTARGET='-Fo$@'
+ LDTARGET='-Fe$@'
+ AR='lib'
+ ARFLAGS='-nologo -out:'
+ AROUT='$@'
+ CC_MM=
+ else
+ importlib="${so}"
+ DSO_LDFLAGS="-shared"
+ link_whole_archive="1"
+ fi
+ a="lib"
+ libprefix=""
+ SOREV="${so}"
+ PIC_CFLAGS=""
+ ;;
+ *)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5
+$as_echo "Unsupported operating system: ${host}" >&6; }
+ abi="elf"
+ ;;
+esac
+
+JEMALLOC_USABLE_SIZE_CONST=const
+for ac_header in malloc.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default"
+if test "x$ac_cv_header_malloc_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_MALLOC_H 1
+_ACEOF
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5
+$as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc.h>
+ #include <stddef.h>
+ size_t malloc_usable_size(const void *ptr);
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+
+ JEMALLOC_USABLE_SIZE_CONST=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+
+done
+
+cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST
+_ACEOF
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5
+$as_echo_n "checking for library containing log... " >&6; }
+if ${ac_cv_search_log+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char log ();
+int
+main ()
+{
+return log ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' m; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_search_log=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_log+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_log+:} false; then :
+
+else
+ ac_cv_search_log=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5
+$as_echo "$ac_cv_search_log" >&6; }
+ac_res=$ac_cv_search_log
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+else
+ as_fn_error $? "Missing math functions" "$LINENO" 5
+fi
+
+if test "x$ac_cv_search_log" != "xnone required" ; then
+ LM="$ac_cv_search_log"
+else
+ LM=
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5
+$as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; }
+if ${je_cv_attribute+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+static __attribute__((unused)) void foo(void){}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_attribute=yes
+else
+ je_cv_attribute=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5
+$as_echo "$je_cv_attribute" >&6; }
+
+if test "x${je_cv_attribute}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR " >>confdefs.h
+
+ if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5
+$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-fvisibility=hidden"
+else
+ CFLAGS="${CFLAGS} -fvisibility=hidden"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-fvisibility=hidden
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ fi
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
+$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-herror_on_warning"
+else
+ CFLAGS="${CFLAGS} -herror_on_warning"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-herror_on_warning
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5
+$as_echo_n "checking whether tls_model attribute is compilable... " >&6; }
+if ${je_cv_tls_model+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+static __thread int
+ __attribute__((tls_model("initial-exec"), unused)) foo;
+ foo = 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_tls_model=yes
+else
+ je_cv_tls_model=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5
+$as_echo "$je_cv_tls_model" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_tls_model}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h
+
+else
+ $as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h
+
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
+$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-herror_on_warning"
+else
+ CFLAGS="${CFLAGS} -herror_on_warning"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-herror_on_warning
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5
+$as_echo_n "checking whether alloc_size attribute is compilable... " >&6; }
+if ${je_cv_alloc_size+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(size_t size) __attribute__((alloc_size(1)));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_alloc_size=yes
+else
+ je_cv_alloc_size=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5
+$as_echo "$je_cv_alloc_size" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE " >>confdefs.h
+
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
+$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-herror_on_warning"
+else
+ CFLAGS="${CFLAGS} -herror_on_warning"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-herror_on_warning
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5
+$as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; }
+if ${je_cv_format_gnu_printf+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_format_gnu_printf=yes
+else
+ je_cv_format_gnu_printf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5
+$as_echo "$je_cv_format_gnu_printf" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF " >>confdefs.h
+
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
+$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-herror_on_warning"
+else
+ CFLAGS="${CFLAGS} -herror_on_warning"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-herror_on_warning
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5
+$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; }
+if ${je_cv_format_printf+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_format_printf=yes
+else
+ je_cv_format_printf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5
+$as_echo "$je_cv_format_printf" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_printf}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF " >>confdefs.h
+
+fi
+
+
+# Check whether --with-rpath was given.
+if test "${with_rpath+set}" = set; then :
+ withval=$with_rpath; if test "x$with_rpath" = "xno" ; then
+ RPATH_EXTRA=
+else
+ RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
+fi
+else
+ RPATH_EXTRA=
+
+fi
+
+
+
+# Check whether --enable-autogen was given.
+if test "${enable_autogen+set}" = set; then :
+ enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then
+ enable_autogen="0"
+else
+ enable_autogen="1"
+fi
+
+else
+ enable_autogen="0"
+
+fi
+
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if ${ac_cv_path_install+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in #((
+ ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_RANLIB+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+# Extract the first word of "ld", so it can be a program name with args.
+set dummy ld; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_LD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $LD in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_LD="$LD" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_LD" && ac_cv_path_LD="false"
+ ;;
+esac
+fi
+LD=$ac_cv_path_LD
+if test -n "$LD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+# Extract the first word of "autoconf", so it can be a program name with args.
+set dummy autoconf; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_AUTOCONF+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $AUTOCONF in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false"
+ ;;
+esac
+fi
+AUTOCONF=$ac_cv_path_AUTOCONF
+if test -n "$AUTOCONF"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5
+$as_echo "$AUTOCONF" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
+
+ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign"
+if test "x$ac_cv_func_memalign" = xyes; then :
+ $as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN " >>confdefs.h
+
+ public_syms="${public_syms} memalign"
+fi
+
+ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc"
+if test "x$ac_cv_func_valloc" = xyes; then :
+ $as_echo "#define JEMALLOC_OVERRIDE_VALLOC " >>confdefs.h
+
+ public_syms="${public_syms} valloc"
+fi
+
+
+GCOV_FLAGS=
+# Check whether --enable-code-coverage was given.
+if test "${enable_code_coverage+set}" = set; then :
+ enableval=$enable_code_coverage; if test "x$enable_code_coverage" = "xno" ; then
+ enable_code_coverage="0"
+else
+ enable_code_coverage="1"
+fi
+
+else
+ enable_code_coverage="0"
+
+fi
+
+if test "x$enable_code_coverage" = "x1" ; then
+ deoptimize="no"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
+ if test "x${deoptimize}" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O0" >&5
+$as_echo_n "checking whether compiler supports -O0... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-O0"
+else
+ CFLAGS="${CFLAGS} -O0"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-O0
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fprofile-arcs -ftest-coverage" >&5
+$as_echo_n "checking whether compiler supports -fprofile-arcs -ftest-coverage... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-fprofile-arcs -ftest-coverage"
+else
+ CFLAGS="${CFLAGS} -fprofile-arcs -ftest-coverage"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  je_cv_cflags_appended="-fprofile-arcs -ftest-coverage"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
+ $as_echo "#define JEMALLOC_CODE_COVERAGE " >>confdefs.h
+
+fi
+
+
+
+# Check whether --with-mangling was given.
+if test "${with_mangling+set}" = set; then :
+ withval=$with_mangling; mangling_map="$with_mangling"
+else
+ mangling_map=""
+fi
+
+
+
+# Check whether --with-jemalloc_prefix was given.
+if test "${with_jemalloc_prefix+set}" = set; then :
+ withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix"
+else
+ if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
+ JEMALLOC_PREFIX=""
+else
+ JEMALLOC_PREFIX="je_"
+fi
+
+fi
+
+if test "x$JEMALLOC_PREFIX" != "x" ; then
+ JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_PREFIX "$JEMALLOC_PREFIX"
+_ACEOF
+
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX"
+_ACEOF
+
+fi
+
+
+
+# Check whether --with-export was given.
+if test "${with_export+set}" = set; then :
+ withval=$with_export; if test "x$with_export" = "xno"; then
+ $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h
+
+fi
+
+fi
+
+
+
+# Check whether --with-private_namespace was given.
+if test "${with_private_namespace+set}" = set; then :
+ withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"
+else
+ JEMALLOC_PRIVATE_NAMESPACE="je_"
+
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE
+_ACEOF
+
+private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
+
+
+
+# Check whether --with-install_suffix was given.
+if test "${with_install_suffix+set}" = set; then :
+ withval=$with_install_suffix; INSTALL_SUFFIX="$with_install_suffix"
+else
+ INSTALL_SUFFIX=
+
+fi
+
+install_suffix="$INSTALL_SUFFIX"
+
+
+
+# Check whether --with-malloc_conf was given.
+if test "${with_malloc_conf+set}" = set; then :
+ withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"
+else
+ JEMALLOC_CONFIG_MALLOC_CONF=""
+
+fi
+
+config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
+cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf"
+_ACEOF
+
+
+je_="je_"
+
+
+cfgoutputs_in="Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
+cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in"
+cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
+
+cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
+cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
+cfgoutputs_out="${cfgoutputs_out} test/test.sh"
+cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
+
+cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
+cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
+
+cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
+
+cfghdrs_out="include/jemalloc/jemalloc_defs.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
+cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
+
+cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
+
+# Check whether --enable-cc-silence was given.
+if test "${enable_cc_silence+set}" = set; then :
+ enableval=$enable_cc_silence; if test "x$enable_cc_silence" = "xno" ; then
+ enable_cc_silence="0"
+else
+ enable_cc_silence="1"
+fi
+
+else
+ enable_cc_silence="1"
+
+fi
+
+if test "x$enable_cc_silence" = "x1" ; then
+ $as_echo "#define JEMALLOC_CC_SILENCE " >>confdefs.h
+
+fi
+
+# Check whether --enable-debug was given.
+if test "${enable_debug+set}" = set; then :
+ enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then
+ enable_debug="0"
+else
+ enable_debug="1"
+fi
+
+else
+ enable_debug="0"
+
+fi
+
+if test "x$enable_debug" = "x1" ; then
+ $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h
+
+fi
+if test "x$enable_debug" = "x1" ; then
+ $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h
+
+ enable_ivsalloc="1"
+fi
+
+
+# Check whether --enable-ivsalloc was given.
+if test "${enable_ivsalloc+set}" = set; then :
+ enableval=$enable_ivsalloc; if test "x$enable_ivsalloc" = "xno" ; then
+ enable_ivsalloc="0"
+else
+ enable_ivsalloc="1"
+fi
+
+else
+ enable_ivsalloc="0"
+
+fi
+
+if test "x$enable_ivsalloc" = "x1" ; then
+ $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h
+
+fi
+
+if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
+ optimize="no"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes"
+ if test "x${optimize}" = "xyes" ; then
+ if test "x$GCC" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5
+$as_echo_n "checking whether compiler supports -O3... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-O3"
+else
+ CFLAGS="${CFLAGS} -O3"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-O3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5
+$as_echo_n "checking whether compiler supports -funroll-loops... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-funroll-loops"
+else
+ CFLAGS="${CFLAGS} -funroll-loops"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-funroll-loops
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ elif test "x$je_cv_msvc" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5
+$as_echo_n "checking whether compiler supports -O2... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-O2"
+else
+ CFLAGS="${CFLAGS} -O2"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-O2
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ else
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5
+$as_echo_n "checking whether compiler supports -O... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-O"
+else
+ CFLAGS="${CFLAGS} -O"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-O
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ fi
+ fi
+fi
+
+# Check whether --enable-stats was given.
+if test "${enable_stats+set}" = set; then :
+ enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then
+ enable_stats="0"
+else
+ enable_stats="1"
+fi
+
+else
+ enable_stats="1"
+
+fi
+
+if test "x$enable_stats" = "x1" ; then
+ $as_echo "#define JEMALLOC_STATS " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-prof was given.
+if test "${enable_prof+set}" = set; then :
+ enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then
+ enable_prof="0"
+else
+ enable_prof="1"
+fi
+
+else
+ enable_prof="0"
+
+fi
+
+if test "x$enable_prof" = "x1" ; then
+ backtrace_method=""
+else
+ backtrace_method="N/A"
+fi
+
+# Check whether --enable-prof-libunwind was given.
+if test "${enable_prof_libunwind+set}" = set; then :
+ enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then
+ enable_prof_libunwind="0"
+else
+ enable_prof_libunwind="1"
+fi
+
+else
+ enable_prof_libunwind="0"
+
+fi
+
+
+# Check whether --with-static_libunwind was given.
+if test "${with_static_libunwind+set}" = set; then :
+ withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then
+ LUNWIND="-lunwind"
+else
+ if test ! -f "$with_static_libunwind" ; then
+ as_fn_error $? "Static libunwind not found: $with_static_libunwind" "$LINENO" 5
+ fi
+ LUNWIND="$with_static_libunwind"
+fi
+else
+ LUNWIND="-lunwind"
+
+fi
+
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
+ for ac_header in libunwind.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default"
+if test "x$ac_cv_header_libunwind_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBUNWIND_H 1
+_ACEOF
+
+else
+ enable_prof_libunwind="0"
+fi
+
+done
+
+ if test "x$LUNWIND" = "x-lunwind" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5
+$as_echo_n "checking for unw_backtrace in -lunwind... " >&6; }
+if ${ac_cv_lib_unwind_unw_backtrace+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lunwind $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char unw_backtrace ();
+int
+main ()
+{
+return unw_backtrace ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_unwind_unw_backtrace=yes
+else
+ ac_cv_lib_unwind_unw_backtrace=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5
+$as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; }
+if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then :
+ LIBS="$LIBS $LUNWIND"
+else
+ enable_prof_libunwind="0"
+fi
+
+ else
+ LIBS="$LIBS $LUNWIND"
+ fi
+ if test "x${enable_prof_libunwind}" = "x1" ; then
+ backtrace_method="libunwind"
+ $as_echo "#define JEMALLOC_PROF_LIBUNWIND " >>confdefs.h
+
+ fi
+fi
+
+# Check whether --enable-prof-libgcc was given.
+if test "${enable_prof_libgcc+set}" = set; then :
+ enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then
+ enable_prof_libgcc="0"
+else
+ enable_prof_libgcc="1"
+fi
+
+else
+ enable_prof_libgcc="1"
+
+fi
+
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ for ac_header in unwind.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default"
+if test "x$ac_cv_header_unwind_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_UNWIND_H 1
+_ACEOF
+
+else
+ enable_prof_libgcc="0"
+fi
+
+done
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5
+$as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; }
+if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lgcc $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char _Unwind_Backtrace ();
+int
+main ()
+{
+return _Unwind_Backtrace ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_gcc__Unwind_Backtrace=yes
+else
+ ac_cv_lib_gcc__Unwind_Backtrace=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5
+$as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; }
+if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then :
+ LIBS="$LIBS -lgcc"
+else
+ enable_prof_libgcc="0"
+fi
+
+ if test "x${enable_prof_libgcc}" = "x1" ; then
+ backtrace_method="libgcc"
+ $as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h
+
+ fi
+else
+ enable_prof_libgcc="0"
+fi
+
+# Check whether --enable-prof-gcc was given.
+if test "${enable_prof_gcc+set}" = set; then :
+ enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then
+ enable_prof_gcc="0"
+else
+ enable_prof_gcc="1"
+fi
+
+else
+ enable_prof_gcc="1"
+
+fi
+
+if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5
+$as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-fno-omit-frame-pointer"
+else
+ CFLAGS="${CFLAGS} -fno-omit-frame-pointer"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-fno-omit-frame-pointer
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ backtrace_method="gcc intrinsics"
+ $as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h
+
+else
+ enable_prof_gcc="0"
+fi
+
+if test "x$backtrace_method" = "x" ; then
+ backtrace_method="none (disabling profiling)"
+ enable_prof="0"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5
+$as_echo_n "checking configured backtracing method... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5
+$as_echo "$backtrace_method" >&6; }
+if test "x$enable_prof" = "x1" ; then
+ if test "x$LM" != "x" ; then
+ LIBS="$LIBS $LM"
+ fi
+
+ $as_echo "#define JEMALLOC_PROF " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-tcache was given.
+if test "${enable_tcache+set}" = set; then :
+ enableval=$enable_tcache; if test "x$enable_tcache" = "xno" ; then
+ enable_tcache="0"
+else
+ enable_tcache="1"
+fi
+
+else
+ enable_tcache="1"
+
+fi
+
+if test "x$enable_tcache" = "x1" ; then
+ $as_echo "#define JEMALLOC_TCACHE " >>confdefs.h
+
+fi
+
+
+if test "x${maps_coalesce}" = "x1" ; then
+ $as_echo "#define JEMALLOC_MAPS_COALESCE " >>confdefs.h
+
+fi
+
+# Check whether --enable-munmap was given.
+if test "${enable_munmap+set}" = set; then :
+ enableval=$enable_munmap; if test "x$enable_munmap" = "xno" ; then
+ enable_munmap="0"
+else
+ enable_munmap="1"
+fi
+
+else
+ enable_munmap="${default_munmap}"
+
+fi
+
+if test "x$enable_munmap" = "x1" ; then
+ $as_echo "#define JEMALLOC_MUNMAP " >>confdefs.h
+
+fi
+
+
+have_dss="1"
+ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk"
+if test "x$ac_cv_func_sbrk" = xyes; then :
+ have_sbrk="1"
+else
+ have_sbrk="0"
+fi
+
+if test "x$have_sbrk" = "x1" ; then
+ if test "x$sbrk_deprecated" = "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5
+$as_echo "Disabling dss allocation because sbrk is deprecated" >&6; }
+ have_dss="0"
+ fi
+else
+ have_dss="0"
+fi
+
+if test "x$have_dss" = "x1" ; then
+ $as_echo "#define JEMALLOC_DSS " >>confdefs.h
+
+fi
+
+# Check whether --enable-fill was given.
+if test "${enable_fill+set}" = set; then :
+ enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then
+ enable_fill="0"
+else
+ enable_fill="1"
+fi
+
+else
+ enable_fill="1"
+
+fi
+
+if test "x$enable_fill" = "x1" ; then
+ $as_echo "#define JEMALLOC_FILL " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-utrace was given.
+if test "${enable_utrace+set}" = set; then :
+ enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then
+ enable_utrace="0"
+else
+ enable_utrace="1"
+fi
+
+else
+ enable_utrace="0"
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5
+$as_echo_n "checking whether utrace(2) is compilable... " >&6; }
+if ${je_cv_utrace+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/ktrace.h>
+
+int
+main ()
+{
+
+ utrace((void *)0, 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_utrace=yes
+else
+ je_cv_utrace=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5
+$as_echo "$je_cv_utrace" >&6; }
+
+if test "x${je_cv_utrace}" = "xno" ; then
+ enable_utrace="0"
+fi
+if test "x$enable_utrace" = "x1" ; then
+ $as_echo "#define JEMALLOC_UTRACE " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-valgrind was given.
+if test "${enable_valgrind+set}" = set; then :
+ enableval=$enable_valgrind; if test "x$enable_valgrind" = "xno" ; then
+ enable_valgrind="0"
+else
+ enable_valgrind="1"
+fi
+
+else
+ enable_valgrind="1"
+
+fi
+
+if test "x$enable_valgrind" = "x1" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether valgrind is compilable" >&5
+$as_echo_n "checking whether valgrind is compilable... " >&6; }
+if ${je_cv_valgrind+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+#if !defined(VALGRIND_RESIZEINPLACE_BLOCK)
+# error "Incompatible Valgrind version"
+#endif
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_valgrind=yes
+else
+ je_cv_valgrind=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_valgrind" >&5
+$as_echo "$je_cv_valgrind" >&6; }
+
+ if test "x${je_cv_valgrind}" = "xno" ; then
+ enable_valgrind="0"
+ fi
+ if test "x$enable_valgrind" = "x1" ; then
+ $as_echo "#define JEMALLOC_VALGRIND " >>confdefs.h
+
+ fi
+fi
+
+
+# Check whether --enable-xmalloc was given.
+if test "${enable_xmalloc+set}" = set; then :
+ enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then
+ enable_xmalloc="0"
+else
+ enable_xmalloc="1"
+fi
+
+else
+ enable_xmalloc="0"
+
+fi
+
+if test "x$enable_xmalloc" = "x1" ; then
+ $as_echo "#define JEMALLOC_XMALLOC " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-cache-oblivious was given.
+if test "${enable_cache_oblivious+set}" = set; then :
+ enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then
+ enable_cache_oblivious="0"
+else
+ enable_cache_oblivious="1"
+fi
+
+else
+ enable_cache_oblivious="1"
+
+fi
+
+if test "x$enable_cache_oblivious" = "x1" ; then
+ $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h
+
+fi
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5
+$as_echo_n "checking whether a program using __builtin_unreachable is compilable... " >&6; }
+if ${je_cv_gcc_builtin_unreachable+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+void foo (void) {
+ __builtin_unreachable();
+}
+
+int
+main ()
+{
+
+ {
+ foo();
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_gcc_builtin_unreachable=yes
+else
+ je_cv_gcc_builtin_unreachable=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5
+$as_echo "$je_cv_gcc_builtin_unreachable" >&6; }
+
+if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h
+
+else
+ $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5
+$as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; }
+if ${je_cv_gcc_builtin_ffsl+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+
+int
+main ()
+{
+
+ {
+ int rv = __builtin_ffsl(0x08);
+ printf("%d\n", rv);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_gcc_builtin_ffsl=yes
+else
+ je_cv_gcc_builtin_ffsl=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5
+$as_echo "$je_cv_gcc_builtin_ffsl" >&6; }
+
+if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h
+
+else
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5
+$as_echo_n "checking whether a program using ffsl is compilable... " >&6; }
+if ${je_cv_function_ffsl+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdio.h>
+ #include <strings.h>
+ #include <string.h>
+
+int
+main ()
+{
+
+ {
+ int rv = ffsl(0x08);
+ printf("%d\n", rv);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_function_ffsl=yes
+else
+ je_cv_function_ffsl=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5
+$as_echo "$je_cv_function_ffsl" >&6; }
+
+ if test "x${je_cv_function_ffsl}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h
+
+ else
+ as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5
+ fi
+fi
+
+
+# Check whether --with-lg_tiny_min was given.
+if test "${with_lg_tiny_min+set}" = set; then :
+ withval=$with_lg_tiny_min; LG_TINY_MIN="$with_lg_tiny_min"
+else
+ LG_TINY_MIN="3"
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define LG_TINY_MIN $LG_TINY_MIN
+_ACEOF
+
+
+
+# Check whether --with-lg_quantum was given.
+if test "${with_lg_quantum+set}" = set; then :
+ withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum"
+else
+ LG_QUANTA="3 4"
+fi
+
+if test "x$with_lg_quantum" != "x" ; then
+ cat >>confdefs.h <<_ACEOF
+#define LG_QUANTUM $with_lg_quantum
+_ACEOF
+
+fi
+
+
+# Check whether --with-lg_page was given.
+if test "${with_lg_page+set}" = set; then :
+ withval=$with_lg_page; LG_PAGE="$with_lg_page"
+else
+ LG_PAGE="detect"
+fi
+
+if test "x$LG_PAGE" = "xdetect"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5
+$as_echo_n "checking LG_PAGE... " >&6; }
+if ${je_cv_lg_page+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ je_cv_lg_page=12
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <strings.h>
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+#include <stdio.h>
+
+int
+main ()
+{
+
+ int result;
+ FILE *f;
+
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwPageSize;
+#else
+ result = sysconf(_SC_PAGESIZE);
+#endif
+ if (result == -1) {
+ return 1;
+ }
+ result = JEMALLOC_INTERNAL_FFSL(result) - 1;
+
+ f = fopen("conftest.out", "w");
+ if (f == NULL) {
+ return 1;
+ }
+ fprintf(f, "%d", result);
+ fclose(f);
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ je_cv_lg_page=`cat conftest.out`
+else
+ je_cv_lg_page=undefined
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5
+$as_echo "$je_cv_lg_page" >&6; }
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+ LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
+ cat >>confdefs.h <<_ACEOF
+#define LG_PAGE $LG_PAGE
+_ACEOF
+
+else
+ as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5
+fi
+
+
+# Check whether --with-lg_page_sizes was given.
+if test "${with_lg_page_sizes+set}" = set; then :
+ withval=$with_lg_page_sizes; LG_PAGE_SIZES="$with_lg_page_sizes"
+else
+ LG_PAGE_SIZES="$LG_PAGE"
+fi
+
+
+
+# Check whether --with-lg_size_class_group was given.
+if test "${with_lg_size_class_group+set}" = set; then :
+ withval=$with_lg_size_class_group; LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"
+else
+ LG_SIZE_CLASS_GROUP="2"
+fi
+
+
+
+
+# Check whether --with-version was given.
+if test "${with_version+set}" = set; then :
+ withval=$with_version;
+ echo "${with_version}" | grep '^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$' 2>&1 1>/dev/null
+ if test $? -ne 0 ; then
+ as_fn_error $? "${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>" "$LINENO" 5
+ fi
+ echo "$with_version" > "${objroot}VERSION"
+
+else
+
+ if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
+ done
+ fi
+ rm -f "${objroot}VERSION.tmp"
+
+fi
+
+
+if test ! -e "${objroot}VERSION" ; then
+ if test ! -e "${srcroot}VERSION" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5
+$as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; }
+ echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+ else
+ cp ${srcroot}VERSION ${objroot}VERSION
+ fi
+fi
+jemalloc_version=`cat "${objroot}VERSION"`
+jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'`
+jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'`
+jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'`
+jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'`
+jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'`
+
+
+
+
+
+
+
+
+if test "x$abi" != "xpecoff" ; then
+ for ac_header in pthread.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default"
+if test "x$ac_cv_header_pthread_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_PTHREAD_H 1
+_ACEOF
+
+else
+ as_fn_error $? "pthread.h is missing" "$LINENO" 5
+fi
+
+done
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5
+$as_echo_n "checking for pthread_create in -lpthread... " >&6; }
+if ${ac_cv_lib_pthread_pthread_create+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lpthread $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char pthread_create ();
+int
+main ()
+{
+return pthread_create ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_pthread_pthread_create=yes
+else
+ ac_cv_lib_pthread_pthread_create=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5
+$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; }
+if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then :
+ LIBS="$LIBS -lpthread"
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5
+$as_echo_n "checking for library containing pthread_create... " >&6; }
+if ${ac_cv_search_pthread_create+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char pthread_create ();
+int
+main ()
+{
+return pthread_create ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' ; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_search_pthread_create=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_pthread_create+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_pthread_create+:} false; then :
+
+else
+ ac_cv_search_pthread_create=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5
+$as_echo "$ac_cv_search_pthread_create" >&6; }
+ac_res=$ac_cv_search_pthread_create
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+else
+ as_fn_error $? "libpthread is missing" "$LINENO" 5
+fi
+
+fi
+
+fi
+
+CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if ${ac_cv_search_clock_gettime+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' rt; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_search_clock_gettime=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_clock_gettime+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_clock_gettime+:} false; then :
+
+else
+ ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+fi
+
+
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ if test "$ac_cv_search_clock_gettime" != "-lrt"; then
+ SAVED_CFLAGS="${CFLAGS}"
+
+ unset ac_cv_search_clock_gettime
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5
+$as_echo_n "checking whether compiler supports -dynamic... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-dynamic"
+else
+ CFLAGS="${CFLAGS} -dynamic"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-dynamic
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if ${ac_cv_search_clock_gettime+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' rt; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_search_clock_gettime=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_clock_gettime+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_clock_gettime+:} false; then :
+
+else
+ ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+fi
+
+
+ CFLAGS="${SAVED_CFLAGS}"
+ fi
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5
+$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; }
+if ${je_cv_clock_monotonic_coarse+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <time.h>
+
+int
+main ()
+{
+
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_clock_monotonic_coarse=yes
+else
+ je_cv_clock_monotonic_coarse=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5
+$as_echo "$je_cv_clock_monotonic_coarse" >&6; }
+
+if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1" >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5
+$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; }
+if ${je_cv_clock_monotonic+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <unistd.h>
+#include <time.h>
+
+int
+main ()
+{
+
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
+# error _POSIX_MONOTONIC_CLOCK missing/invalid
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_clock_monotonic=yes
+else
+ je_cv_clock_monotonic=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5
+$as_echo "$je_cv_clock_monotonic" >&6; }
+
+if test "x${je_cv_clock_monotonic}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1" >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5
+$as_echo_n "checking whether mach_absolute_time() is compilable... " >&6; }
+if ${je_cv_mach_absolute_time+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <mach/mach_time.h>
+
+int
+main ()
+{
+
+ mach_absolute_time();
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_mach_absolute_time=yes
+else
+ je_cv_mach_absolute_time=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5
+$as_echo "$je_cv_mach_absolute_time" >&6; }
+
+if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1" >>confdefs.h
+
+fi
+
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5
+$as_echo_n "checking whether syscall(2) is compilable... " >&6; }
+if ${je_cv_syscall+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+int
+main ()
+{
+
+ syscall(SYS_write, 2, "hello", 5);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_syscall=yes
+else
+ je_cv_syscall=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5
+$as_echo "$je_cv_syscall" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x$je_cv_syscall" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_SYSCALL " >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
+if test "x$ac_cv_func_secure_getenv" = xyes; then :
+ have_secure_getenv="1"
+else
+ have_secure_getenv="0"
+
+fi
+
+if test "x$have_secure_getenv" = "x1" ; then
+ $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV " >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid"
+if test "x$ac_cv_func_issetugid" = xyes; then :
+ have_issetugid="1"
+else
+ have_issetugid="0"
+
+fi
+
+if test "x$have_issetugid" = "x1" ; then
+ $as_echo "#define JEMALLOC_HAVE_ISSETUGID " >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup"
+if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then :
+ have__malloc_thread_cleanup="1"
+else
+ have__malloc_thread_cleanup="0"
+
+fi
+
+if test "x$have__malloc_thread_cleanup" = "x1" ; then
+ $as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP " >>confdefs.h
+
+ force_tls="1"
+fi
+
+ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb"
+if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then :
+ have__pthread_mutex_init_calloc_cb="1"
+else
+ have__pthread_mutex_init_calloc_cb="0"
+
+fi
+
+if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
+ $as_echo "#define JEMALLOC_MUTEX_INIT_CB 1" >>confdefs.h
+
+fi
+
+# Check whether --enable-lazy_lock was given.
+if test "${enable_lazy_lock+set}" = set; then :
+ enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then
+ enable_lazy_lock="0"
+else
+ enable_lazy_lock="1"
+fi
+
+else
+ enable_lazy_lock=""
+
+fi
+
+if test "x${enable_lazy_lock}" = "x" ; then
+ if test "x${force_lazy_lock}" = "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5
+$as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; }
+ enable_lazy_lock="1"
+ else
+ enable_lazy_lock="0"
+ fi
+fi
+if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5
+$as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; }
+ enable_lazy_lock="0"
+fi
+if test "x$enable_lazy_lock" = "x1" ; then
+ if test "x$abi" != "xpecoff" ; then
+ for ac_header in dlfcn.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default"
+if test "x$ac_cv_header_dlfcn_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_DLFCN_H 1
+_ACEOF
+
+else
+ as_fn_error $? "dlfcn.h is missing" "$LINENO" 5
+fi
+
+done
+
+ ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym"
+if test "x$ac_cv_func_dlsym" = xyes; then :
+
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5
+$as_echo_n "checking for dlsym in -ldl... " >&6; }
+if ${ac_cv_lib_dl_dlsym+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlsym ();
+int
+main ()
+{
+return dlsym ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_dl_dlsym=yes
+else
+ ac_cv_lib_dl_dlsym=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5
+$as_echo "$ac_cv_lib_dl_dlsym" >&6; }
+if test "x$ac_cv_lib_dl_dlsym" = xyes; then :
+ LIBS="$LIBS -ldl"
+else
+ as_fn_error $? "libdl is missing" "$LINENO" 5
+fi
+
+
+fi
+
+ fi
+ $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-tls was given.
+if test "${enable_tls+set}" = set; then :
+ enableval=$enable_tls; if test "x$enable_tls" = "xno" ; then
+ enable_tls="0"
+else
+ enable_tls="1"
+fi
+
+else
+ enable_tls=""
+
+fi
+
+if test "x${enable_tls}" = "x" ; then
+ if test "x${force_tls}" = "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
+$as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; }
+ enable_tls="1"
+ elif test "x${force_tls}" = "x0" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
+$as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; }
+ enable_tls="0"
+ else
+ enable_tls="1"
+ fi
+fi
+if test "x${enable_tls}" = "x1" ; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5
+$as_echo_n "checking for TLS... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ __thread int x;
+
+int
+main ()
+{
+
+ x = 42;
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ enable_tls="0"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ enable_tls="0"
+fi
+
+if test "x${enable_tls}" = "x1" ; then
+ if test "x${force_tls}" = "x0" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5
+$as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;}
+ fi
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_TLS
+_ACEOF
+
+elif test "x${force_tls}" = "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5
+$as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;}
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5
+$as_echo_n "checking whether C11 atomics is compilable... " >&6; }
+if ${je_cv_c11atomics+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
+
+int
+main ()
+{
+
+ uint64_t *p = (uint64_t *)0;
+ uint64_t x = 1;
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ uint64_t r = atomic_fetch_add(a, x) + x;
+ return (r == 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_c11atomics=yes
+else
+ je_cv_c11atomics=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11atomics" >&5
+$as_echo "$je_cv_c11atomics" >&6; }
+
+if test "x${je_cv_c11atomics}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_C11ATOMICS 1" >>confdefs.h
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether atomic(9) is compilable" >&5
+$as_echo_n "checking whether atomic(9) is compilable... " >&6; }
+if ${je_cv_atomic9+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <inttypes.h>
+
+int
+main ()
+{
+
+ {
+ uint32_t x32 = 0;
+ volatile uint32_t *x32p = &x32;
+ atomic_fetchadd_32(x32p, 1);
+ }
+ {
+ unsigned long xlong = 0;
+ volatile unsigned long *xlongp = &xlong;
+ atomic_fetchadd_long(xlongp, 1);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_atomic9=yes
+else
+ je_cv_atomic9=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_atomic9" >&5
+$as_echo "$je_cv_atomic9" >&6; }
+
+if test "x${je_cv_atomic9}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_ATOMIC9 1" >>confdefs.h
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5
+$as_echo_n "checking whether Darwin OSAtomic*() is compilable... " >&6; }
+if ${je_cv_osatomic+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <libkern/OSAtomic.h>
+#include <inttypes.h>
+
+int
+main ()
+{
+
+ {
+ int32_t x32 = 0;
+ volatile int32_t *x32p = &x32;
+ OSAtomicAdd32(1, x32p);
+ }
+ {
+ int64_t x64 = 0;
+ volatile int64_t *x64p = &x64;
+ OSAtomicAdd64(1, x64p);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_osatomic=yes
+else
+ je_cv_osatomic=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5
+$as_echo "$je_cv_osatomic" >&6; }
+
+if test "x${je_cv_osatomic}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_OSATOMIC " >>confdefs.h
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5
+$as_echo_n "checking whether madvise(2) is compilable... " >&6; }
+if ${je_cv_madvise+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <sys/mman.h>
+
+int
+main ()
+{
+
+ {
+ madvise((void *)0, 0, 0);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_madvise=yes
+else
+ je_cv_madvise=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5
+$as_echo "$je_cv_madvise" >&6; }
+
+if test "x${je_cv_madvise}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h
+
+fi
+
+
+
+
+if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 32-bit __sync_{add,sub}_and_fetch()" >&5
+$as_echo_n "checking whether to force 32-bit __sync_{add,sub}_and_fetch()... " >&6; }
+if ${je_cv_sync_compare_and_swap_4+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdint.h>
+
+int
+main ()
+{
+
+ #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+ {
+ uint32_t x32 = 0;
+ __sync_add_and_fetch(&x32, 42);
+ __sync_sub_and_fetch(&x32, 1);
+ }
+ #else
+ #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 is defined, no need to force
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_sync_compare_and_swap_4=yes
+else
+ je_cv_sync_compare_and_swap_4=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_4" >&5
+$as_echo "$je_cv_sync_compare_and_swap_4" >&6; }
+
+ if test "x${je_cv_sync_compare_and_swap_4}" = "xyes" ; then
+ $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4 " >>confdefs.h
+
+ fi
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 64-bit __sync_{add,sub}_and_fetch()" >&5
+$as_echo_n "checking whether to force 64-bit __sync_{add,sub}_and_fetch()... " >&6; }
+if ${je_cv_sync_compare_and_swap_8+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdint.h>
+
+int
+main ()
+{
+
+ #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
+ {
+ uint64_t x64 = 0;
+ __sync_add_and_fetch(&x64, 42);
+ __sync_sub_and_fetch(&x64, 1);
+ }
+ #else
+ #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 is defined, no need to force
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_sync_compare_and_swap_8=yes
+else
+ je_cv_sync_compare_and_swap_8=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_8" >&5
+$as_echo "$je_cv_sync_compare_and_swap_8" >&6; }
+
+ if test "x${je_cv_sync_compare_and_swap_8}" = "xyes" ; then
+ $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8 " >>confdefs.h
+
+ fi
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5
+$as_echo_n "checking for __builtin_clz... " >&6; }
+if ${je_cv_builtin_clz+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ {
+ unsigned x = 0;
+ int y = __builtin_clz(x);
+ }
+ {
+ unsigned long x = 0;
+ int y = __builtin_clzl(x);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_builtin_clz=yes
+else
+ je_cv_builtin_clz=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5
+$as_echo "$je_cv_builtin_clz" >&6; }
+
+if test "x${je_cv_builtin_clz}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5
+$as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; }
+if ${je_cv_os_unfair_lock+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <os/lock.h>
+
+int
+main ()
+{
+
+ os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
+ os_unfair_lock_lock(&lock);
+ os_unfair_lock_unlock(&lock);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_os_unfair_lock=yes
+else
+ je_cv_os_unfair_lock=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5
+$as_echo "$je_cv_os_unfair_lock" >&6; }
+
+if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_OS_UNFAIR_LOCK " >>confdefs.h
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5
+$as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; }
+if ${je_cv_osspin+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <libkern/OSAtomic.h>
+#include <inttypes.h>
+
+int
+main ()
+{
+
+ OSSpinLock lock = 0;
+ OSSpinLockLock(&lock);
+ OSSpinLockUnlock(&lock);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_osspin=yes
+else
+ je_cv_osspin=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osspin" >&5
+$as_echo "$je_cv_osspin" >&6; }
+
+if test "x${je_cv_osspin}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_OSSPIN " >>confdefs.h
+
+fi
+
+
+# Check whether --enable-zone-allocator was given.
+if test "${enable_zone_allocator+set}" = set; then :
+ enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then
+ enable_zone_allocator="0"
+else
+ enable_zone_allocator="1"
+fi
+
+else
+ if test "x${abi}" = "xmacho"; then
+ enable_zone_allocator="1"
+fi
+
+
+fi
+
+
+
+if test "x${enable_zone_allocator}" = "x1" ; then
+ if test "x${abi}" != "xmacho"; then
+ as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5
+ fi
+ $as_echo "#define JEMALLOC_ZONE " >>confdefs.h
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking malloc zone version" >&5
+$as_echo_n "checking malloc zone version... " >&6; }
+
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ JEMALLOC_ZONE_VERSION=3
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ JEMALLOC_ZONE_VERSION=5
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ JEMALLOC_ZONE_VERSION=6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ JEMALLOC_ZONE_VERSION=7
+else
+ JEMALLOC_ZONE_VERSION=
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ JEMALLOC_ZONE_VERSION=8
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc/malloc.h>
+int
+main ()
+{
+static int foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 1 : -1]
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ JEMALLOC_ZONE_VERSION=9
+else
+ JEMALLOC_ZONE_VERSION=
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; }
+ as_fn_error $? "Unsupported malloc zone version" "$LINENO" 5
+ fi
+ if test "${JEMALLOC_ZONE_VERSION}" = 9; then
+ JEMALLOC_ZONE_VERSION=8
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: > 8" >&5
+$as_echo "> 8" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JEMALLOC_ZONE_VERSION" >&5
+$as_echo "$JEMALLOC_ZONE_VERSION" >&6; }
+ fi
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_ZONE_VERSION $JEMALLOC_ZONE_VERSION
+_ACEOF
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5
+$as_echo_n "checking whether glibc malloc hook is compilable... " >&6; }
+if ${je_cv_glibc_malloc_hook+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stddef.h>
+
+extern void (* __free_hook)(void *ptr);
+extern void *(* __malloc_hook)(size_t size);
+extern void *(* __realloc_hook)(void *ptr, size_t size);
+
+int
+main ()
+{
+
+ void *ptr = 0L;
+ if (__malloc_hook) ptr = __malloc_hook(1);
+ if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
+ if (__free_hook && ptr) __free_hook(ptr);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_glibc_malloc_hook=yes
+else
+ je_cv_glibc_malloc_hook=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5
+$as_echo "$je_cv_glibc_malloc_hook" >&6; }
+
+if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK " >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5
+$as_echo_n "checking whether glibc memalign hook is compilable... " >&6; }
+if ${je_cv_glibc_memalign_hook+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stddef.h>
+
+extern void *(* __memalign_hook)(size_t alignment, size_t size);
+
+int
+main ()
+{
+
+ void *ptr = 0L;
+ if (__memalign_hook) ptr = __memalign_hook(16, 7);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_glibc_memalign_hook=yes
+else
+ je_cv_glibc_memalign_hook=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5
+$as_echo "$je_cv_glibc_memalign_hook" >&6; }
+
+if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK " >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5
+$as_echo_n "checking whether pthreads adaptive mutexes is compilable... " >&6; }
+if ${je_cv_pthread_mutex_adaptive_np+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <pthread.h>
+
+int
+main ()
+{
+
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ pthread_mutexattr_destroy(&attr);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_pthread_mutex_adaptive_np=yes
+else
+ je_cv_pthread_mutex_adaptive_np=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5
+$as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; }
+
+if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP " >>confdefs.h
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
+$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
+if ${ac_cv_header_stdbool_h+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdbool.h>
+ #ifndef bool
+ "error: bool is not defined"
+ #endif
+ #ifndef false
+ "error: false is not defined"
+ #endif
+ #if false
+ "error: false is not 0"
+ #endif
+ #ifndef true
+ "error: true is not defined"
+ #endif
+ #if true != 1
+ "error: true is not 1"
+ #endif
+ #ifndef __bool_true_false_are_defined
+ "error: __bool_true_false_are_defined is not defined"
+ #endif
+
+ struct s { _Bool s: 1; _Bool t; } s;
+
+ char a[true == 1 ? 1 : -1];
+ char b[false == 0 ? 1 : -1];
+ char c[__bool_true_false_are_defined == 1 ? 1 : -1];
+ char d[(bool) 0.5 == true ? 1 : -1];
+ /* See body of main program for 'e'. */
+ char f[(_Bool) 0.0 == false ? 1 : -1];
+ char g[true];
+ char h[sizeof (_Bool)];
+ char i[sizeof s.t];
+ enum { j = false, k = true, l = false * true, m = true * 256 };
+ /* The following fails for
+ HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
+ _Bool n[m];
+ char o[sizeof n == m * sizeof n[0] ? 1 : -1];
+ char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
+ /* Catch a bug in an HP-UX C compiler. See
+ http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
+ http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
+ */
+ _Bool q = true;
+ _Bool *pq = &q;
+
+int
+main ()
+{
+
+ bool e = &s;
+ *pq |= q;
+ *pq |= ! q;
+ /* Refer to every declared value, to avoid compiler optimizations. */
+ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
+ + !m + !n + !o + !p + !q + !pq);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_stdbool_h=yes
+else
+ ac_cv_header_stdbool_h=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
+$as_echo "$ac_cv_header_stdbool_h" >&6; }
+ ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
+if test "x$ac_cv_type__Bool" = xyes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BOOL 1
+_ACEOF
+
+
+fi
+
+
+if test $ac_cv_header_stdbool_h = yes; then
+
+$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
+
+fi
+
+
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/private_namespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/private_unnamespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/size_classes.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h"
+
+
+
+
+ac_config_headers="$ac_config_headers $cfghdrs_tup"
+
+
+
+ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof"
+
+
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ if test "x$cache_file" != "x/dev/null"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by $as_me, which was
+generated by GNU Autoconf 2.69. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to the package provider."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+config.status
+configured by $0, generated by GNU Autoconf 2.69,
+ with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=?*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ $as_echo "$ac_cs_config"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+ *) as_fn_append ac_config_targets " $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ mangling_map="${mangling_map}"
+ public_syms="${public_syms}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ SHELL="${SHELL}"
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ LG_QUANTA="${LG_QUANTA}"
+ LG_TINY_MIN=${LG_TINY_MIN}
+ LG_PAGE_SIZES="${LG_PAGE_SIZES}"
+ LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ install_suffix="${install_suffix}"
+
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "include/jemalloc/internal/private_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_namespace.h" ;;
+ "include/jemalloc/internal/private_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_unnamespace.h" ;;
+ "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;;
+ "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;;
+ "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;;
+ "include/jemalloc/internal/size_classes.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/size_classes.h" ;;
+ "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;;
+ "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;;
+ "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;;
+ "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;;
+ "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;;
+ "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;;
+ "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;;
+ "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;;
+ "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;;
+ "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;;
+ "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;;
+
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp= ac_tmp=
+ trap 'exit_status=$?
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+ trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$ac_tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir="$ac_dir"; as_fn_mkdir_p
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$ac_tmp/stdin"
+ case $ac_file in
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$ac_tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
+ fi
+ ;;
+
+ :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "include/jemalloc/internal/private_namespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h"
+ ;;
+ "include/jemalloc/internal/private_unnamespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h"
+ ;;
+ "include/jemalloc/internal/public_symbols.txt":C)
+ f="${objroot}include/jemalloc/internal/public_symbols.txt"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ cp /dev/null "${f}"
+ for nm in `echo ${mangling_map} |tr ',' ' '` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
+ echo "${n}:${m}" >> "${f}"
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
+ done
+ for sym in ${public_syms} ; do
+ n="${sym}"
+ m="${JEMALLOC_PREFIX}${sym}"
+ echo "${n}:${m}" >> "${f}"
+ done
+ ;;
+ "include/jemalloc/internal/public_namespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
+ ;;
+ "include/jemalloc/internal/public_unnamespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
+ ;;
+ "include/jemalloc/internal/size_classes.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
+ ;;
+ "include/jemalloc/jemalloc_protos_jet.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
+ ;;
+ "include/jemalloc/jemalloc_rename.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
+ ;;
+ "include/jemalloc/jemalloc_mangle.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
+ ;;
+ "include/jemalloc/jemalloc_mangle_jet.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
+ ;;
+ "include/jemalloc/jemalloc.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
+ ;;
+
+ esac
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5
+$as_echo "===============================================================================" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version : ${jemalloc_version}" >&5
+$as_echo "jemalloc version : ${jemalloc_version}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision : ${rev}" >&5
+$as_echo "library revision : ${rev}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
+$as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5
+$as_echo "CONFIG : ${CONFIG}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5
+$as_echo "CC : ${CC}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS : ${CFLAGS}" >&5
+$as_echo "CFLAGS : ${CFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&5
+$as_echo "EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5
+$as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5
+$as_echo "LDFLAGS : ${LDFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5
+$as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5
+$as_echo "LIBS : ${LIBS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5
+$as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
+$as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5
+$as_echo "XSLTPROC : ${XSLTPROC}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5
+$as_echo "XSLROOT : ${XSLROOT}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
+$as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX : ${PREFIX}" >&5
+$as_echo "PREFIX : ${PREFIX}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5
+$as_echo "BINDIR : ${BINDIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5
+$as_echo "DATADIR : ${DATADIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5
+$as_echo "INCLUDEDIR : ${INCLUDEDIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5
+$as_echo "LIBDIR : ${LIBDIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5
+$as_echo "MANDIR : ${MANDIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
+$as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot : ${srcroot}" >&5
+$as_echo "srcroot : ${srcroot}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot : ${abs_srcroot}" >&5
+$as_echo "abs_srcroot : ${abs_srcroot}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot : ${objroot}" >&5
+$as_echo "objroot : ${objroot}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_objroot : ${abs_objroot}" >&5
+$as_echo "abs_objroot : ${abs_objroot}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
+$as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&5
+$as_echo "JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5
+$as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5
+$as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5
+$as_echo "install_suffix : ${install_suffix}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf : ${config_malloc_conf}" >&5
+$as_echo "malloc_conf : ${config_malloc_conf}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5
+$as_echo "autogen : ${enable_autogen}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cc-silence : ${enable_cc_silence}" >&5
+$as_echo "cc-silence : ${enable_cc_silence}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5
+$as_echo "debug : ${enable_debug}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: code-coverage : ${enable_code_coverage}" >&5
+$as_echo "code-coverage : ${enable_code_coverage}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5
+$as_echo "stats : ${enable_stats}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5
+$as_echo "prof : ${enable_prof}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind : ${enable_prof_libunwind}" >&5
+$as_echo "prof-libunwind : ${enable_prof_libunwind}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc : ${enable_prof_libgcc}" >&5
+$as_echo "prof-libgcc : ${enable_prof_libgcc}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc : ${enable_prof_gcc}" >&5
+$as_echo "prof-gcc : ${enable_prof_gcc}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: tcache : ${enable_tcache}" >&5
+$as_echo "tcache : ${enable_tcache}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: fill : ${enable_fill}" >&5
+$as_echo "fill : ${enable_fill}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace : ${enable_utrace}" >&5
+$as_echo "utrace : ${enable_utrace}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: valgrind : ${enable_valgrind}" >&5
+$as_echo "valgrind : ${enable_valgrind}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5
+$as_echo "xmalloc : ${enable_xmalloc}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: munmap : ${enable_munmap}" >&5
+$as_echo "munmap : ${enable_munmap}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5
+$as_echo "lazy_lock : ${enable_lazy_lock}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: tls : ${enable_tls}" >&5
+$as_echo "tls : ${enable_tls}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious : ${enable_cache_oblivious}" >&5
+$as_echo "cache-oblivious : ${enable_cache_oblivious}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5
+$as_echo "===============================================================================" >&6; }
diff --git a/memory/jemalloc/src/configure.ac b/memory/jemalloc/src/configure.ac
new file mode 100644
index 000000000..104fd994d
--- /dev/null
+++ b/memory/jemalloc/src/configure.ac
@@ -0,0 +1,1970 @@
+dnl Process this file with autoconf to produce a configure script.
+AC_INIT([Makefile.in])
+
+AC_CONFIG_AUX_DIR([build-aux])
+
+dnl ============================================================================
+dnl Custom macro definitions.
+
+dnl JE_CFLAGS_APPEND(cflag)
+AC_DEFUN([JE_CFLAGS_APPEND],
+[
+AC_MSG_CHECKING([whether compiler supports $1])
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="$1"
+else
+ CFLAGS="${CFLAGS} $1"
+fi
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+]], [[
+ return 0;
+]])],
+ [je_cv_cflags_appended=$1]
+ AC_MSG_RESULT([yes]),
+ [je_cv_cflags_appended=]
+ AC_MSG_RESULT([no])
+ [CFLAGS="${TCFLAGS}"]
+)
+])
+
+dnl JE_COMPILABLE(label, hcode, mcode, rvar)
+dnl
+dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
+dnl cause failure.
+AC_DEFUN([JE_COMPILABLE],
+[
+AC_CACHE_CHECK([whether $1 is compilable],
+ [$4],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2],
+ [$3])],
+ [$4=yes],
+ [$4=no])])
+])
+
+dnl ============================================================================
+
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+AC_SUBST([CONFIG])
+
+dnl Library revision.
+rev=2
+AC_SUBST([rev])
+
+srcroot=$srcdir
+if test "x${srcroot}" = "x." ; then
+ srcroot=""
+else
+ srcroot="${srcroot}/"
+fi
+AC_SUBST([srcroot])
+abs_srcroot="`cd \"${srcdir}\"; pwd`/"
+AC_SUBST([abs_srcroot])
+
+objroot=""
+AC_SUBST([objroot])
+abs_objroot="`pwd`/"
+AC_SUBST([abs_objroot])
+
+dnl Munge install path variables.
+if test "x$prefix" = "xNONE" ; then
+ prefix="/usr/local"
+fi
+if test "x$exec_prefix" = "xNONE" ; then
+ exec_prefix=$prefix
+fi
+PREFIX=$prefix
+AC_SUBST([PREFIX])
+BINDIR=`eval echo $bindir`
+BINDIR=`eval echo $BINDIR`
+AC_SUBST([BINDIR])
+INCLUDEDIR=`eval echo $includedir`
+INCLUDEDIR=`eval echo $INCLUDEDIR`
+AC_SUBST([INCLUDEDIR])
+LIBDIR=`eval echo $libdir`
+LIBDIR=`eval echo $LIBDIR`
+AC_SUBST([LIBDIR])
+DATADIR=`eval echo $datadir`
+DATADIR=`eval echo $DATADIR`
+AC_SUBST([DATADIR])
+MANDIR=`eval echo $mandir`
+MANDIR=`eval echo $MANDIR`
+AC_SUBST([MANDIR])
+
+dnl Support for building documentation.
+AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
+if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
+ DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
+elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
+ DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
+else
+ dnl Documentation building will fail if this default gets used.
+ DEFAULT_XSLROOT=""
+fi
+AC_ARG_WITH([xslroot],
+ [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [
+if test "x$with_xslroot" = "xno" ; then
+ XSLROOT="${DEFAULT_XSLROOT}"
+else
+ XSLROOT="${with_xslroot}"
+fi
+],
+ XSLROOT="${DEFAULT_XSLROOT}"
+)
+AC_SUBST([XSLROOT])
+
+dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise,
+dnl just prevent autoconf from molesting CFLAGS.
+CFLAGS=$CFLAGS
+AC_PROG_CC
+
+if test "x$GCC" != "xyes" ; then
+ AC_CACHE_CHECK([whether compiler is MSVC],
+ [je_cv_msvc],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#ifndef _MSC_VER
+ int fail[-1];
+#endif
+])],
+ [je_cv_msvc=yes],
+ [je_cv_msvc=no])])
+fi
+
+dnl check if a cray prgenv wrapper compiler is being used
+je_cv_cray_prgenv_wrapper=""
+if test "x${PE_ENV}" != "x" ; then
+ case "${CC}" in
+ CC|cc)
+ je_cv_cray_prgenv_wrapper="yes"
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+AC_CACHE_CHECK([whether compiler is cray],
+ [je_cv_cray],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#ifndef _CRAYC
+ int fail[-1];
+#endif
+])],
+ [je_cv_cray=yes],
+ [je_cv_cray=no])])
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ AC_CACHE_CHECK([whether cray compiler version is 8.4],
+ [je_cv_cray_84],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
+ int fail[-1];
+#endif
+])],
+ [je_cv_cray_84=yes],
+ [je_cv_cray_84=no])])
+fi
+
+if test "x$CFLAGS" = "x" ; then
+ no_CFLAGS="yes"
+ if test "x$GCC" = "xyes" ; then
+dnl JE_CFLAGS_APPEND([-std=gnu99])
+ JE_CFLAGS_APPEND([-std=gnu11])
+ if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
+ else
+ JE_CFLAGS_APPEND([-std=gnu99])
+ if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
+ fi
+ fi
+ JE_CFLAGS_APPEND([-Wall])
+ JE_CFLAGS_APPEND([-Werror=declaration-after-statement])
+ JE_CFLAGS_APPEND([-Wshorten-64-to-32])
+ JE_CFLAGS_APPEND([-Wsign-compare])
+ JE_CFLAGS_APPEND([-pipe])
+ JE_CFLAGS_APPEND([-g3])
+ elif test "x$je_cv_msvc" = "xyes" ; then
+ CC="$CC -nologo"
+ JE_CFLAGS_APPEND([-Zi])
+ JE_CFLAGS_APPEND([-MT])
+ JE_CFLAGS_APPEND([-W3])
+ JE_CFLAGS_APPEND([-FS])
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
+ fi
+ if test "x$je_cv_cray" = "xyes" ; then
+ dnl cray compiler 8.4 has an inlining bug
+ if test "x$je_cv_cray_84" = "xyes" ; then
+ JE_CFLAGS_APPEND([-hipa2])
+ JE_CFLAGS_APPEND([-hnognu])
+ fi
+ if test "x$enable_cc_silence" != "xno" ; then
+ dnl ignore unreachable code warning
+ JE_CFLAGS_APPEND([-hnomessage=128])
+ dnl ignore redefinition of "malloc", "free", etc warning
+ JE_CFLAGS_APPEND([-hnomessage=1357])
+ fi
+ fi
+fi
+AC_SUBST([EXTRA_CFLAGS])
+AC_PROG_CPP
+
+AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
+if test "x${ac_cv_big_endian}" = "x1" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
+fi
+
+if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
+fi
+
+if test "x${je_cv_msvc}" = "xyes" ; then
+ LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
+ AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
+else
+ AC_CHECK_SIZEOF([void *])
+ if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+ LG_SIZEOF_PTR=3
+ elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+ LG_SIZEOF_PTR=2
+ else
+ AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
+ fi
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])
+
+AC_CHECK_SIZEOF([int])
+if test "x${ac_cv_sizeof_int}" = "x8" ; then
+ LG_SIZEOF_INT=3
+elif test "x${ac_cv_sizeof_int}" = "x4" ; then
+ LG_SIZEOF_INT=2
+else
+ AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT])
+
+AC_CHECK_SIZEOF([long])
+if test "x${ac_cv_sizeof_long}" = "x8" ; then
+ LG_SIZEOF_LONG=3
+elif test "x${ac_cv_sizeof_long}" = "x4" ; then
+ LG_SIZEOF_LONG=2
+else
+ AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG])
+
+AC_CHECK_SIZEOF([long long])
+if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
+ LG_SIZEOF_LONG_LONG=3
+elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
+ LG_SIZEOF_LONG_LONG=2
+else
+ AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG])
+
+AC_CHECK_SIZEOF([intmax_t])
+if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
+ LG_SIZEOF_INTMAX_T=4
+elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
+ LG_SIZEOF_INTMAX_T=3
+elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
+ LG_SIZEOF_INTMAX_T=2
+else
+ AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T])
+
+AC_CANONICAL_HOST
+dnl CPU-specific settings.
+CPU_SPINWAIT=""
+case "${host_cpu}" in
+ i686|x86_64)
+ if test "x${je_cv_msvc}" = "xyes" ; then
+ AC_CACHE_VAL([je_cv_pause_msvc],
+ [JE_COMPILABLE([pause instruction MSVC], [],
+ [[_mm_pause(); return 0;]],
+ [je_cv_pause_msvc])])
+ if test "x${je_cv_pause_msvc}" = "xyes" ; then
+ CPU_SPINWAIT='_mm_pause()'
+ fi
+ else
+ AC_CACHE_VAL([je_cv_pause],
+ [JE_COMPILABLE([pause instruction], [],
+ [[__asm__ volatile("pause"); return 0;]],
+ [je_cv_pause])])
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
+ fi
+ fi
+ ;;
+ powerpc)
+ AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
+ ;;
+ *)
+ ;;
+esac
+AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
+
+LD_PRELOAD_VAR="LD_PRELOAD"
+so="so"
+importlib="${so}"
+o="$ac_objext"
+a="a"
+exe="$ac_exeext"
+libprefix="lib"
+link_whole_archive="0"
+DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
+RPATH='-Wl,-rpath,$(1)'
+SOREV="${so}.${rev}"
+PIC_CFLAGS='-fPIC -DPIC'
+CTARGET='-o $@'
+LDTARGET='-o $@'
+TEST_LD_MODE=
+EXTRA_LDFLAGS=
+ARFLAGS='crus'
+AROUT=' $@'
+CC_MM=1
+
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ TEST_LD_MODE='-dynamic'
+fi
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ CC_MM=
+fi
+
+AN_MAKEVAR([AR], [AC_PROG_AR])
+AN_PROGRAM([ar], [AC_PROG_AR])
+AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
+AC_PROG_AR
+
+dnl Platform-specific settings. abi and RPATH can probably be determined
+dnl programmatically, but doing so is error-prone, which makes it generally
+dnl not worth the trouble.
+dnl
+dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
+dnl definitions need to be seen before any headers are included, which is a pain
+dnl to make happen otherwise.
+CFLAGS="$CFLAGS"
+default_munmap="1"
+maps_coalesce="1"
+case "${host}" in
+ *-*-darwin* | *-*-ios*)
+ abi="macho"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ RPATH=""
+ LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
+ so="dylib"
+ importlib="${so}"
+ force_tls="0"
+ DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
+ SOREV="${rev}.${so}"
+ sbrk_deprecated="1"
+ ;;
+ *-*-freebsd*)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ force_lazy_lock="1"
+ ;;
+ *-*-dragonfly*)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ ;;
+ *-*-openbsd*)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ force_tls="0"
+ ;;
+ *-*-bitrig*)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ ;;
+ *-*-linux*)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+ abi="elf"
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
+ AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+ AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
+ default_munmap="0"
+ ;;
+ *-*-netbsd*)
+ AC_MSG_CHECKING([ABI])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[#ifdef __ELF__
+/* ELF */
+#else
+#error aout
+#endif
+]])],
+ [abi="elf"],
+ [abi="aout"])
+ AC_MSG_RESULT([$abi])
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ ;;
+ *-*-solaris2*)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ RPATH='-Wl,-R,$(1)'
+ dnl Solaris needs this for sigwait().
+ CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
+ LIBS="$LIBS -lposix4 -lsocket -lnsl"
+ ;;
+ *-ibm-aix*)
+ if "$LG_SIZEOF_PTR" = "8"; then
+ dnl 64bit AIX
+ LD_PRELOAD_VAR="LDR_PRELOAD64"
+ else
+ dnl 32bit AIX
+ LD_PRELOAD_VAR="LDR_PRELOAD"
+ fi
+ abi="xcoff"
+ ;;
+ *-*-mingw* | *-*-cygwin*)
+ abi="pecoff"
+ force_tls="0"
+ maps_coalesce="0"
+ RPATH=""
+ so="dll"
+ if test "x$je_cv_msvc" = "xyes" ; then
+ importlib="lib"
+ DSO_LDFLAGS="-LD"
+ EXTRA_LDFLAGS="-link -DEBUG"
+ CTARGET='-Fo$@'
+ LDTARGET='-Fe$@'
+ AR='lib'
+ ARFLAGS='-nologo -out:'
+ AROUT='$@'
+ CC_MM=
+ else
+ importlib="${so}"
+ DSO_LDFLAGS="-shared"
+ link_whole_archive="1"
+ fi
+ a="lib"
+ libprefix=""
+ SOREV="${so}"
+ PIC_CFLAGS=""
+ ;;
+ *)
+ AC_MSG_RESULT([Unsupported operating system: ${host}])
+ abi="elf"
+ ;;
+esac
+
+JEMALLOC_USABLE_SIZE_CONST=const
+AC_CHECK_HEADERS([malloc.h], [
+ AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+ [#include <malloc.h>
+ #include <stddef.h>
+ size_t malloc_usable_size(const void *ptr);
+ ],
+ [])],[
+ AC_MSG_RESULT([yes])
+ ],[
+ JEMALLOC_USABLE_SIZE_CONST=
+ AC_MSG_RESULT([no])
+ ])
+])
+AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
+AC_SUBST([abi])
+AC_SUBST([RPATH])
+AC_SUBST([LD_PRELOAD_VAR])
+AC_SUBST([so])
+AC_SUBST([importlib])
+AC_SUBST([o])
+AC_SUBST([a])
+AC_SUBST([exe])
+AC_SUBST([libprefix])
+AC_SUBST([link_whole_archive])
+AC_SUBST([DSO_LDFLAGS])
+AC_SUBST([EXTRA_LDFLAGS])
+AC_SUBST([SOREV])
+AC_SUBST([PIC_CFLAGS])
+AC_SUBST([CTARGET])
+AC_SUBST([LDTARGET])
+AC_SUBST([TEST_LD_MODE])
+AC_SUBST([MKLIB])
+AC_SUBST([ARFLAGS])
+AC_SUBST([AROUT])
+AC_SUBST([CC_MM])
+
+dnl Determine whether libm must be linked to use e.g. log(3).
+AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
+if test "x$ac_cv_search_log" != "xnone required" ; then
+ LM="$ac_cv_search_log"
+else
+ LM=
+fi
+AC_SUBST(LM)
+
+JE_COMPILABLE([__attribute__ syntax],
+ [static __attribute__((unused)) void foo(void){}],
+ [],
+ [je_cv_attribute])
+if test "x${je_cv_attribute}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
+ if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
+ JE_CFLAGS_APPEND([-fvisibility=hidden])
+ fi
+fi
+dnl Check for tls_model attribute support (clang 3.0 still lacks support).
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_CFLAGS_APPEND([-herror_on_warning])
+JE_COMPILABLE([tls_model attribute], [],
+ [static __thread int
+ __attribute__((tls_model("initial-exec"), unused)) foo;
+ foo = 0;],
+ [je_cv_tls_model])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_tls_model}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_TLS_MODEL],
+ [__attribute__((tls_model("initial-exec")))])
+else
+ AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
+fi
+dnl Check for alloc_size attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_CFLAGS_APPEND([-herror_on_warning])
+JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
+ [void *foo(size_t size) __attribute__((alloc_size(1)));],
+ [je_cv_alloc_size])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
+fi
+dnl Check for format(gnu_printf, ...) attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_CFLAGS_APPEND([-herror_on_warning])
+JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
+ [je_cv_format_gnu_printf])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
+fi
+dnl Check for format(printf, ...) attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_CFLAGS_APPEND([-herror_on_warning])
+JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
+ [je_cv_format_printf])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
+fi
+
+dnl Support optional additions to rpath.
+AC_ARG_WITH([rpath],
+ [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
+if test "x$with_rpath" = "xno" ; then
+ RPATH_EXTRA=
+else
+ RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
+fi,
+ RPATH_EXTRA=
+)
+AC_SUBST([RPATH_EXTRA])
+
+dnl Disable rules that do automatic regeneration of configure output by default.
+AC_ARG_ENABLE([autogen],
+ [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])],
+if test "x$enable_autogen" = "xno" ; then
+ enable_autogen="0"
+else
+ enable_autogen="1"
+fi
+,
+enable_autogen="0"
+)
+AC_SUBST([enable_autogen])
+
+AC_PROG_INSTALL
+AC_PROG_RANLIB
+AC_PATH_PROG([LD], [ld], [false], [$PATH])
+AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
+
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
+
+dnl Check for allocator-related functions that should be wrapped.
+AC_CHECK_FUNC([memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ])
+ public_syms="${public_syms} memalign"])
+AC_CHECK_FUNC([valloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
+ public_syms="${public_syms} valloc"])
+
+dnl Do not compute test code coverage by default.
+GCOV_FLAGS=
+AC_ARG_ENABLE([code-coverage],
+ [AS_HELP_STRING([--enable-code-coverage],
+ [Enable code coverage])],
+[if test "x$enable_code_coverage" = "xno" ; then
+ enable_code_coverage="0"
+else
+ enable_code_coverage="1"
+fi
+],
+[enable_code_coverage="0"]
+)
+if test "x$enable_code_coverage" = "x1" ; then
+ deoptimize="no"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
+ if test "x${deoptimize}" = "xyes" ; then
+ JE_CFLAGS_APPEND([-O0])
+ fi
+ JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage])
+ EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
+ AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ])
+fi
+AC_SUBST([enable_code_coverage])
+
+dnl Perform no name mangling by default.
+AC_ARG_WITH([mangling],
+ [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
+ [mangling_map="$with_mangling"], [mangling_map=""])
+
+dnl Do not prefix public APIs by default.
+AC_ARG_WITH([jemalloc_prefix],
+ [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])],
+ [JEMALLOC_PREFIX="$with_jemalloc_prefix"],
+ [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
+ JEMALLOC_PREFIX=""
+else
+ JEMALLOC_PREFIX="je_"
+fi]
+)
+if test "x$JEMALLOC_PREFIX" != "x" ; then
+ JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
+ AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
+ AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
+fi
+AC_SUBST([JEMALLOC_CPREFIX])
+
+AC_ARG_WITH([export],
+ [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
+ [if test "x$with_export" = "xno"; then
+ AC_DEFINE([JEMALLOC_EXPORT],[])
+fi]
+)
+
+dnl Mangle library-private APIs.
+AC_ARG_WITH([private_namespace],
+ [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
+ [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
+ [JEMALLOC_PRIVATE_NAMESPACE="je_"]
+)
+AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE])
+private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
+AC_SUBST([private_namespace])
+
+dnl Do not add suffix to installed files by default.
+AC_ARG_WITH([install_suffix],
+ [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])],
+ [INSTALL_SUFFIX="$with_install_suffix"],
+ [INSTALL_SUFFIX=]
+)
+install_suffix="$INSTALL_SUFFIX"
+AC_SUBST([install_suffix])
+
+dnl Specify default malloc_conf.
+AC_ARG_WITH([malloc_conf],
+ [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
+ [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
+ [JEMALLOC_CONFIG_MALLOC_CONF=""]
+)
+config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
+AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"])
+
+dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
+dnl jemalloc_protos_jet.h easy.
+je_="je_"
+AC_SUBST([je_])
+
+cfgoutputs_in="Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
+cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in"
+cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
+
+cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
+cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
+cfgoutputs_out="${cfgoutputs_out} test/test.sh"
+cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
+
+cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
+cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
+
+cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
+
+cfghdrs_out="include/jemalloc/jemalloc_defs.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
+cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
+
+cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
+
+dnl Silence irrelevant compiler warnings by default.
+AC_ARG_ENABLE([cc-silence],
+ [AS_HELP_STRING([--disable-cc-silence],
+ [Do not silence irrelevant compiler warnings])],
+[if test "x$enable_cc_silence" = "xno" ; then
+ enable_cc_silence="0"
+else
+ enable_cc_silence="1"
+fi
+],
+[enable_cc_silence="1"]
+)
+if test "x$enable_cc_silence" = "x1" ; then
+ AC_DEFINE([JEMALLOC_CC_SILENCE], [ ])
+fi
+
+dnl Do not compile with debugging by default.
+AC_ARG_ENABLE([debug],
+ [AS_HELP_STRING([--enable-debug],
+ [Build debugging code (implies --enable-ivsalloc)])],
+[if test "x$enable_debug" = "xno" ; then
+ enable_debug="0"
+else
+ enable_debug="1"
+fi
+],
+[enable_debug="0"]
+)
+if test "x$enable_debug" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DEBUG], [ ])
+fi
+if test "x$enable_debug" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DEBUG], [ ])
+ enable_ivsalloc="1"
+fi
+AC_SUBST([enable_debug])
+
+dnl Do not validate pointers by default.
+AC_ARG_ENABLE([ivsalloc],
+ [AS_HELP_STRING([--enable-ivsalloc],
+ [Validate pointers passed through the public API])],
+[if test "x$enable_ivsalloc" = "xno" ; then
+ enable_ivsalloc="0"
+else
+ enable_ivsalloc="1"
+fi
+],
+[enable_ivsalloc="0"]
+)
+if test "x$enable_ivsalloc" = "x1" ; then
+ AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
+fi
+
+dnl Only optimize if not debugging.
+if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
+ dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS.
+ optimize="no"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes"
+ if test "x${optimize}" = "xyes" ; then
+ if test "x$GCC" = "xyes" ; then
+ JE_CFLAGS_APPEND([-O3])
+ JE_CFLAGS_APPEND([-funroll-loops])
+ elif test "x$je_cv_msvc" = "xyes" ; then
+ JE_CFLAGS_APPEND([-O2])
+ else
+ JE_CFLAGS_APPEND([-O])
+ fi
+ fi
+fi
+
+dnl Enable statistics calculation by default.
+AC_ARG_ENABLE([stats],
+ [AS_HELP_STRING([--disable-stats],
+ [Disable statistics calculation/reporting])],
+[if test "x$enable_stats" = "xno" ; then
+ enable_stats="0"
+else
+ enable_stats="1"
+fi
+],
+[enable_stats="1"]
+)
+if test "x$enable_stats" = "x1" ; then
+ AC_DEFINE([JEMALLOC_STATS], [ ])
+fi
+AC_SUBST([enable_stats])
+
+dnl Do not enable profiling by default.
+AC_ARG_ENABLE([prof],
+ [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])],
+[if test "x$enable_prof" = "xno" ; then
+ enable_prof="0"
+else
+ enable_prof="1"
+fi
+],
+[enable_prof="0"]
+)
+if test "x$enable_prof" = "x1" ; then
+ backtrace_method=""
+else
+ backtrace_method="N/A"
+fi
+
+AC_ARG_ENABLE([prof-libunwind],
+ [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])],
+[if test "x$enable_prof_libunwind" = "xno" ; then
+ enable_prof_libunwind="0"
+else
+ enable_prof_libunwind="1"
+fi
+],
+[enable_prof_libunwind="0"]
+)
+AC_ARG_WITH([static_libunwind],
+ [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>],
+ [Path to static libunwind library; use rather than dynamically linking])],
+if test "x$with_static_libunwind" = "xno" ; then
+ LUNWIND="-lunwind"
+else
+ if test ! -f "$with_static_libunwind" ; then
+ AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind])
+ fi
+ LUNWIND="$with_static_libunwind"
+fi,
+ LUNWIND="-lunwind"
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
+ AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
+ if test "x$LUNWIND" = "x-lunwind" ; then
+ AC_CHECK_LIB([unwind], [unw_backtrace], [LIBS="$LIBS $LUNWIND"],
+ [enable_prof_libunwind="0"])
+ else
+ LIBS="$LIBS $LUNWIND"
+ fi
+ if test "x${enable_prof_libunwind}" = "x1" ; then
+ backtrace_method="libunwind"
+ AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ])
+ fi
+fi
+
+AC_ARG_ENABLE([prof-libgcc],
+ [AS_HELP_STRING([--disable-prof-libgcc],
+ [Do not use libgcc for backtracing])],
+[if test "x$enable_prof_libgcc" = "xno" ; then
+ enable_prof_libgcc="0"
+else
+ enable_prof_libgcc="1"
+fi
+],
+[enable_prof_libgcc="1"]
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
+ AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"])
+ if test "x${enable_prof_libgcc}" = "x1" ; then
+ backtrace_method="libgcc"
+ AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
+ fi
+else
+ enable_prof_libgcc="0"
+fi
+
+AC_ARG_ENABLE([prof-gcc],
+ [AS_HELP_STRING([--disable-prof-gcc],
+ [Do not use gcc intrinsics for backtracing])],
+[if test "x$enable_prof_gcc" = "xno" ; then
+ enable_prof_gcc="0"
+else
+ enable_prof_gcc="1"
+fi
+],
+[enable_prof_gcc="1"]
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ JE_CFLAGS_APPEND([-fno-omit-frame-pointer])
+ backtrace_method="gcc intrinsics"
+ AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
+else
+ enable_prof_gcc="0"
+fi
+
+if test "x$backtrace_method" = "x" ; then
+ backtrace_method="none (disabling profiling)"
+ enable_prof="0"
+fi
+AC_MSG_CHECKING([configured backtracing method])
+AC_MSG_RESULT([$backtrace_method])
+if test "x$enable_prof" = "x1" ; then
+ dnl Heap profiling uses the log(3) function.
+ if test "x$LM" != "x" ; then
+ LIBS="$LIBS $LM"
+ fi
+
+ AC_DEFINE([JEMALLOC_PROF], [ ])
+fi
+AC_SUBST([enable_prof])
+
+dnl Enable thread-specific caching by default.
+AC_ARG_ENABLE([tcache],
+ [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])],
+[if test "x$enable_tcache" = "xno" ; then
+ enable_tcache="0"
+else
+ enable_tcache="1"
+fi
+],
+[enable_tcache="1"]
+)
+if test "x$enable_tcache" = "x1" ; then
+ AC_DEFINE([JEMALLOC_TCACHE], [ ])
+fi
+AC_SUBST([enable_tcache])
+
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
+dnl Enable VM deallocation via munmap() by default.
+AC_ARG_ENABLE([munmap],
+ [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
+[if test "x$enable_munmap" = "xno" ; then
+ enable_munmap="0"
+else
+ enable_munmap="1"
+fi
+],
+[enable_munmap="${default_munmap}"]
+)
+if test "x$enable_munmap" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MUNMAP], [ ])
+fi
+AC_SUBST([enable_munmap])
+
+dnl Enable allocation from DSS if supported by the OS.
+have_dss="1"
+dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
+AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
+if test "x$have_sbrk" = "x1" ; then
+ if test "x$sbrk_deprecated" = "x1" ; then
+ AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
+ have_dss="0"
+ fi
+else
+ have_dss="0"
+fi
+
+if test "x$have_dss" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DSS], [ ])
+fi
+
+dnl Support the junk/zero filling option by default.
+AC_ARG_ENABLE([fill],
+ [AS_HELP_STRING([--disable-fill],
+ [Disable support for junk/zero filling, quarantine, and redzones])],
+[if test "x$enable_fill" = "xno" ; then
+ enable_fill="0"
+else
+ enable_fill="1"
+fi
+],
+[enable_fill="1"]
+)
+if test "x$enable_fill" = "x1" ; then
+ AC_DEFINE([JEMALLOC_FILL], [ ])
+fi
+AC_SUBST([enable_fill])
+
+dnl Disable utrace(2)-based tracing by default.
+AC_ARG_ENABLE([utrace],
+ [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])],
+[if test "x$enable_utrace" = "xno" ; then
+ enable_utrace="0"
+else
+ enable_utrace="1"
+fi
+],
+[enable_utrace="0"]
+)
+JE_COMPILABLE([utrace(2)], [
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/ktrace.h>
+], [
+ utrace((void *)0, 0);
+], [je_cv_utrace])
+if test "x${je_cv_utrace}" = "xno" ; then
+ enable_utrace="0"
+fi
+if test "x$enable_utrace" = "x1" ; then
+ AC_DEFINE([JEMALLOC_UTRACE], [ ])
+fi
+AC_SUBST([enable_utrace])
+
+dnl Support Valgrind by default.
+AC_ARG_ENABLE([valgrind],
+ [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])],
+[if test "x$enable_valgrind" = "xno" ; then
+ enable_valgrind="0"
+else
+ enable_valgrind="1"
+fi
+],
+[enable_valgrind="1"]
+)
+if test "x$enable_valgrind" = "x1" ; then
+ JE_COMPILABLE([valgrind], [
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+#if !defined(VALGRIND_RESIZEINPLACE_BLOCK)
+# error "Incompatible Valgrind version"
+#endif
+], [], [je_cv_valgrind])
+ if test "x${je_cv_valgrind}" = "xno" ; then
+ enable_valgrind="0"
+ fi
+ if test "x$enable_valgrind" = "x1" ; then
+ AC_DEFINE([JEMALLOC_VALGRIND], [ ])
+ fi
+fi
+AC_SUBST([enable_valgrind])
+
+dnl Do not support the xmalloc option by default.
+AC_ARG_ENABLE([xmalloc],
+ [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
+[if test "x$enable_xmalloc" = "xno" ; then
+ enable_xmalloc="0"
+else
+ enable_xmalloc="1"
+fi
+],
+[enable_xmalloc="0"]
+)
+if test "x$enable_xmalloc" = "x1" ; then
+ AC_DEFINE([JEMALLOC_XMALLOC], [ ])
+fi
+AC_SUBST([enable_xmalloc])
+
+dnl Support cache-oblivious allocation alignment by default.
+AC_ARG_ENABLE([cache-oblivious],
+ [AS_HELP_STRING([--disable-cache-oblivious],
+ [Disable support for cache-oblivious allocation alignment])],
+[if test "x$enable_cache_oblivious" = "xno" ; then
+ enable_cache_oblivious="0"
+else
+ enable_cache_oblivious="1"
+fi
+],
+[enable_cache_oblivious="1"]
+)
+if test "x$enable_cache_oblivious" = "x1" ; then
+ AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ])
+fi
+AC_SUBST([enable_cache_oblivious])
+
+
+JE_COMPILABLE([a program using __builtin_unreachable], [
+void foo (void) {
+ __builtin_unreachable();
+}
+], [
+ {
+ foo();
+ }
+], [je_cv_gcc_builtin_unreachable])
+if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable])
+else
+ AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort])
+fi
+
+dnl ============================================================================
+dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither is found.
+dnl One of those two functions should (theoretically) exist on all platforms
+dnl that jemalloc currently has a chance of functioning on without modification.
+dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
+dnl ffsl() or __builtin_ffsl() are defined, respectively.
+JE_COMPILABLE([a program using __builtin_ffsl], [
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+], [
+ {
+ int rv = __builtin_ffsl(0x08);
+ printf("%d\n", rv);
+ }
+], [je_cv_gcc_builtin_ffsl])
+if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
+else
+ JE_COMPILABLE([a program using ffsl], [
+ #include <stdio.h>
+ #include <strings.h>
+ #include <string.h>
+ ], [
+ {
+ int rv = ffsl(0x08);
+ printf("%d\n", rv);
+ }
+ ], [je_cv_function_ffsl])
+ if test "x${je_cv_function_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
+ else
+ AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
+ fi
+fi
+
+AC_ARG_WITH([lg_tiny_min],
+ [AS_HELP_STRING([--with-lg-tiny-min=<lg-tiny-min>],
+ [Base 2 log of minimum tiny size class to support])],
+ [LG_TINY_MIN="$with_lg_tiny_min"],
+ [LG_TINY_MIN="3"])
+AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN])
+
+AC_ARG_WITH([lg_quantum],
+ [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
+ [Base 2 log of minimum allocation alignment])],
+ [LG_QUANTA="$with_lg_quantum"],
+ [LG_QUANTA="3 4"])
+if test "x$with_lg_quantum" != "x" ; then
+ AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum])
+fi
+
+AC_ARG_WITH([lg_page],
+ [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
+ [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
+if test "x$LG_PAGE" = "xdetect"; then
+ AC_CACHE_CHECK([LG_PAGE],
+ [je_cv_lg_page],
+ AC_RUN_IFELSE([AC_LANG_PROGRAM(
+[[
+#include <strings.h>
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+#include <stdio.h>
+]],
+[[
+ int result;
+ FILE *f;
+
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwPageSize;
+#else
+ result = sysconf(_SC_PAGESIZE);
+#endif
+ if (result == -1) {
+ return 1;
+ }
+ result = JEMALLOC_INTERNAL_FFSL(result) - 1;
+
+ f = fopen("conftest.out", "w");
+ if (f == NULL) {
+ return 1;
+ }
+ fprintf(f, "%d", result);
+ fclose(f);
+
+ return 0;
+]])],
+ [je_cv_lg_page=`cat conftest.out`],
+ [je_cv_lg_page=undefined],
+ [je_cv_lg_page=12]))
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+ LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
+ AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE])
+else
+ AC_MSG_ERROR([cannot determine value for LG_PAGE])
+fi
+
+AC_ARG_WITH([lg_page_sizes],
+ [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>],
+ [Base 2 logs of system page sizes to support])],
+ [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"])
+
+AC_ARG_WITH([lg_size_class_group],
+ [AS_HELP_STRING([--with-lg-size-class-group=<lg-size-class-group>],
+ [Base 2 log of size classes per doubling])],
+ [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"],
+ [LG_SIZE_CLASS_GROUP="2"])
+
+dnl ============================================================================
+dnl jemalloc configuration.
+dnl
+
+AC_ARG_WITH([version],
+ [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
+ [Version string])],
+ [
+ echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
+ if test $? -ne 0 ; then
+ AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>])
+ fi
+ echo "$with_version" > "${objroot}VERSION"
+ ], [
+ dnl Set VERSION if source directory is inside a git repository.
+ if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ dnl Pattern globs aren't powerful enough to match both single- and
+ dnl double-digit version numbers, so iterate over patterns to support up
+ dnl to version 99.99.99 without any accidental matches.
+ for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
+ done
+ fi
+ rm -f "${objroot}VERSION.tmp"
+ ])
+
+if test ! -e "${objroot}VERSION" ; then
+ if test ! -e "${srcroot}VERSION" ; then
+ AC_MSG_RESULT(
+ [Missing VERSION file, and unable to generate it; creating bogus VERSION])
+ echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+ else
+ cp ${srcroot}VERSION ${objroot}VERSION
+ fi
+fi
+jemalloc_version=`cat "${objroot}VERSION"`
+jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
+jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
+jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
+jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'`
+jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'`
+AC_SUBST([jemalloc_version])
+AC_SUBST([jemalloc_version_major])
+AC_SUBST([jemalloc_version_minor])
+AC_SUBST([jemalloc_version_bugfix])
+AC_SUBST([jemalloc_version_nrev])
+AC_SUBST([jemalloc_version_gid])
+
+dnl ============================================================================
+dnl Configure pthreads.
+
+if test "x$abi" != "xpecoff" ; then
+ AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
+ dnl Some systems may embed pthreads functionality in libc; check for libpthread
+ dnl first, but try libc too before failing.
+ AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"],
+ [AC_SEARCH_LIBS([pthread_create], , ,
+ AC_MSG_ERROR([libpthread is missing]))])
+fi
+
+CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+
+dnl Check whether clock_gettime(2) is in libc or librt.
+AC_SEARCH_LIBS([clock_gettime], [rt])
+
+dnl The Cray wrapper compiler often adds `-lrt` when using `-static`. Check with
+dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc.
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ if test "$ac_cv_search_clock_gettime" != "-lrt"; then
+ SAVED_CFLAGS="${CFLAGS}"
+
+ unset ac_cv_search_clock_gettime
+ JE_CFLAGS_APPEND([-dynamic])
+ AC_SEARCH_LIBS([clock_gettime], [rt])
+
+ CFLAGS="${SAVED_CFLAGS}"
+ fi
+fi
+
+dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+], [je_cv_clock_monotonic_coarse])
+if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE])
+fi
+
+dnl check for CLOCK_MONOTONIC.
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
+#include <unistd.h>
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
+# error _POSIX_MONOTONIC_CLOCK missing/invalid
+#endif
+], [je_cv_clock_monotonic])
+if test "x${je_cv_clock_monotonic}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC])
+fi
+
+dnl Check for mach_absolute_time().
+JE_COMPILABLE([mach_absolute_time()], [
+#include <mach/mach_time.h>
+], [
+ mach_absolute_time();
+], [je_cv_mach_absolute_time])
+if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME])
+fi
+
+dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS X
+dnl 10.12's deprecation warning prevents use.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([syscall(2)], [
+#include <sys/syscall.h>
+#include <unistd.h>
+], [
+ syscall(SYS_write, 2, "hello", 5);
+],
+ [je_cv_syscall])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x$je_cv_syscall" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SYSCALL], [ ])
+fi
+
+dnl Check if the GNU-specific secure_getenv function exists.
+AC_CHECK_FUNC([secure_getenv],
+ [have_secure_getenv="1"],
+ [have_secure_getenv="0"]
+ )
+if test "x$have_secure_getenv" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
+fi
+
+dnl Check if the Solaris/BSD issetugid function exists.
+AC_CHECK_FUNC([issetugid],
+ [have_issetugid="1"],
+ [have_issetugid="0"]
+ )
+if test "x$have_issetugid" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ])
+fi
+
+dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use
+dnl it rather than pthreads TSD cleanup functions to support cleanup during
+dnl thread exit, in order to avoid pthreads library recursion during
+dnl bootstrapping.
+AC_CHECK_FUNC([_malloc_thread_cleanup],
+ [have__malloc_thread_cleanup="1"],
+ [have__malloc_thread_cleanup="0"]
+ )
+if test "x$have__malloc_thread_cleanup" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ])
+ force_tls="1"
+fi
+
+dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If
+dnl so, mutex initialization causes allocation, and we need to implement this
+dnl callback function in order to prevent recursive allocation.
+AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
+ [have__pthread_mutex_init_calloc_cb="1"],
+ [have__pthread_mutex_init_calloc_cb="0"]
+ )
+if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MUTEX_INIT_CB])
+fi
+
+dnl Disable lazy locking by default.
+AC_ARG_ENABLE([lazy_lock],
+ [AS_HELP_STRING([--enable-lazy-lock],
+ [Enable lazy locking (only lock when multi-threaded)])],
+[if test "x$enable_lazy_lock" = "xno" ; then
+ enable_lazy_lock="0"
+else
+ enable_lazy_lock="1"
+fi
+],
+[enable_lazy_lock=""]
+)
+if test "x${enable_lazy_lock}" = "x" ; then
+ if test "x${force_lazy_lock}" = "x1" ; then
+ AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
+ enable_lazy_lock="1"
+ else
+ enable_lazy_lock="0"
+ fi
+fi
+if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
+ AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented])
+ enable_lazy_lock="0"
+fi
+if test "x$enable_lazy_lock" = "x1" ; then
+ if test "x$abi" != "xpecoff" ; then
+ AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])])
+ AC_CHECK_FUNC([dlsym], [],
+ [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"],
+ [AC_MSG_ERROR([libdl is missing])])
+ ])
+ fi
+ AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
+fi
+AC_SUBST([enable_lazy_lock])
+
+AC_ARG_ENABLE([tls],
+ [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])],
+if test "x$enable_tls" = "xno" ; then
+ enable_tls="0"
+else
+ enable_tls="1"
+fi
+,
+enable_tls=""
+)
+if test "x${enable_tls}" = "x" ; then
+ if test "x${force_tls}" = "x1" ; then
+ AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
+ enable_tls="1"
+ elif test "x${force_tls}" = "x0" ; then
+ AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
+ enable_tls="0"
+ else
+ enable_tls="1"
+ fi
+fi
+if test "x${enable_tls}" = "x1" ; then
+AC_MSG_CHECKING([for TLS])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+ __thread int x;
+]], [[
+ x = 42;
+
+ return 0;
+]])],
+ AC_MSG_RESULT([yes]),
+ AC_MSG_RESULT([no])
+ enable_tls="0")
+else
+ enable_tls="0"
+fi
+AC_SUBST([enable_tls])
+if test "x${enable_tls}" = "x1" ; then
+ if test "x${force_tls}" = "x0" ; then
+ AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
+ fi
+ AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
+elif test "x${force_tls}" = "x1" ; then
+ AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
+fi
+
+dnl ============================================================================
+dnl Check for C11 atomics.
+
+JE_COMPILABLE([C11 atomics], [
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
+], [
+ uint64_t *p = (uint64_t *)0;
+ uint64_t x = 1;
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ uint64_t r = atomic_fetch_add(a, x) + x;
+ return (r == 0);
+], [je_cv_c11atomics])
+if test "x${je_cv_c11atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_C11ATOMICS])
+fi
+
+dnl ============================================================================
+dnl Check for atomic(9) operations as provided on FreeBSD.
+
+JE_COMPILABLE([atomic(9)], [
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <inttypes.h>
+], [
+ {
+ uint32_t x32 = 0;
+ volatile uint32_t *x32p = &x32;
+ atomic_fetchadd_32(x32p, 1);
+ }
+ {
+ unsigned long xlong = 0;
+ volatile unsigned long *xlongp = &xlong;
+ atomic_fetchadd_long(xlongp, 1);
+ }
+], [je_cv_atomic9])
+if test "x${je_cv_atomic9}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_ATOMIC9])
+fi
+
+dnl ============================================================================
+dnl Check for atomic(3) operations as provided on Darwin.
+
+JE_COMPILABLE([Darwin OSAtomic*()], [
+#include <libkern/OSAtomic.h>
+#include <inttypes.h>
+], [
+ {
+ int32_t x32 = 0;
+ volatile int32_t *x32p = &x32;
+ OSAtomicAdd32(1, x32p);
+ }
+ {
+ int64_t x64 = 0;
+ volatile int64_t *x64p = &x64;
+ OSAtomicAdd64(1, x64p);
+ }
+], [je_cv_osatomic])
+if test "x${je_cv_osatomic}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OSATOMIC], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for madvise(2).
+
+JE_COMPILABLE([madvise(2)], [
+#include <sys/mman.h>
+], [
+ {
+ madvise((void *)0, 0, 0);
+ }
+], [je_cv_madvise])
+if test "x${je_cv_madvise}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ])
+fi
+
+dnl ============================================================================
+dnl Check whether __sync_{add,sub}_and_fetch() are available despite
+dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined.
+
+AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[
+ AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()],
+ [je_cv_sync_compare_and_swap_$2],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([
+ #include <stdint.h>
+ ],
+ [
+ #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2
+ {
+ uint$1_t x$1 = 0;
+ __sync_add_and_fetch(&x$1, 42);
+ __sync_sub_and_fetch(&x$1, 1);
+ }
+ #else
+ #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force
+ #endif
+ ])],
+ [je_cv_sync_compare_and_swap_$2=yes],
+ [je_cv_sync_compare_and_swap_$2=no])])
+
+ if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then
+ AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ])
+ fi
+])
+
+if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then
+ JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4)
+ JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8)
+fi
+
+dnl ============================================================================
+dnl Check for __builtin_clz() and __builtin_clzl().
+
+AC_CACHE_CHECK([for __builtin_clz],
+ [je_cv_builtin_clz],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([],
+ [
+ {
+ unsigned x = 0;
+ int y = __builtin_clz(x);
+ }
+ {
+ unsigned long x = 0;
+ int y = __builtin_clzl(x);
+ }
+ ])],
+ [je_cv_builtin_clz=yes],
+ [je_cv_builtin_clz=no])])
+
+if test "x${je_cv_builtin_clz}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for os_unfair_lock operations as provided on Darwin.
+
+JE_COMPILABLE([Darwin os_unfair_lock_*()], [
+#include <os/lock.h>
+], [
+ os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
+ os_unfair_lock_lock(&lock);
+ os_unfair_lock_unlock(&lock);
+], [je_cv_os_unfair_lock])
+if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for spinlock(3) operations as provided on Darwin.
+
+JE_COMPILABLE([Darwin OSSpin*()], [
+#include <libkern/OSAtomic.h>
+#include <inttypes.h>
+], [
+ OSSpinLock lock = 0;
+ OSSpinLockLock(&lock);
+ OSSpinLockUnlock(&lock);
+], [je_cv_osspin])
+if test "x${je_cv_osspin}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OSSPIN], [ ])
+fi
+
+dnl ============================================================================
+dnl Darwin-related configuration.
+
+AC_ARG_ENABLE([zone-allocator],
+ [AS_HELP_STRING([--disable-zone-allocator],
+ [Disable zone allocator for Darwin])],
+[if test "x$enable_zone_allocator" = "xno" ; then
+ enable_zone_allocator="0"
+else
+ enable_zone_allocator="1"
+fi
+],
+[if test "x${abi}" = "xmacho"; then
+ enable_zone_allocator="1"
+fi
+]
+)
+AC_SUBST([enable_zone_allocator])
+
+if test "x${enable_zone_allocator}" = "x1" ; then
+ if test "x${abi}" != "xmacho"; then
+ AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
+ fi
+ AC_DEFINE([JEMALLOC_ZONE], [ ])
+
+ dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6
+ dnl releases. malloc_zone_t and malloc_introspection_t have new fields in
+ dnl 10.6, which is the only source-level indication of the change.
+ AC_MSG_CHECKING([malloc zone version])
+ AC_DEFUN([JE_ZONE_PROGRAM],
+ [AC_LANG_PROGRAM(
+ [#include <malloc/malloc.h>],
+ [static int foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]]
+ )])
+
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=]
+ )])],[
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[
+ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=]
+ )])])])])
+ if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then
+ AC_MSG_RESULT([unsupported])
+ AC_MSG_ERROR([Unsupported malloc zone version])
+ fi
+ if test "${JEMALLOC_ZONE_VERSION}" = 9; then
+ JEMALLOC_ZONE_VERSION=8
+ AC_MSG_RESULT([> 8])
+ else
+ AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION])
+ fi
+ AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION])
+fi
+
+dnl ============================================================================
+dnl Check for glibc malloc hooks
+
+JE_COMPILABLE([glibc malloc hook], [
+#include <stddef.h>
+
+extern void (* __free_hook)(void *ptr);
+extern void *(* __malloc_hook)(size_t size);
+extern void *(* __realloc_hook)(void *ptr, size_t size);
+], [
+ void *ptr = 0L;
+ if (__malloc_hook) ptr = __malloc_hook(1);
+ if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
+ if (__free_hook && ptr) __free_hook(ptr);
+], [je_cv_glibc_malloc_hook])
+if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ])
+fi
+
+JE_COMPILABLE([glibc memalign hook], [
+#include <stddef.h>
+
+extern void *(* __memalign_hook)(size_t alignment, size_t size);
+], [
+ void *ptr = 0L;
+ if (__memalign_hook) ptr = __memalign_hook(16, 7);
+], [je_cv_glibc_memalign_hook])
+if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ])
+fi
+
+JE_COMPILABLE([pthreads adaptive mutexes], [
+#include <pthread.h>
+], [
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ pthread_mutexattr_destroy(&attr);
+], [je_cv_pthread_mutex_adaptive_np])
+if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
+
+dnl ============================================================================
+dnl Define commands that generate output files.
+
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
+ f="${objroot}include/jemalloc/internal/public_symbols.txt"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ cp /dev/null "${f}"
+ for nm in `echo ${mangling_map} |tr ',' ' '` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'`
+ echo "${n}:${m}" >> "${f}"
+ dnl Remove name from public_syms so that it isn't redefined later.
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
+ done
+ for sym in ${public_syms} ; do
+ n="${sym}"
+ m="${JEMALLOC_PREFIX}${sym}"
+ echo "${n}:${m}" >> "${f}"
+ done
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ mangling_map="${mangling_map}"
+ public_syms="${public_syms}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
+], [
+ SHELL="${SHELL}"
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ LG_QUANTA="${LG_QUANTA}"
+ LG_TINY_MIN=${LG_TINY_MIN}
+ LG_PAGE_SIZES="${LG_PAGE_SIZES}"
+ LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ install_suffix="${install_suffix}"
+])
+
+dnl Process .in files.
+AC_SUBST([cfghdrs_in])
+AC_SUBST([cfghdrs_out])
+AC_CONFIG_HEADERS([$cfghdrs_tup])
+
+dnl ============================================================================
+dnl Generate outputs.
+
+AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
+AC_SUBST([cfgoutputs_in])
+AC_SUBST([cfgoutputs_out])
+AC_OUTPUT
+
+dnl ============================================================================
+dnl Print out the results of configuration.
+AC_MSG_RESULT([===============================================================================])
+AC_MSG_RESULT([jemalloc version : ${jemalloc_version}])
+AC_MSG_RESULT([library revision : ${rev}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([CONFIG : ${CONFIG}])
+AC_MSG_RESULT([CC : ${CC}])
+AC_MSG_RESULT([CFLAGS : ${CFLAGS}])
+AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}])
+AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
+AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
+AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
+AC_MSG_RESULT([LIBS : ${LIBS}])
+AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}])
+AC_MSG_RESULT([XSLROOT : ${XSLROOT}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([PREFIX : ${PREFIX}])
+AC_MSG_RESULT([BINDIR : ${BINDIR}])
+AC_MSG_RESULT([DATADIR : ${DATADIR}])
+AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}])
+AC_MSG_RESULT([LIBDIR : ${LIBDIR}])
+AC_MSG_RESULT([MANDIR : ${MANDIR}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([srcroot : ${srcroot}])
+AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}])
+AC_MSG_RESULT([objroot : ${objroot}])
+AC_MSG_RESULT([abs_objroot : ${abs_objroot}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}])
+AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
+AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}])
+AC_MSG_RESULT([install_suffix : ${install_suffix}])
+AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}])
+AC_MSG_RESULT([autogen : ${enable_autogen}])
+AC_MSG_RESULT([cc-silence : ${enable_cc_silence}])
+AC_MSG_RESULT([debug : ${enable_debug}])
+AC_MSG_RESULT([code-coverage : ${enable_code_coverage}])
+AC_MSG_RESULT([stats : ${enable_stats}])
+AC_MSG_RESULT([prof : ${enable_prof}])
+AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
+AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
+AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
+AC_MSG_RESULT([tcache : ${enable_tcache}])
+AC_MSG_RESULT([fill : ${enable_fill}])
+AC_MSG_RESULT([utrace : ${enable_utrace}])
+AC_MSG_RESULT([valgrind : ${enable_valgrind}])
+AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
+AC_MSG_RESULT([munmap : ${enable_munmap}])
+AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
+AC_MSG_RESULT([tls : ${enable_tls}])
+AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
+AC_MSG_RESULT([===============================================================================])
diff --git a/memory/jemalloc/src/coverage.sh b/memory/jemalloc/src/coverage.sh
new file mode 100755
index 000000000..6d1362a8c
--- /dev/null
+++ b/memory/jemalloc/src/coverage.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set -e
+
+objdir=$1
+suffix=$2
+shift 2
+objs=$@
+
+gcov -b -p -f -o "${objdir}" ${objs}
+
+# Move gcov outputs so that subsequent gcov invocations won't clobber results
+# for the same sources with different compilation flags.
+for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do
+ mv "${f}" "${f}.${suffix}"
+done
diff --git a/memory/jemalloc/src/doc/html.xsl.in b/memory/jemalloc/src/doc/html.xsl.in
new file mode 100644
index 000000000..ec4fa6552
--- /dev/null
+++ b/memory/jemalloc/src/doc/html.xsl.in
@@ -0,0 +1,5 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:import href="@XSLROOT@/html/docbook.xsl"/>
+ <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
+ <xsl:output method="xml" encoding="utf-8"/>
+</xsl:stylesheet>
diff --git a/memory/jemalloc/src/doc/jemalloc.xml.in b/memory/jemalloc/src/doc/jemalloc.xml.in
new file mode 100644
index 000000000..3d2e721d3
--- /dev/null
+++ b/memory/jemalloc/src/doc/jemalloc.xml.in
@@ -0,0 +1,2966 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<?xml-stylesheet type="text/xsl"
+ href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
+ "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [
+]>
+
+<refentry>
+ <refentryinfo>
+ <title>User Manual</title>
+ <productname>jemalloc</productname>
+ <releaseinfo role="version">@jemalloc_version@</releaseinfo>
+ <authorgroup>
+ <author>
+ <firstname>Jason</firstname>
+ <surname>Evans</surname>
+ <personblurb>Author</personblurb>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+ <refmeta>
+ <refentrytitle>JEMALLOC</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+ <refnamediv>
+ <refdescriptor>jemalloc</refdescriptor>
+ <refname>jemalloc</refname>
+ <!-- Each refname causes a man page file to be created. Only if this were
+ the system malloc(3) implementation would these files be appropriate.
+ <refname>malloc</refname>
+ <refname>calloc</refname>
+ <refname>posix_memalign</refname>
+ <refname>aligned_alloc</refname>
+ <refname>realloc</refname>
+ <refname>free</refname>
+ <refname>mallocx</refname>
+ <refname>rallocx</refname>
+ <refname>xallocx</refname>
+ <refname>sallocx</refname>
+ <refname>dallocx</refname>
+ <refname>sdallocx</refname>
+ <refname>nallocx</refname>
+ <refname>mallctl</refname>
+ <refname>mallctlnametomib</refname>
+ <refname>mallctlbymib</refname>
+ <refname>malloc_stats_print</refname>
+ <refname>malloc_usable_size</refname>
+ -->
+ <refpurpose>general purpose memory allocation functions</refpurpose>
+ </refnamediv>
+ <refsect1 id="library">
+ <title>LIBRARY</title>
+ <para>This manual describes jemalloc @jemalloc_version@. More information
+ can be found at the <ulink
+ url="http://jemalloc.net/">jemalloc website</ulink>.</para>
+ </refsect1>
+ <refsynopsisdiv>
+ <title>SYNOPSIS</title>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
+ <refsect2>
+ <title>Standard API</title>
+ <funcprototype>
+ <funcdef>void *<function>malloc</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>calloc</function></funcdef>
+ <paramdef>size_t <parameter>number</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>posix_memalign</function></funcdef>
+ <paramdef>void **<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>aligned_alloc</function></funcdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>realloc</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>free</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <funcprototype>
+ <funcdef>void *<function>mallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>rallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>xallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>extra</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>sallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>dallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>sdallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>nallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctl</function></funcdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
+ <paramdef>void *<parameter>oldp</parameter></paramdef>
+ <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+ <paramdef>void *<parameter>newp</parameter></paramdef>
+ <paramdef>size_t <parameter>newlen</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctlnametomib</function></funcdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
+ <paramdef>size_t *<parameter>mibp</parameter></paramdef>
+ <paramdef>size_t *<parameter>miblenp</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctlbymib</function></funcdef>
+ <paramdef>const size_t *<parameter>mib</parameter></paramdef>
+ <paramdef>size_t <parameter>miblen</parameter></paramdef>
+ <paramdef>void *<parameter>oldp</parameter></paramdef>
+ <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+ <paramdef>void *<parameter>newp</parameter></paramdef>
+ <paramdef>size_t <parameter>newlen</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>malloc_stats_print</function></funcdef>
+ <paramdef>void <parameter>(*write_cb)</parameter>
+ <funcparams>void *, const char *</funcparams>
+ </paramdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>opts</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>malloc_usable_size</function></funcdef>
+ <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>(*malloc_message)</function></funcdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>s</parameter></paramdef>
+ </funcprototype>
+ <para><type>const char *</type><varname>malloc_conf</varname>;</para>
+ </refsect2>
+ </funcsynopsis>
+ </refsynopsisdiv>
+ <refsect1 id="description">
+ <title>DESCRIPTION</title>
+ <refsect2>
+ <title>Standard API</title>
+
+ <para>The <function>malloc()</function> function allocates
+ <parameter>size</parameter> bytes of uninitialized memory. The allocated
+ space is suitably aligned (after possible pointer coercion) for storage
+ of any type of object.</para>
+
+ <para>The <function>calloc()</function> function allocates
+ space for <parameter>number</parameter> objects, each
+ <parameter>size</parameter> bytes in length. The result is identical to
+ calling <function>malloc()</function> with an argument of
+ <parameter>number</parameter> * <parameter>size</parameter>, with the
+ exception that the allocated memory is explicitly initialized to zero
+ bytes.</para>
+
+ <para>The <function>posix_memalign()</function> function
+ allocates <parameter>size</parameter> bytes of memory such that the
+ allocation's base address is a multiple of
+ <parameter>alignment</parameter>, and returns the allocation in the value
+ pointed to by <parameter>ptr</parameter>. The requested
+ <parameter>alignment</parameter> must be a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.</para>
+
+ <para>The <function>aligned_alloc()</function> function
+ allocates <parameter>size</parameter> bytes of memory such that the
+ allocation's base address is a multiple of
+ <parameter>alignment</parameter>. The requested
+ <parameter>alignment</parameter> must be a power of 2. Behavior is
+ undefined if <parameter>size</parameter> is not an integral multiple of
+ <parameter>alignment</parameter>.</para>
+
+ <para>The <function>realloc()</function> function changes the
+ size of the previously allocated memory referenced by
+ <parameter>ptr</parameter> to <parameter>size</parameter> bytes. The
+ contents of the memory are unchanged up to the lesser of the new and old
+ sizes. If the new size is larger, the contents of the newly allocated
+ portion of the memory are undefined. Upon success, the memory referenced
+ by <parameter>ptr</parameter> is freed and a pointer to the newly
+ allocated memory is returned. Note that
+ <function>realloc()</function> may move the memory allocation,
+ resulting in a different return value than <parameter>ptr</parameter>.
+ If <parameter>ptr</parameter> is <constant>NULL</constant>, the
+ <function>realloc()</function> function behaves identically to
+ <function>malloc()</function> for the specified size.</para>
+
+ <para>The <function>free()</function> function causes the
+ allocated memory referenced by <parameter>ptr</parameter> to be made
+ available for future allocations. If <parameter>ptr</parameter> is
+ <constant>NULL</constant>, no action occurs.</para>
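+
+    <para>The following fragment is an illustrative sketch of the standard
+    API described above (it assumes <filename
+    class="headerfile">jemalloc/jemalloc.h</filename> has been included, as
+    shown in the SYNOPSIS): <programlisting language="C"><![CDATA[
+void *buf = NULL;
+char *s = malloc(16), *t;
+
+/* 1024 bytes whose base address is a multiple of 64. */
+if (posix_memalign(&buf, 64, 1024) != 0)
+  buf = NULL;
+
+t = realloc(s, 32); /* May move; the first 16 bytes are preserved. */
+if (t != NULL)
+  s = t;
+
+free(s);
+free(buf);]]></programlisting></para>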
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <para>The <function>mallocx()</function>,
+ <function>rallocx()</function>,
+ <function>xallocx()</function>,
+ <function>sallocx()</function>,
+ <function>dallocx()</function>,
+ <function>sdallocx()</function>, and
+ <function>nallocx()</function> functions all have a
+ <parameter>flags</parameter> argument that can be used to specify
+ options. The functions only check the options that are contextually
+ relevant. Use bitwise or (<code language="C">|</code>) operations to
+ specify one or more of the following:
+ <variablelist>
+ <varlistentry id="MALLOCX_LG_ALIGN">
+ <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
+ </constant></term>
+
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <code language="C">(1 &lt;&lt;
+ <parameter>la</parameter>)</code>. This macro does not validate
+ that <parameter>la</parameter> is within the valid
+ range.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ALIGN">
+ <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <parameter>a</parameter>, where
+ <parameter>a</parameter> is a power of two. This macro does not
+ validate that <parameter>a</parameter> is a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ZERO">
+ <term><constant>MALLOCX_ZERO</constant></term>
+
+ <listitem><para>Initialize newly allocated memory to contain zero
+ bytes. In the growing reallocation case, the real size prior to
+ reallocation defines the boundary between untouched bytes and those
+ that are initialized to contain zero bytes. If this macro is
+ absent, newly allocated memory is uninitialized.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_TCACHE">
+ <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the thread-specific cache (tcache) specified by
+ the identifier <parameter>tc</parameter>, which must have been
+ acquired via the <link
+ linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
+ mallctl. This macro does not validate that
+ <parameter>tc</parameter> specifies a valid
+ identifier.</para></listitem>
+ </varlistentry>
+        <varlistentry id="MALLOCX_TCACHE_NONE">
+ <term><constant>MALLOCX_TCACHE_NONE</constant></term>
+
+ <listitem><para>Do not use a thread-specific cache (tcache). Unless
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
+ <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
+ automatically managed tcache will be used under many circumstances.
+ This macro cannot be used in the same <parameter>flags</parameter>
+ argument as
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ARENA">
+ <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the arena specified by the index
+ <parameter>a</parameter>. This macro has no effect for regions that
+ were allocated via an arena other than the one specified. This
+ macro does not validate that <parameter>a</parameter> specifies an
+ arena index in the valid range.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>mallocx()</function> function allocates at
+ least <parameter>size</parameter> bytes of memory, and returns a pointer
+ to the base address of the allocation. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
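+
+    <para>As an illustrative sketch, an allocation of at least 4096 bytes
+    that is zero-filled and aligned to a 64-byte boundary can be requested as
+    follows (the allocation is later released with
+    <function>dallocx()</function>, described below): <programlisting
+    language="C"><![CDATA[
+void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
+
+if (p != NULL) {
+  /* ... use p ... */
+  dallocx(p, 0);
+}]]></programlisting></para>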
+
+ <para>The <function>rallocx()</function> function resizes the
+ allocation at <parameter>ptr</parameter> to be at least
+ <parameter>size</parameter> bytes, and returns a pointer to the base
+ address of the resulting allocation, which may or may not have moved from
+ its original location. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>xallocx()</function> function resizes the
+ allocation at <parameter>ptr</parameter> in place to be at least
+ <parameter>size</parameter> bytes, and returns the real size of the
+ allocation. If <parameter>extra</parameter> is non-zero, an attempt is
+ made to resize the allocation to be at least <code
+ language="C">(<parameter>size</parameter> +
+ <parameter>extra</parameter>)</code> bytes, though inability to allocate
+ the extra byte(s) will not by itself result in failure to resize.
+ Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if <code
+ language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
+ &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
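+
+    <para>For example (an illustrative sketch), an application can attempt an
+    in-place expansion with <function>xallocx()</function> and fall back to
+    <function>rallocx()</function> only when the allocation cannot be grown
+    where it is: <programlisting language="C"><![CDATA[
+void *p = mallocx(4096, 0);
+
+if (p != NULL && xallocx(p, 8192, 0, 0) < 8192) {
+  /* Could not grow in place; relocate instead. */
+  p = rallocx(p, 8192, 0);
+}]]></programlisting></para>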
+
+ <para>The <function>sallocx()</function> function returns the
+ real size of the allocation at <parameter>ptr</parameter>.</para>
+
+ <para>The <function>dallocx()</function> function causes the
+ memory referenced by <parameter>ptr</parameter> to be made available for
+ future allocations.</para>
+
+ <para>The <function>sdallocx()</function> function is an
+ extension of <function>dallocx()</function> with a
+ <parameter>size</parameter> parameter to allow the caller to pass in the
+ allocation size as an optimization. The minimum valid input size is the
+ original requested size of the allocation, and the maximum valid input
+ size is the corresponding value returned by
+ <function>nallocx()</function> or
+ <function>sallocx()</function>.</para>
+
+ <para>The <function>nallocx()</function> function allocates no
+ memory, but it performs the same size computation as the
+ <function>mallocx()</function> function, and returns the real
+ size of the allocation that would result from the equivalent
+ <function>mallocx()</function> function call, or
+ <constant>0</constant> if the inputs exceed the maximum supported size
+ class and/or alignment. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
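+
+    <para>A minimal sketch of how <function>nallocx()</function> and
+    <function>sdallocx()</function> compose with
+    <function>mallocx()</function>: <programlisting language="C"><![CDATA[
+size_t size = 100;
+size_t usable = nallocx(size, 0); /* Real size mallocx(size, 0) would return. */
+void *p = mallocx(size, 0);
+
+if (p != NULL) {
+  /* ... use p ... */
+  /* Any value in [size, usable] is a valid size argument here. */
+  sdallocx(p, usable, 0);
+}]]></programlisting></para>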
+
+ <para>The <function>mallctl()</function> function provides a
+ general interface for introspecting the memory allocator, as well as
+ setting modifiable parameters and triggering actions. The
+ period-separated <parameter>name</parameter> argument specifies a
+ location in a tree-structured namespace; see the <xref
+ linkend="mallctl_namespace" xrefstyle="template:%t"/> section for
+ documentation on the tree contents. To read a value, pass a pointer via
+ <parameter>oldp</parameter> to adequate space to contain the value, and a
+ pointer to its length via <parameter>oldlenp</parameter>; otherwise pass
+ <constant>NULL</constant> and <constant>NULL</constant>. Similarly, to
+ write a value, pass a pointer to the value via
+ <parameter>newp</parameter>, and its length via
+ <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
+ and <constant>0</constant>.</para>
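+
+    <para>As a non-normative sketch, the read and write forms can be combined;
+    here, writing the <mallctl>epoch</mallctl> mallctl refreshes cached
+    statistics before <mallctl>stats.allocated</mallctl> is read (the latter
+    assumes <option>--enable-stats</option>): <programlisting
+    language="C"><![CDATA[
+uint64_t epoch = 1;
+size_t allocated, sz;
+
+/* Write the epoch to refresh cached statistics, and read the new epoch. */
+sz = sizeof(epoch);
+mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
+
+/* Read the refreshed total of bytes allocated by the application. */
+sz = sizeof(allocated);
+mallctl("stats.allocated", &allocated, &sz, NULL, 0);]]></programlisting></para>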
+
+ <para>The <function>mallctlnametomib()</function> function
+ provides a way to avoid repeated name lookups for applications that
+ repeatedly query the same portion of the namespace, by translating a name
+ to a <quote>Management Information Base</quote> (MIB) that can be passed
+ repeatedly to <function>mallctlbymib()</function>. Upon
+ successful return from <function>mallctlnametomib()</function>,
+ <parameter>mibp</parameter> contains an array of
+ <parameter>*miblenp</parameter> integers, where
+ <parameter>*miblenp</parameter> is the lesser of the number of components
+ in <parameter>name</parameter> and the input value of
+ <parameter>*miblenp</parameter>. Thus it is possible to pass a
+ <parameter>*miblenp</parameter> that is smaller than the number of
+ period-separated name components, which results in a partial MIB that can
+ be used as the basis for constructing a complete MIB. For name
+ components that are integers (e.g. the 2 in
+ <link
+ linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>),
+ the corresponding MIB component will always be that integer. Therefore,
+ it is legitimate to construct code like the following: <programlisting
+ language="C"><![CDATA[
+unsigned nbins, i;
+size_t mib[4];
+size_t len, miblen;
+
+len = sizeof(nbins);
+mallctl("arenas.nbins", &nbins, &len, NULL, 0);
+
+miblen = 4;
+mallctlnametomib("arenas.bin.0.size", mib, &miblen);
+for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
+ /* Do something with bin_size... */
+}]]></programlisting></para>
+
+ <para>The <function>malloc_stats_print()</function> function writes
+ summary statistics via the <parameter>write_cb</parameter> callback
+ function pointer and <parameter>cbopaque</parameter> data passed to
+ <parameter>write_cb</parameter>, or <function>malloc_message()</function>
+ if <parameter>write_cb</parameter> is <constant>NULL</constant>. The
+ statistics are presented in human-readable form unless <quote>J</quote> is
+ specified as a character within the <parameter>opts</parameter> string, in
+ which case the statistics are presented in <ulink
+ url="http://www.json.org/">JSON format</ulink>. This function can be
+ called repeatedly. General information that never changes during
+ execution can be omitted by specifying <quote>g</quote> as a character
+ within the <parameter>opts</parameter> string. Note that
+    <function>malloc_stats_print()</function> uses the
+ <function>mallctl*()</function> functions internally, so inconsistent
+ statistics can be reported if multiple threads use these functions
+ simultaneously. If <option>--enable-stats</option> is specified during
+ configuration, <quote>m</quote> and <quote>a</quote> can be specified to
+ omit merged arena and per arena statistics, respectively;
+ <quote>b</quote>, <quote>l</quote>, and <quote>h</quote> can be specified
+ to omit per size class statistics for bins, large objects, and huge
+ objects, respectively. Unrecognized characters are silently ignored.
+ Note that thread caching may prevent some statistics from being completely
+ up to date, since extra locking would be required to merge counters that
+ track thread cache operations.</para>
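+
+    <para>As a non-normative sketch, output can be routed through a custom
+    callback rather than <function>malloc_message()</function>; here the
+    <quote>g</quote> option omits general information that never changes:
+    <programlisting language="C"><![CDATA[
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+static void
+stats_write_cb(void *cbopaque, const char *s)
+{
+
+	/* Forward each piece of output to the stream passed as cbopaque. */
+	fputs(s, (FILE *)cbopaque);
+}
+
+void
+dump_stats(void)
+{
+
+	malloc_stats_print(stats_write_cb, stderr, "g");
+}]]></programlisting></para>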
+
+ <para>The <function>malloc_usable_size()</function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. The return value may be larger than the size
+ that was requested during allocation. The
+ <function>malloc_usable_size()</function> function is not a
+ mechanism for in-place <function>realloc()</function>; rather
+ it is provided solely as a tool for introspection purposes. Any
+ discrepancy between the requested allocation size and the size reported
+ by <function>malloc_usable_size()</function> should not be
+ depended on, since such behavior is entirely implementation-dependent.
+ </para>
+ </refsect2>
+ </refsect1>
+ <refsect1 id="tuning">
+ <title>TUNING</title>
+    <para>The allocator initializes its internals once, upon the first call to
+    one of the memory allocation routines, based in part on various options
+    that can be specified at compile- or run-time.</para>
+
+ <para>The string specified via <option>--with-malloc-conf</option>, the
+ string pointed to by the global variable <varname>malloc_conf</varname>, the
+ <quote>name</quote> of the file referenced by the symbolic link named
+ <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
+ environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
+ that order, from left to right as options. Note that
+ <varname>malloc_conf</varname> may be read before
+ <function>main()</function> is entered, so the declaration of
+ <varname>malloc_conf</varname> should specify an initializer that contains
+ the final value to be read by jemalloc. <option>--with-malloc-conf</option>
+ and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
+ <filename class="symlink">/etc/malloc.conf</filename> and
+ <envar>MALLOC_CONF</envar> can be safely set any time prior to program
+ invocation.</para>
+
+ <para>An options string is a comma-separated list of option:value pairs.
+ There is one key corresponding to each <link
+ linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref
+ linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options
+ documentation). For example, <literal>abort:true,narenas:1</literal> sets
+ the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link
+ linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options. Some
+ options have boolean values (true/false), others have integer values (base
+ 8, 10, or 16, depending on prefix), and yet others have raw string
+ values.</para>
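+
+    <para>As a minimal illustration, the compile-time mechanism amounts to
+    defining the global variable in the application (the same string could
+    instead be placed in the <envar>MALLOC_CONF</envar> environment variable
+    prior to program invocation): <programlisting language="C"><![CDATA[
+/* Compile-time default options, read before main() is entered. */
+const char *malloc_conf = "abort:true,narenas:1";]]></programlisting></para>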
+ </refsect1>
+ <refsect1 id="implementation_notes">
+ <title>IMPLEMENTATION NOTES</title>
+ <para>Traditionally, allocators have used
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
+ suboptimal for several reasons, including race conditions, increased
+ fragmentation, and artificial limitations on maximum usable memory. If
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system, this allocator uses both
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
+ otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is used.</para>
+
+ <para>This allocator uses multiple arenas in order to reduce lock
+ contention for threaded programs on multi-processor systems. This works
+ well with regard to threading scalability, but incurs some costs. There is
+ a small fixed per-arena overhead, and additionally, arenas manage memory
+ completely independently of each other, which means a small fixed increase
+ in overall memory fragmentation. These overheads are not generally an
+ issue, given the number of arenas normally used. Note that using
+ substantially more arenas than the default is not likely to improve
+ performance, mainly due to reduced cache performance. However, it may make
+ sense to reduce the number of arenas if an application does not make much
+ use of the allocation functions.</para>
+
+ <para>In addition to multiple arenas, unless
+ <option>--disable-tcache</option> is specified during configuration, this
+ allocator supports thread-specific caching for small and large objects, in
+ order to make it possible to completely avoid synchronization for most
+ allocation requests. Such caching allows very fast allocation in the
+ common case, but it increases memory usage and fragmentation, since a
+ bounded number of objects can remain allocated in each thread cache.</para>
+
+ <para>Memory is conceptually broken into equal-sized chunks, where the chunk
+ size is a power of two that is greater than the page size. Chunks are
+ always aligned to multiples of the chunk size. This alignment makes it
+ possible to find metadata for user objects very quickly. User objects are
+ broken into three categories according to size: small, large, and huge.
+ Multiple small and large objects can reside within a single chunk, whereas
+ huge objects each have one or more chunks backing them. Each chunk that
+ contains small and/or large objects tracks its contents as runs of
+ contiguous pages (unused, backing a set of small objects, or backing one
+ large object). The combination of chunk alignment and chunk page maps makes
+ it possible to determine all metadata regarding small and large allocations
+ in constant time.</para>
+
+ <para>Small objects are managed in groups by page runs. Each run maintains
+ a bitmap to track which regions are in use. Allocation requests that are no
+ more than half the quantum (8 or 16, depending on architecture) are rounded
+ up to the nearest power of two that is at least <code
+ language="C">sizeof(<type>double</type>)</code>. All other object size
+ classes are multiples of the quantum, spaced such that there are four size
+ classes for each doubling in size, which limits internal fragmentation to
+ approximately 20% for all but the smallest size classes. Small size classes
+ are smaller than four times the page size, large size classes are smaller
+ than the chunk size (see the <link
+ linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), and
+ huge size classes extend from the chunk size up to the largest size class
+ that does not exceed <constant>PTRDIFF_MAX</constant>.</para>
+
+ <para>Allocations are packed tightly together, which can be an issue for
+ multi-threaded applications. If you need to assure that allocations do not
+ suffer from cacheline sharing, round your allocation requests up to the
+ nearest multiple of the cacheline size, or specify cacheline alignment when
+ allocating.</para>
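+
+    <para>A minimal sketch of both approaches, assuming a 64-byte cacheline:
+    <programlisting language="C"><![CDATA[
+#define CACHELINE	64	/* assumed cacheline size */
+size_t sz = 100;		/* requested size (example value) */
+
+/* Round the request up to the next cacheline multiple... */
+void *a = malloc((sz + CACHELINE - 1) & ~(size_t)(CACHELINE - 1));
+
+/* ...or request cacheline alignment explicitly. */
+void *b = mallocx(sz, MALLOCX_ALIGN(CACHELINE));]]></programlisting></para>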
+
+ <para>The <function>realloc()</function>,
+ <function>rallocx()</function>, and
+ <function>xallocx()</function> functions may resize allocations
+ without moving them under limited circumstances. Unlike the
+ <function>*allocx()</function> API, the standard API does not
+ officially round up the usable size of an allocation to the nearest size
+ class, so technically it is necessary to call
+ <function>realloc()</function> to grow e.g. a 9-byte allocation to
+ 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
+    trivially succeed in place as long as the pre-size and post-size both round
+ up to the same size class. No other API guarantees are made regarding
+ in-place resizing, but the current implementation also tries to resize large
+ and huge allocations in place, as long as the pre-size and post-size are
+ both large or both huge. In such cases shrinkage always succeeds for large
+ size classes, but for huge size classes the chunk allocator must support
+ splitting (see <link
+ linkend="arena.i.chunk_hooks"><mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl></link>).
+ Growth only succeeds if the trailing memory is currently available, and
+ additionally for huge size classes the chunk allocator must support
+ merging.</para>
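+
+    <para>As an illustrative sketch, an application that prefers in-place
+    growth can attempt it with <function>xallocx()</function> and fall back to
+    <function>rallocx()</function>, which may move the allocation:
+    <programlisting language="C"><![CDATA[
+void *p = mallocx(100, 0);	/* an existing allocation */
+size_t usize;
+
+/* Try to grow p to at least 4096 bytes without moving it. */
+usize = xallocx(p, 4096, 0, 0);
+if (usize < 4096) {
+	/* In-place growth failed; rallocx() may move the allocation. */
+	void *q = rallocx(p, 4096, 0);
+	if (q != NULL)
+		p = q;
+}]]></programlisting></para>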
+
+ <para>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
+ 64-bit system, the size classes in each category are as shown in <xref
+ linkend="size_classes" xrefstyle="template:Table %n"/>.</para>
+
+ <table xml:id="size_classes" frame="all">
+ <title>Size classes</title>
+ <tgroup cols="3" colsep="1" rowsep="1">
+ <colspec colname="c1" align="left"/>
+ <colspec colname="c2" align="right"/>
+ <colspec colname="c3" align="left"/>
+ <thead>
+ <row>
+ <entry>Category</entry>
+ <entry>Spacing</entry>
+ <entry>Size</entry>
+ </row>
+ </thead>
+ <tbody>
+ <row>
+ <entry morerows="8">Small</entry>
+ <entry>lg</entry>
+ <entry>[8]</entry>
+ </row>
+ <row>
+ <entry>16</entry>
+ <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
+ </row>
+ <row>
+ <entry>32</entry>
+ <entry>[160, 192, 224, 256]</entry>
+ </row>
+ <row>
+ <entry>64</entry>
+ <entry>[320, 384, 448, 512]</entry>
+ </row>
+ <row>
+ <entry>128</entry>
+ <entry>[640, 768, 896, 1024]</entry>
+ </row>
+ <row>
+ <entry>256</entry>
+ <entry>[1280, 1536, 1792, 2048]</entry>
+ </row>
+ <row>
+ <entry>512</entry>
+ <entry>[2560, 3072, 3584, 4096]</entry>
+ </row>
+ <row>
+ <entry>1 KiB</entry>
+ <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
+ </row>
+ <row>
+ <entry>2 KiB</entry>
+ <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
+ </row>
+ <row>
+ <entry morerows="7">Large</entry>
+ <entry>2 KiB</entry>
+ <entry>[16 KiB]</entry>
+ </row>
+ <row>
+ <entry>4 KiB</entry>
+ <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
+ </row>
+ <row>
+ <entry>8 KiB</entry>
+          <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
+ </row>
+ <row>
+ <entry>16 KiB</entry>
+ <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
+ </row>
+ <row>
+ <entry>32 KiB</entry>
+ <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
+ </row>
+ <row>
+ <entry>64 KiB</entry>
+ <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
+ </row>
+ <row>
+ <entry>128 KiB</entry>
+ <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
+ </row>
+ <row>
+ <entry>256 KiB</entry>
+ <entry>[1280 KiB, 1536 KiB, 1792 KiB]</entry>
+ </row>
+ <row>
+ <entry morerows="8">Huge</entry>
+ <entry>256 KiB</entry>
+ <entry>[2 MiB]</entry>
+ </row>
+ <row>
+ <entry>512 KiB</entry>
+ <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
+ </row>
+ <row>
+ <entry>1 MiB</entry>
+ <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
+ </row>
+ <row>
+ <entry>2 MiB</entry>
+ <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
+ </row>
+ <row>
+ <entry>4 MiB</entry>
+ <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
+ </row>
+ <row>
+ <entry>8 MiB</entry>
+ <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
+ </row>
+ <row>
+ <entry>...</entry>
+ <entry>...</entry>
+ </row>
+ <row>
+ <entry>512 PiB</entry>
+ <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
+ </row>
+ <row>
+ <entry>1 EiB</entry>
+ <entry>[5 EiB, 6 EiB, 7 EiB]</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+ <refsect1 id="mallctl_namespace">
+ <title>MALLCTL NAMESPACE</title>
+ <para>The following names are defined in the namespace accessible via the
+ <function>mallctl*()</function> functions. Value types are
+ specified in parentheses, their readable/writable statuses are encoded as
+ <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
+ <literal>--</literal>, and required build configuration flags follow, if
+ any. A name element encoded as <literal>&lt;i&gt;</literal> or
+ <literal>&lt;j&gt;</literal> indicates an integer component, where the
+ integer varies from 0 to some upper value that must be determined via
+ introspection. In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>,
+ <literal>&lt;i&gt;</literal> equal to <link
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link> can be
+ used to access the summation of statistics from all arenas. Take special
+ note of the <link linkend="epoch"><mallctl>epoch</mallctl></link> mallctl,
+ which controls refreshing of cached dynamic statistics.</para>
+
+ <variablelist>
+ <varlistentry id="version">
+ <term>
+ <mallctl>version</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Return the jemalloc version string.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="epoch">
+ <term>
+ <mallctl>epoch</mallctl>
+ (<type>uint64_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>If a value is passed in, refresh the data from which
+ the <function>mallctl*()</function> functions report values,
+ and increment the epoch. Return the current epoch. This is useful for
+ detecting whether another thread caused a refresh.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.cache_oblivious">
+ <term>
+ <mallctl>config.cache_oblivious</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-cache-oblivious</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.debug">
+ <term>
+ <mallctl>config.debug</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-debug</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.fill">
+ <term>
+ <mallctl>config.fill</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-fill</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.lazy_lock">
+ <term>
+ <mallctl>config.lazy_lock</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-lazy-lock</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.malloc_conf">
+ <term>
+ <mallctl>config.malloc_conf</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Embedded configure-time-specified run-time options
+ string, empty unless <option>--with-malloc-conf</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.munmap">
+ <term>
+ <mallctl>config.munmap</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-munmap</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof">
+ <term>
+ <mallctl>config.prof</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-prof</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof_libgcc">
+ <term>
+ <mallctl>config.prof_libgcc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--disable-prof-libgcc</option> was not
+ specified during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof_libunwind">
+ <term>
+ <mallctl>config.prof_libunwind</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-prof-libunwind</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.stats">
+ <term>
+ <mallctl>config.stats</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-stats</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.tcache">
+ <term>
+ <mallctl>config.tcache</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--disable-tcache</option> was not specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.tls">
+ <term>
+ <mallctl>config.tls</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--disable-tls</option> was not specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.utrace">
+ <term>
+ <mallctl>config.utrace</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-utrace</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.valgrind">
+ <term>
+ <mallctl>config.valgrind</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-valgrind</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.xmalloc">
+ <term>
+ <mallctl>config.xmalloc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-xmalloc</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.abort">
+ <term>
+ <mallctl>opt.abort</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Abort-on-warning enabled/disabled. If true, most
+ warnings are fatal. The process will call
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> in these cases. This option is
+ disabled by default unless <option>--enable-debug</option> is
+ specified during configuration, in which case it is enabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.dss">
+ <term>
+ <mallctl>opt.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+ related to <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> allocation. The following
+ settings are supported if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system: <quote>disabled</quote>, <quote>primary</quote>, and
+ <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
+ supported. The default is <quote>secondary</quote> if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system; <quote>disabled</quote> otherwise.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_chunk">
+ <term>
+ <mallctl>opt.lg_chunk</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Virtual memory chunk size (log base 2). If a chunk
+ size outside the supported size range is specified, the size is
+ silently clipped to the minimum/maximum supported size. The default
+ chunk size is 2 MiB (2^21).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.narenas">
+ <term>
+ <mallctl>opt.narenas</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum number of arenas to use for automatic
+ multiplexing of threads and arenas. The default is four times the
+ number of CPUs, or one if there is a single CPU.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.purge">
+ <term>
+ <mallctl>opt.purge</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Purge mode is &ldquo;ratio&rdquo; (default) or
+ &ldquo;decay&rdquo;. See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for details of the ratio mode. See <link
+ linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+ details of the decay mode.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_dirty_mult">
+ <term>
+ <mallctl>opt.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Per-arena minimum ratio (log base 2) of active to dirty
+ pages. Some dirty unused pages may be allowed to accumulate, within
+ the limit set by the ratio (or one chunk worth of dirty pages,
+ whichever is greater), before informing the kernel about some of those
+ pages via <citerefentry><refentrytitle>madvise</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> or a similar system call. This
+ provides the kernel with sufficient information to recycle dirty pages
+ if physical memory becomes scarce and the pages remain unused. The
+ default minimum ratio is 8:1 (2^3:1); an option value of -1 will
+ disable dirty page purging. See <link
+ linkend="arenas.lg_dirty_mult"><mallctl>arenas.lg_dirty_mult</mallctl></link>
+ and <link
+ linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link>
+ for related dynamic control options.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.decay_time">
+ <term>
+ <mallctl>opt.decay_time</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in seconds from the creation of a set
+ of unused dirty pages until an equivalent set of unused dirty pages is
+ purged and/or reused. The pages are incrementally purged according to a
+ sigmoidal decay curve that starts and ends with zero purge rate. A
+ decay time of 0 causes all unused dirty pages to be purged immediately
+ upon creation. A decay time of -1 disables purging. The default decay
+ time is 10 seconds. See <link
+ linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
+ and <link
+ linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
+ for related dynamic control options.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.stats_print">
+ <term>
+ <mallctl>opt.stats_print</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Enable/disable statistics printing at exit. If
+ enabled, the <function>malloc_stats_print()</function>
+ function is called at program exit via an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function. If
+ <option>--enable-stats</option> is specified during configuration, this
+ has the potential to cause deadlock for a multi-threaded process that
+ exits while one or more threads are executing in the memory allocation
+ functions. Furthermore, <function>atexit()</function> may
+ allocate memory during application initialization and then deadlock
+ internally when jemalloc in turn calls
+ <function>atexit()</function>, so this option is not
+ universally usable (though the application can register its own
+ <function>atexit()</function> function with equivalent
+ functionality). Therefore, this option should only be used with care;
+ it is primarily intended as a performance tuning aid during application
+ development. This option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.junk">
+ <term>
+ <mallctl>opt.junk</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Junk filling. If set to <quote>alloc</quote>, each byte
+ of uninitialized allocated memory will be initialized to
+ <literal>0xa5</literal>. If set to <quote>free</quote>, all deallocated
+ memory will be initialized to <literal>0x5a</literal>. If set to
+ <quote>true</quote>, both allocated and deallocated memory will be
+      initialized, and if set to <quote>false</quote>, junk filling will be
+ disabled entirely. This is intended for debugging and will impact
+ performance negatively. This option is <quote>false</quote> by default
+ unless <option>--enable-debug</option> is specified during
+ configuration, in which case it is <quote>true</quote> by default unless
+ running inside <ulink
+ url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.quarantine">
+ <term>
+ <mallctl>opt.quarantine</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Per thread quarantine size in bytes. If non-zero, each
+ thread maintains a FIFO object quarantine that stores up to the
+ specified number of bytes of memory. The quarantined memory is not
+ freed until it is released from quarantine, though it is immediately
+ junk-filled if the <link
+ linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
+ enabled. This feature is of particular use in combination with <ulink
+ url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts
+ to access quarantined objects. This is intended for debugging and will
+ impact performance negatively. The default quarantine size is 0 unless
+ running inside Valgrind, in which case the default is 16
+ MiB.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.redzone">
+ <term>
+ <mallctl>opt.redzone</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Redzones enabled/disabled. If enabled, small
+ allocations have redzones before and after them. Furthermore, if the
+ <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
+ enabled, the redzones are checked for corruption during deallocation.
+ However, the primary intended purpose of this feature is to be used in
+ combination with <ulink url="http://valgrind.org/">Valgrind</ulink>,
+ which needs redzones in order to do effective buffer overflow/underflow
+ detection. This option is intended for debugging and will impact
+ performance negatively. This option is disabled by
+ default unless running inside Valgrind.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.zero">
+ <term>
+ <mallctl>opt.zero</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Zero filling enabled/disabled. If enabled, each byte
+ of uninitialized allocated memory will be initialized to 0. Note that
+ this initialization only happens once for each byte, so
+ <function>realloc()</function> and
+ <function>rallocx()</function> calls do not zero memory that
+ was previously allocated. This is intended for debugging and will
+ impact performance negatively. This option is disabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.utrace">
+ <term>
+ <mallctl>opt.utrace</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-utrace</option>]
+ </term>
+ <listitem><para>Allocation tracing based on
+ <citerefentry><refentrytitle>utrace</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> enabled/disabled. This option
+ is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.xmalloc">
+ <term>
+ <mallctl>opt.xmalloc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-xmalloc</option>]
+ </term>
+ <listitem><para>Abort-on-out-of-memory enabled/disabled. If enabled,
+ rather than returning failure for any allocation function, display a
+ diagnostic message on <constant>STDERR_FILENO</constant> and cause the
+ program to drop core (using
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>). If an application is
+ designed to depend on this behavior, set the option at compile time by
+ including the following in the source code:
+ <programlisting language="C"><![CDATA[
+malloc_conf = "xmalloc:true";]]></programlisting>
+ This option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.tcache">
+ <term>
+ <mallctl>opt.tcache</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Thread-specific caching (tcache) enabled/disabled. When
+ there are multiple threads, each thread uses a tcache for objects up to
+ a certain size. Thread-specific caching allows many allocations to be
+ satisfied without performing any thread synchronization, at the cost of
+ increased memory use. See the <link
+ linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
+ option for related tuning information. This option is enabled by
+ default unless running inside <ulink
+ url="http://valgrind.org/">Valgrind</ulink>, in which case it is
+ forcefully disabled.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_tcache_max">
+ <term>
+ <mallctl>opt.lg_tcache_max</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Maximum size class (log base 2) to cache in the
+ thread-specific cache (tcache). At a minimum, all small size classes
+ are cached, and at a maximum all large size classes are cached. The
+ default maximum is 32 KiB (2^15).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof">
+ <term>
+ <mallctl>opt.prof</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Memory profiling enabled/disabled. If enabled, profile
+ memory allocation activity. See the <link
+ linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+ option for on-the-fly activation/deactivation. See the <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ option for probabilistic sampling control. See the <link
+ linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
+ option for control of cumulative sample reporting. See the <link
+ linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+ option for information on interval-triggered profile dumping, the <link
+ linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link>
+ option for information on high-water-triggered profile dumping, and the
+ <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
+ option for final profile dumping. Profile output is compatible with
+ the <command>jeprof</command> command, which is based on the
+ <command>pprof</command> that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
+ package</ulink>. See <link linkend="heap_profile_format">HEAP PROFILE
+ FORMAT</link> for heap profile format documentation.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_prefix">
+ <term>
+ <mallctl>opt.prof_prefix</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Filename prefix for profile dumps. If the prefix is
+ set to the empty string, no automatic dumps will occur; this is
+ primarily useful for disabling the automatic final heap dump (which
+ also disables leak reporting, if enabled). The default prefix is
+ <filename>jeprof</filename>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_active">
+ <term>
+ <mallctl>opt.prof_active</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Profiling activated/deactivated. This is a secondary
+ control mechanism that makes it possible to start the application with
+ profiling enabled (see the <link
+ linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but
+ inactive, then toggle profiling at any time during program execution
+ with the <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl.
+ This option is enabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_thread_active_init">
+ <term>
+ <mallctl>opt.prof_thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. The initial setting for newly created threads
+ can also be changed during execution via the <link
+ linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
+ mallctl. This option is enabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_prof_sample">
+ <term>
+ <mallctl>opt.lg_prof_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average interval (log base 2) between allocation
+ samples, as measured in bytes of allocation activity. Increasing the
+ sampling interval decreases profile fidelity, but also decreases the
+ computational overhead. The default sample interval is 512 KiB (2^19
+ B).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_accum">
+ <term>
+ <mallctl>opt.prof_accum</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reporting of cumulative object/byte counts in profile
+ dumps enabled/disabled. If this option is enabled, every unique
+ backtrace must be stored for the duration of execution. Depending on
+ the application, this can impose a large memory overhead, and the
+ cumulative counts are not always of interest. This option is disabled
+ by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_prof_interval">
+ <term>
+ <mallctl>opt.lg_prof_interval</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average interval (log base 2) between memory profile
+ dumps, as measured in bytes of allocation activity. The actual
+ interval between dumps may be sporadic because decentralized allocation
+ counters are used to avoid synchronization bottlenecks. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the
+ <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option. By default, interval-triggered profile dumping is disabled
+ (encoded as -1).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_gdump">
+ <term>
+ <mallctl>opt.prof_gdump</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Set the initial state of <link
+ linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
+ enabled triggers a memory profile dump every time the total virtual
+ memory exceeds the previous maximum. This option is disabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_final">
+ <term>
+ <mallctl>opt.prof_final</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Use an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function to dump final memory
+ usage to a file named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option. Note that <function>atexit()</function> may allocate
+ memory during application initialization and then deadlock internally
+ when jemalloc in turn calls <function>atexit()</function>, so
+ this option is not universally usable (though the application can
+ register its own <function>atexit()</function> function with
+ equivalent functionality). This option is disabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_leak">
+ <term>
+ <mallctl>opt.prof_leak</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Leak reporting enabled/disabled. If enabled, use an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
+ detected by allocation sampling. See the
+ <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
+ information on analyzing heap profile output. This option is disabled
+ by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.arena">
+ <term>
+ <mallctl>thread.arena</mallctl>
+ (<type>unsigned</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the arena associated with the calling
+ thread. If the specified arena was not initialized beforehand (see the
+ <link
+ linkend="arenas.initialized"><mallctl>arenas.initialized</mallctl></link>
+ mallctl), it will be automatically initialized as a side effect of
+ calling this interface.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.allocated">
+ <term>
+ <mallctl>thread.allocated</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get the total number of bytes ever allocated by the
+ calling thread. This counter has the potential to wrap around; it is
+ up to the application to appropriately interpret the counter in such
+ cases.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.allocatedp">
+ <term>
+ <mallctl>thread.allocatedp</mallctl>
+ (<type>uint64_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+ <link
+ linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
+ mallctl. This is useful for avoiding the overhead of repeated
+        <function>mallctl*()</function> calls.</para>
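+        <para>A non-normative sketch of the intended usage pattern:
+        <programlisting language="C"><![CDATA[
+uint64_t *allocatedp;
+size_t sz = sizeof(allocatedp);
+
+/* Fetch the pointer once... */
+mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
+
+/* ...then read the counter directly, without further mallctl() calls. */
+uint64_t allocated_now = *allocatedp;]]></programlisting></para>
+        </listitem>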
+ </varlistentry>
+
+ <varlistentry id="thread.deallocated">
+ <term>
+ <mallctl>thread.deallocated</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get the total number of bytes ever deallocated by the
+ calling thread. This counter has the potential to wrap around; it is
+ up to the application to appropriately interpret the counter in such
+ cases.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.deallocatedp">
+ <term>
+ <mallctl>thread.deallocatedp</mallctl>
+ (<type>uint64_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+ <link
+ linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
+ mallctl. This is useful for avoiding the overhead of repeated
+ <function>mallctl*()</function> calls.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.tcache.enabled">
+ <term>
+ <mallctl>thread.tcache.enabled</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Enable/disable calling thread's tcache. The tcache is
+ implicitly flushed as a side effect of becoming
+ disabled (see <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.tcache.flush">
+ <term>
+ <mallctl>thread.tcache.flush</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Flush calling thread's thread-specific cache (tcache).
+ This interface releases all cached objects and internal data structures
+ associated with the calling thread's tcache. Ordinarily, this interface
+ need not be called, since automatic periodic incremental garbage
+ collection occurs, and the thread cache is automatically discarded when
+ a thread exits. However, garbage collection is triggered by allocation
+ activity, so it is possible for a thread that stops
+ allocating/deallocating to retain its cache indefinitely, in which case
+ the developer may find manual flushing useful.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.name">
+ <term>
+ <mallctl>thread.prof.name</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal> or
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get/set the descriptive name associated with the calling
+ thread in memory profile dumps. An internal copy of the name string is
+ created, so the input string need not be maintained after this interface
+ completes execution. The output string of this interface should be
+ copied for non-ephemeral uses, because multiple implementation details
+ can cause asynchronous string deallocation. Furthermore, each
+ invocation of this interface can only read or write; simultaneous
+ read/write is not supported due to string lifetime limitations. The
+ name string must be nil-terminated and comprised only of characters in
+ the sets recognized
+ by <citerefentry><refentrytitle>isgraph</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>isblank</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.active">
+ <term>
+ <mallctl>thread.prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active for the
+ calling thread. This is an activation mechanism in addition to <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
+ be active for the calling thread to sample. This flag is enabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.create">
+ <term>
+ <mallctl>tcache.create</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Create an explicit thread-specific cache (tcache) and
+ return an identifier that can be passed to the <link
+ linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
+ macro to explicitly use the specified cache rather than the
+ automatically managed one that is used by default. Each explicit cache
+ can be used by only one thread at a time; the application must assure
+ that this constraint holds.
+        </para>
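+        <para>As an illustrative, non-normative sketch (assuming
+        <option>--enable-tcache</option>), an explicit tcache can be created
+        and then selected via the <constant>MALLOCX_TCACHE()</constant> macro:
+        <programlisting language="C"><![CDATA[
+unsigned tc;
+size_t sz = sizeof(tc);
+
+/* Create an explicit tcache and obtain its identifier. */
+mallctl("tcache.create", &tc, &sz, NULL, 0);
+
+/* Allocate and deallocate through the explicit tcache. */
+void *p = mallocx(100, MALLOCX_TCACHE(tc));
+dallocx(p, MALLOCX_TCACHE(tc));]]></programlisting></para>
+        </listitem>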
+ </varlistentry>
+
+ <varlistentry id="tcache.flush">
+ <term>
+ <mallctl>tcache.flush</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache). The
+ same considerations apply to this interface as to <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
+ except that the tcache will never be automatically discarded.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.destroy">
+ <term>
+ <mallctl>tcache.destroy</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache) and
+ make the identifier available for use during a future tcache creation.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.purge">
+ <term>
+ <mallctl>arena.&lt;i&gt;.purge</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
+ all arenas if &lt;i&gt; equals <link
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.decay">
+ <term>
+ <mallctl>arena.&lt;i&gt;.decay</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Trigger decay-based purging of unused dirty pages for
+ arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals <link
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
+ The proportion of unused dirty pages to be purged depends on the current
+ time; see <link
+ linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+ details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.reset">
+ <term>
+ <mallctl>arena.&lt;i&gt;.reset</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Discard all of the arena's extant allocations. This
+ interface can only be used with arenas created via <link
+ linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link>. None
+        of the arena's discarded/cached allocations may be accessed afterward.  As
+ part of this requirement, all thread caches which were used to
+ allocate/deallocate in conjunction with the arena must be flushed
+ beforehand. This interface cannot be used if running inside Valgrind,
+ nor if the <link linkend="opt.quarantine">quarantine</link> size is
+ non-zero.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.dss">
+ <term>
+ <mallctl>arena.&lt;i&gt;.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Set the precedence of dss allocation as related to mmap
+ allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
+ <link
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See
+ <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+ settings.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.lg_dirty_mult">
+ <term>
+ <mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena minimum ratio (log base 2) of active
+ to dirty pages for arena &lt;i&gt;. Each time this interface is set and
+ the ratio is increased, pages are synchronously purged as necessary to
+ impose the new ratio. See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.decay_time">
+ <term>
+ <mallctl>arena.&lt;i&gt;.decay_time</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena approximate time in seconds from the
+ creation of a set of unused dirty pages until an equivalent set of
+ unused dirty pages is purged and/or reused. Each time this interface is
+ set, all currently unused dirty pages are considered to have fully
+ decayed, which causes immediate purging of all unused dirty pages unless
+ the decay time is set to -1 (i.e. purging disabled). See <link
+ linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+ additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.chunk_hooks">
+ <term>
+ <mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl>
+ (<type>chunk_hooks_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the chunk management hook functions for arena
+ &lt;i&gt;. The functions must be capable of operating on all extant
+ chunks associated with arena &lt;i&gt;, usually by passing unknown
+ chunks to the replaced functions. In practice, it is feasible to
+ control allocation for arenas created via <link
+ linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
+ that all chunks originate from an application-supplied chunk allocator
+ (by setting custom chunk hook functions just after arena creation), but
+ the automatically created arenas may have already created chunks prior
+ to the application having an opportunity to take over chunk
+ allocation.</para>
+
+ <programlisting language="C"><![CDATA[
+typedef struct {
+ chunk_alloc_t *alloc;
+ chunk_dalloc_t *dalloc;
+ chunk_commit_t *commit;
+ chunk_decommit_t *decommit;
+ chunk_purge_t *purge;
+ chunk_split_t *split;
+ chunk_merge_t *merge;
+} chunk_hooks_t;]]></programlisting>
+ <para>The <type>chunk_hooks_t</type> structure comprises function
+ pointers which are described individually below. jemalloc uses these
+ functions to manage chunk lifetime, which starts off with allocation of
+ mapped committed memory, in the simplest case followed by deallocation.
+ However, there are performance and platform reasons to retain chunks for
+ later reuse. Cleanup attempts cascade from deallocation to decommit to
+ purging, which gives the chunk management functions opportunities to
+ reject the most permanent cleanup operations in favor of less permanent
+ (and often less costly) operations. The chunk splitting and merging
+ operations can also be opted out of, but this is mainly intended to
+ support platforms on which virtual memory mappings provided by the
+ operating system kernel do not automatically coalesce and split, e.g.
+ Windows.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>bool *<parameter>zero</parameter></paramdef>
+ <paramdef>bool *<parameter>commit</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk allocation function conforms to the
+ <type>chunk_alloc_t</type> type and upon success returns a pointer to
+ <parameter>size</parameter> bytes of mapped memory on behalf of arena
+ <parameter>arena_ind</parameter> such that the chunk's base address is a
+ multiple of <parameter>alignment</parameter>, as well as setting
+ <parameter>*zero</parameter> to indicate whether the chunk is zeroed and
+ <parameter>*commit</parameter> to indicate whether the chunk is
+ committed. Upon error the function returns <constant>NULL</constant>
+ and leaves <parameter>*zero</parameter> and
+ <parameter>*commit</parameter> unmodified. The
+ <parameter>size</parameter> parameter is always a multiple of the chunk
+ size. The <parameter>alignment</parameter> parameter is always a power
+ of two at least as large as the chunk size. Zeroing is mandatory if
+ <parameter>*zero</parameter> is true upon function entry. Committing is
+ mandatory if <parameter>*commit</parameter> is true upon function entry.
+ If <parameter>chunk</parameter> is not <constant>NULL</constant>, the
+ returned pointer must be <parameter>chunk</parameter> on success or
+ <constant>NULL</constant> on error. Committed memory may be committed
+ in absolute terms as on a system that does not overcommit, or in
+ implicit terms as on a system that overcommits and satisfies physical
+ memory needs on demand via soft page faults. Note that replacing the
+ default chunk allocation function makes the arena's <link
+ linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
+ setting irrelevant.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_dalloc_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>
+ A chunk deallocation function conforms to the
+ <type>chunk_dalloc_t</type> type and deallocates a
+ <parameter>chunk</parameter> of given <parameter>size</parameter> with
+      <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates opt-out from
+ deallocation; the virtual memory mapping associated with the chunk
+ remains mapped, in the same commit state, and available for future use,
+ in which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_commit_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk commit function conforms to the
+ <type>chunk_commit_t</type> type and commits zeroed physical memory to
+ back pages within a <parameter>chunk</parameter> of given
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success.
+ Committed memory may be committed in absolute terms as on a system that
+ does not overcommit, or in implicit terms as on a system that
+ overcommits and satisfies physical memory needs on demand via soft page
+ faults. If the function returns true, this indicates insufficient
+ physical memory to satisfy the request.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_decommit_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk decommit function conforms to the
+ <type>chunk_decommit_t</type> type and decommits any physical memory
+ that is backing pages within a <parameter>chunk</parameter> of given
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success, in which
+ case the pages will be committed via the chunk commit function before
+ being reused. If the function returns true, this indicates opt-out from
+ decommit; the memory remains committed and is automatically retained for
+ later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_purge_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk purge function conforms to the <type>chunk_purge_t</type>
+ type and optionally discards physical pages within the virtual memory
+ mapping associated with <parameter>chunk</parameter> of given
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false if pages within the
+ purged virtual memory range will be zero-filled the next time they are
+ accessed.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_split_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk split function conforms to the <type>chunk_split_t</type>
+ type and optionally splits <parameter>chunk</parameter> of given
+ <parameter>size</parameter> into two adjacent chunks, the first of
+ <parameter>size_a</parameter> bytes, and the second of
+ <parameter>size_b</parameter> bytes, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the chunk
+ remains unsplit and therefore should continue to be operated on as a
+ whole.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_merge_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>void *<parameter>chunk_b</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk merge function conforms to the <type>chunk_merge_t</type>
+ type and optionally merges adjacent chunks,
+ <parameter>chunk_a</parameter> of given <parameter>size_a</parameter>
+ and <parameter>chunk_b</parameter> of given
+ <parameter>size_b</parameter> into one contiguous chunk, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the chunks
+ remain distinct mappings and therefore should continue to be operated on
+ independently.</para>
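+
+ <para>By way of illustration, the following is a minimal sketch (not part
+ of the interface specification) of installing a chunk deallocation hook
+ that opts out of deallocation, so that chunks are retained for later
+ reuse.  It assumes the <type>chunk_hooks_t</type> structure and hook
+ typedefs exposed by <filename>&lt;jemalloc/jemalloc.h&gt;</filename>, and
+ an arena index obtained elsewhere (e.g. via <link
+ linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link>):
+ <programlisting language="C"><![CDATA[
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+/* Opt out of deallocation: keep the mapping, in its current commit state,
+ * available for future reuse by the arena. */
+static bool
+retain_chunk_dalloc(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+    (void)chunk; (void)size; (void)committed; (void)arena_ind;
+    return (true);
+}
+
+static void
+install_retaining_hooks(unsigned arena_ind)
+{
+    chunk_hooks_t hooks;
+    size_t sz = sizeof(hooks);
+    char name[64];
+
+    /* Read back the arena's current hooks, then override only dalloc. */
+    snprintf(name, sizeof(name), "arena.%u.chunk_hooks", arena_ind);
+    if (mallctl(name, &hooks, &sz, NULL, 0) != 0)
+        return;
+    hooks.dalloc = retain_chunk_dalloc;
+    mallctl(name, NULL, NULL, &hooks, sizeof(hooks));
+}]]></programlisting></para>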
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.narenas">
+ <term>
+ <mallctl>arenas.narenas</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Current limit on number of arenas.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.initialized">
+ <term>
+ <mallctl>arenas.initialized</mallctl>
+ (<type>bool *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>An array of <link
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>
+ booleans. Each boolean indicates whether the corresponding arena is
+ initialized.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.lg_dirty_mult">
+ <term>
+ <mallctl>arenas.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena minimum ratio (log base 2) of
+ active to dirty pages, used to initialize <link
+ linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.decay_time">
+ <term>
+ <mallctl>arenas.decay_time</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena approximate time in seconds
+ from the creation of a set of unused dirty pages until an equivalent set
+ of unused dirty pages is purged and/or reused, used to initialize <link
+ linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
+ additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.quantum">
+ <term>
+ <mallctl>arenas.quantum</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Quantum size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.page">
+ <term>
+ <mallctl>arenas.page</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Page size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.tcache_max">
+ <term>
+ <mallctl>arenas.tcache_max</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Maximum thread-cached size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nbins">
+ <term>
+ <mallctl>arenas.nbins</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of bin size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nhbins">
+ <term>
+ <mallctl>arenas.nhbins</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Total number of thread cache bin size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.size">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.nregs">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
+ (<type>uint32_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of regions per page run.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.run_size">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.run_size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of bytes per page run.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nlruns">
+ <term>
+ <mallctl>arenas.nlruns</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Total number of large size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.lrun.i.size">
+ <term>
+ <mallctl>arenas.lrun.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by this large size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nhchunks">
+ <term>
+ <mallctl>arenas.nhchunks</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Total number of huge size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.hchunk.i.size">
+ <term>
+ <mallctl>arenas.hchunk.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by this huge size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.extend">
+ <term>
+ <mallctl>arenas.extend</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Extend the array of arenas by appending a new arena,
+ and returning the new arena index.  See the EXAMPLES section below for a
+ usage sketch.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.thread_active_init">
+ <term>
+ <mallctl>prof.thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control the initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. See the <link
+ linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.active">
+ <term>
+ <mallctl>prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active. See the
+ <link
+ linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+ option for additional information, as well as the interrelated <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ mallctl.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.dump">
+ <term>
+ <mallctl>prof.dump</mallctl>
+ (<type>const char *</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Dump a memory profile to the specified file, or if NULL
+ is specified, to a file according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the
+ <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.gdump">
+ <term>
+ <mallctl>prof.gdump</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>When enabled, trigger a memory profile dump every time
+ the total virtual memory exceeds the previous maximum. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.reset">
+ <term>
+ <mallctl>prof.reset</mallctl>
+ (<type>size_t</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reset all memory profile statistics, and optionally
+ update the sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ and <link
+ linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.lg_sample">
+ <term>
+ <mallctl>prof.lg_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get the current sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.interval">
+ <term>
+ <mallctl>prof.interval</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average number of bytes allocated between
+ interval-based profile dumps. See the
+ <link
+ linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.cactive">
+ <term>
+ <mallctl>stats.cactive</mallctl>
+ (<type>size_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Pointer to a counter that contains an approximate count
+ of the current number of bytes in active pages. The estimate may be
+ high, but never low, because each arena rounds up when computing its
+ contribution to the counter. Note that the <link
+ linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
+ on this counter. Furthermore, counter consistency is maintained via
+ atomic operations, so it is necessary to use an atomic operation in
+ order to guarantee a consistent read when dereferencing the pointer.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.allocated">
+ <term>
+ <mallctl>stats.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes allocated by the
+ application.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.active">
+ <term>
+ <mallctl>stats.active</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in active pages allocated by the
+ application. This is a multiple of the page size, and greater than or
+ equal to <link
+ linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
+ This does not include <link linkend="stats.arenas.i.pdirty">
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>, nor pages
+ entirely devoted to allocator metadata.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.metadata">
+ <term>
+ <mallctl>stats.metadata</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes dedicated to metadata, which
+ comprise base allocations used for bootstrap-sensitive internal
+ allocator data structures, arena chunk headers (see <link
+ linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>),
+ and internal allocations (see <link
+ linkend="stats.arenas.i.metadata.allocated"><mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl></link>).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.resident">
+ <term>
+ <mallctl>stats.resident</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Maximum number of bytes in physically resident data
+ pages mapped by the allocator, comprising all pages dedicated to
+ allocator metadata, pages backing active allocations, and unused dirty
+ pages. This is a maximum rather than precise because pages may not
+ actually be physically resident if they correspond to demand-zeroed
+ virtual memory that has not yet been touched. This is a multiple of the
+ page size, and is larger than <link
+ linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mapped">
+ <term>
+ <mallctl>stats.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in active chunks mapped by the
+ allocator. This is a multiple of the chunk size, and is larger than
+ <link linkend="stats.active"><mallctl>stats.active</mallctl></link>.
+ This does not include inactive chunks, even those that contain unused
+ dirty pages, which means that there is no strict ordering between this
+ and <link
+ linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.retained">
+ <term>
+ <mallctl>stats.retained</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in virtual memory mappings that
+ were retained rather than being returned to the operating system via
+ e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>. Retained virtual memory is
+ typically untouched, decommitted, or purged, so it has no strongly
+ associated physical memory (see <link
+ linkend="arena.i.chunk_hooks">chunk hooks</link> for details). Retained
+ memory is excluded from mapped memory statistics, e.g. <link
+ linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dss">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+ related to <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> allocation. See <link
+ linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lg_dirty_mult">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Minimum ratio (log base 2) of active to dirty pages.
+ See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.decay_time">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in seconds from the creation of a set
+ of unused dirty pages until an equivalent set of unused dirty pages is
+ purged and/or reused. See <link
+ linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.nthreads">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of threads currently assigned to
+ arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pactive">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages in active runs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pdirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages within unused runs that are potentially
+ dirty, and for which <function>madvise<parameter>...</parameter>
+ <parameter><constant>MADV_DONTNEED</constant></parameter></function> or
+ similar has not been called.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mapped">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of mapped bytes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.retained">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of retained bytes. See <link
+ linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
+ details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.metadata.mapped">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of mapped bytes in arena chunk headers, which
+ track the states of the non-metadata pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.metadata.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes dedicated to internal allocations.
+ Internal allocations differ from application-originated allocations in
+ that they are for internal use, and that they are omitted from heap
+ profiles. This statistic is reported separately from <link
+ linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+ <link
+ linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>
+ because it overlaps with e.g. the <link
+ linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link> and
+ <link linkend="stats.active"><mallctl>stats.active</mallctl></link>
+ statistics, whereas the other metadata statistics do
+ not.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.npurge">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of dirty page purge sweeps performed.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.nmadvise">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.nmadvise</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of <function>madvise<parameter>...</parameter>
+ <parameter><constant>MADV_DONTNEED</constant></parameter></function> or
+ similar calls made to purge dirty pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.purged">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.purged</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of pages purged.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by small objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests served by
+ small bins.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of small objects returned to bins.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of small allocation requests.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by large objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of large allocation requests served
+ directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of large deallocation requests served
+ directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of large allocation requests.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by huge objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of huge allocation requests served
+ directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of huge deallocation requests served
+ directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of huge allocation requests.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocations served by bin.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocations returned to bin.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation
+ requests.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.curregs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of regions for this size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option> <option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache fills.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option> <option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache flushes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nruns">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of runs created.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nreruns">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times the current run from which
+ to allocate changed.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.curruns">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of runs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lruns.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests for this size
+ class served directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lruns.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of deallocation requests for this
+ size class served directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lruns.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests for this size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lruns.j.curruns">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of runs for this size class.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests for this size
+ class served directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of deallocation requests for this
+ size class served directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests for this size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.curhchunks">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of huge allocations for this size class.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+ <refsect1 id="heap_profile_format">
+ <title>HEAP PROFILE FORMAT</title>
+ <para>Although the heap profiling functionality was originally designed to
+ be compatible with the
+ <command>pprof</command> command that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
+ package</ulink>, the addition of per thread heap profiling functionality
+ required a different heap profile format. The <command>jeprof</command>
+ command is derived from <command>pprof</command>, with enhancements to
+ support the heap profile format described here.</para>
+
+ <para>In the following hypothetical heap profile, <constant>[...]</constant>
+ indicates elision for the sake of compactness. <programlisting><![CDATA[
+heap_v2/524288
+ t*: 28106: 56637512 [0: 0]
+ [...]
+ t3: 352: 16777344 [0: 0]
+ [...]
+ t99: 17754: 29341640 [0: 0]
+ [...]
+@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
+ t*: 13: 6688 [0: 0]
+ t3: 12: 6496 [0: 0]
+ t99: 1: 192 [0: 0]
+[...]
+
+MAPPED_LIBRARIES:
+[...]]]></programlisting> The following matches the above heap profile, but most
+tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
+descriptions of the corresponding fields. <programlisting><![CDATA[
+<heap_profile_format_version>/<mean_sample_interval>
+ <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+ <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+ <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
+ <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+[...]
+
+MAPPED_LIBRARIES:
+</proc/<pid>/maps>]]></programlisting></para>
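+
+ <para>A profile in this format can also be produced programmatically; the
+ following is a minimal sketch (assuming jemalloc was built with
+ <option>--enable-prof</option> and profiling is active) that uses the
+ <link linkend="prof.dump"><mallctl>prof.dump</mallctl></link> mallctl
+ described above:
+ <programlisting language="C"><![CDATA[
+#include <stddef.h>
+#include <jemalloc/jemalloc.h>
+
+/* Write a heap profile to path, or to an opt.prof_prefix-derived name when
+ * path is NULL. */
+static void
+dump_heap_profile(const char *path)
+{
+    if (path != NULL)
+        mallctl("prof.dump", NULL, NULL, &path, sizeof(path));
+    else
+        mallctl("prof.dump", NULL, NULL, NULL, 0);
+}]]></programlisting></para>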
+ </refsect1>
+
+ <refsect1 id="debugging_malloc_problems">
+ <title>DEBUGGING MALLOC PROBLEMS</title>
+ <para>When debugging, it is a good idea to configure/build jemalloc with
+ the <option>--enable-debug</option> and <option>--enable-fill</option>
+ options, and recompile the program with suitable options and symbols for
+ debugger support. When so configured, jemalloc incorporates a wide variety
+ of run-time assertions that catch application errors such as double-free,
+ write-after-free, etc.</para>
+
+ <para>Programs often accidentally depend on <quote>uninitialized</quote>
+ memory actually being filled with zero bytes. Junk filling
+ (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
+ option) tends to expose such bugs in the form of obviously incorrect
+ results and/or coredumps. Conversely, zero
+ filling (see the <link
+ linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates
+ the symptoms of such bugs. Between these two options, it is usually
+ possible to quickly detect, diagnose, and eliminate such bugs.</para>
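+
+ <para>For example, a debugging build might enable junk filling and
+ abort-on-error directly in the source (a sketch; the same options can
+ equally be supplied at run time via the <envar>MALLOC_CONF</envar>
+ environment variable described below):
+ <programlisting language="C"><![CDATA[
+/* Expose allocator misuse early during debugging. */
+const char *malloc_conf = "junk:true,abort:true";]]></programlisting></para>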
+
+ <para>This implementation does not provide much detail about the problems
+ it detects, because the performance impact for storing such information
+ would be prohibitive. However, jemalloc does integrate with the most
+ excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the
+ <option>--enable-valgrind</option> configuration option is enabled.</para>
+ </refsect1>
+ <refsect1 id="diagnostic_messages">
+ <title>DIAGNOSTIC MESSAGES</title>
+ <para>If any of the memory allocation/deallocation functions detect an
+ error or warning condition, a message will be printed to file descriptor
+ <constant>STDERR_FILENO</constant>. Errors will result in the process
+ dumping core. If the <link
+ linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most
+ warnings are treated as errors.</para>
+
+ <para>The <varname>malloc_message</varname> variable allows the programmer
+ to override the function which emits the text strings forming the errors
+ and warnings if for some reason the <constant>STDERR_FILENO</constant> file
+ descriptor is not suitable for this.
+ <function>malloc_message()</function> takes the
+ <parameter>cbopaque</parameter> pointer argument that is
+ <constant>NULL</constant> unless overridden by the arguments in a call to
+ <function>malloc_stats_print()</function>, followed by a string
+ pointer. Please note that doing anything which tries to allocate memory in
+ this function is likely to result in a crash or deadlock.</para>
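+
+ <para>The following is a minimal sketch of such an override, writing
+ messages to a hypothetical pre-opened log descriptor without allocating
+ memory:
+ <programlisting language="C"><![CDATA[
+#include <string.h>
+#include <unistd.h>
+#include <jemalloc/jemalloc.h>
+
+static int log_fd = -1;    /* Opened during application startup. */
+
+static void
+log_malloc_message(void *cbopaque, const char *s)
+{
+    (void)cbopaque;
+    if (log_fd != -1)
+        (void)write(log_fd, s, strlen(s));
+}
+
+static void
+install_malloc_message(void)
+{
+    malloc_message = log_malloc_message;
+}]]></programlisting></para>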
+
+ <para>All messages are prefixed by
+ <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
+ </refsect1>
+ <refsect1 id="return_values">
+ <title>RETURN VALUES</title>
+ <refsect2>
+ <title>Standard API</title>
+ <para>The <function>malloc()</function> and
+ <function>calloc()</function> functions return a pointer to the
+ allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned and <varname>errno</varname> is set to
+ <errorname>ENOMEM</errorname>.</para>
+
+ <para>The <function>posix_memalign()</function> function
+ returns the value 0 if successful; otherwise it returns an error value.
+ The <function>posix_memalign()</function> function will fail
+ if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para>The <parameter>alignment</parameter> parameter is
+ not a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOMEM</errorname></term>
+
+ <listitem><para>Memory allocation error.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>aligned_alloc()</function> function returns
+ a pointer to the allocated memory if successful; otherwise a
+ <constant>NULL</constant> pointer is returned and
+ <varname>errno</varname> is set. The
+ <function>aligned_alloc()</function> function will fail if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para>The <parameter>alignment</parameter> parameter is
+ not a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOMEM</errorname></term>
+
+ <listitem><para>Memory allocation error.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>realloc()</function> function returns a
+ pointer, possibly identical to <parameter>ptr</parameter>, to the
+ allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned, and <varname>errno</varname> is set to
+ <errorname>ENOMEM</errorname> if the error was the result of an
+ allocation failure. The <function>realloc()</function>
+ function always leaves the original buffer intact when an error occurs.
+ </para>
+
+ <para>The <function>free()</function> function returns no
+ value.</para>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <para>The <function>mallocx()</function> and
+ <function>rallocx()</function> functions return a pointer to
+ the allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned to indicate insufficient contiguous memory was
+ available to service the allocation request. </para>
+
+ <para>The <function>xallocx()</function> function returns the
+ real size of the resulting resized allocation pointed to by
+ <parameter>ptr</parameter>, which is a value less than
+ <parameter>size</parameter> if the allocation could not be adequately
+ grown in place. </para>
+
+ <para>The <function>sallocx()</function> function returns the
+ real size of the allocation pointed to by <parameter>ptr</parameter>.
+ </para>
+
+ <para>The <function>nallocx()</function> function returns the real size
+ that would result from a successful equivalent
+ <function>mallocx()</function> function call, or zero if
+ insufficient memory is available to perform the size computation. </para>
+
+ <para>The <function>mallctl()</function>,
+ <function>mallctlnametomib()</function>, and
+ <function>mallctlbymib()</function> functions return 0 on
+ success; otherwise they return an error value. The functions will fail
+ if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para><parameter>newp</parameter> is not
+ <constant>NULL</constant>, and <parameter>newlen</parameter> is too
+ large or too small. Alternatively, <parameter>*oldlenp</parameter>
+ is too large or too small; in this case as much data as possible
+ are read despite the error.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOENT</errorname></term>
+
+ <listitem><para><parameter>name</parameter> or
+ <parameter>mib</parameter> specifies an unknown/invalid
+ value.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EPERM</errorname></term>
+
+ <listitem><para>Attempt to read or write void value, or attempt to
+ write read-only value.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EAGAIN</errorname></term>
+
+ <listitem><para>A memory allocation failure
+ occurred.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EFAULT</errorname></term>
+
+ <listitem><para>An interface with side effects failed in some way
+ not directly related to <function>mallctl*()</function>
+ read/write processing.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>malloc_usable_size()</function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. </para>
+ </refsect2>
+ </refsect1>
+ <refsect1 id="environment">
+ <title>ENVIRONMENT</title>
+ <para>The following environment variable affects the execution of the
+ allocation functions:
+ <variablelist>
+ <varlistentry>
+ <term><envar>MALLOC_CONF</envar></term>
+
+ <listitem><para>If the environment variable
+ <envar>MALLOC_CONF</envar> is set, the characters it contains
+ will be interpreted as options.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </refsect1>
+ <refsect1 id="examples">
+ <title>EXAMPLES</title>
+ <para>To dump core whenever a problem occurs:
+ <screen>ln -s 'abort:true' /etc/malloc.conf</screen>
+ </para>
+ <para>To specify in the source a chunk size that is 16 MiB:
+ <programlisting language="C"><![CDATA[
+malloc_conf = "lg_chunk:24";]]></programlisting></para>
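+
+ <para>To create a dedicated arena in the source and allocate from it,
+ bypassing the thread cache (a sketch using the <link
+ linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> mallctl
+ and the <function>mallocx()</function> flags described above):
+ <programlisting language="C"><![CDATA[
+#include <stddef.h>
+#include <jemalloc/jemalloc.h>
+
+int
+main(void)
+{
+    unsigned arena_ind;
+    size_t sz = sizeof(arena_ind);
+    void *p;
+
+    /* Append a new arena and remember its index. */
+    if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
+        return (1);
+
+    /* Serve a 4 KiB allocation from that arena. */
+    p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+    if (p == NULL)
+        return (1);
+    dallocx(p, MALLOCX_TCACHE_NONE);
+    return (0);
+}]]></programlisting></para>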
+ </refsect1>
+ <refsect1 id="see_also">
+ <title>SEE ALSO</title>
+ <para><citerefentry><refentrytitle>madvise</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>utrace</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>alloca</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>getpagesize</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry></para>
+ </refsect1>
+ <refsect1 id="standards">
+ <title>STANDARDS</title>
+ <para>The <function>malloc()</function>,
+ <function>calloc()</function>,
+ <function>realloc()</function>, and
+ <function>free()</function> functions conform to ISO/IEC
+ 9899:1990 (<quote>ISO C90</quote>).</para>
+
+ <para>The <function>posix_memalign()</function> function conforms
+ to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
+ </refsect1>
+</refentry>
diff --git a/memory/jemalloc/src/doc/manpages.xsl.in b/memory/jemalloc/src/doc/manpages.xsl.in
new file mode 100644
index 000000000..88b2626b9
--- /dev/null
+++ b/memory/jemalloc/src/doc/manpages.xsl.in
@@ -0,0 +1,4 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:import href="@XSLROOT@/manpages/docbook.xsl"/>
+ <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
+</xsl:stylesheet>
diff --git a/memory/jemalloc/src/doc/stylesheet.xsl b/memory/jemalloc/src/doc/stylesheet.xsl
new file mode 100644
index 000000000..619365d82
--- /dev/null
+++ b/memory/jemalloc/src/doc/stylesheet.xsl
@@ -0,0 +1,10 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:param name="funcsynopsis.style">ansi</xsl:param>
+ <xsl:param name="function.parens" select="0"/>
+ <xsl:template match="function">
+ <xsl:call-template name="inline.monoseq"/>
+ </xsl:template>
+ <xsl:template match="mallctl">
+ <quote><xsl:call-template name="inline.monoseq"/></quote>
+ </xsl:template>
+</xsl:stylesheet>
diff --git a/memory/jemalloc/src/include/jemalloc/internal/arena.h b/memory/jemalloc/src/include/jemalloc/internal/arena.h
new file mode 100644
index 000000000..f39ce54b5
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/arena.h
@@ -0,0 +1,1516 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+
+/* Maximum number of regions in one run. */
+#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
+#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
+
+/*
+ * Minimum redzone size. Redzones may be larger than this if necessary to
+ * preserve region alignment.
+ */
+#define REDZONE_MINSIZE 16
+
+/*
+ * The minimum ratio of active:dirty pages per arena is computed as:
+ *
+ * (nactive >> lg_dirty_mult) >= ndirty
+ *
+ * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
+ * many active pages as dirty pages.
+ */
+#define LG_DIRTY_MULT_DEFAULT 3
+
+typedef enum {
+ purge_mode_ratio = 0,
+ purge_mode_decay = 1,
+
+ purge_mode_limit = 2
+} purge_mode_t;
+#define PURGE_DEFAULT purge_mode_ratio
+/* Default decay time in seconds. */
+#define DECAY_TIME_DEFAULT 10
+/* Number of event ticks between time checks. */
+#define DECAY_NTICKS_PER_UPDATE 1000
+
+typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
+typedef struct arena_avail_links_s arena_avail_links_t;
+typedef struct arena_run_s arena_run_t;
+typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
+typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
+typedef struct arena_chunk_s arena_chunk_t;
+typedef struct arena_bin_info_s arena_bin_info_t;
+typedef struct arena_decay_s arena_decay_t;
+typedef struct arena_bin_s arena_bin_t;
+typedef struct arena_s arena_t;
+typedef struct arena_tdata_s arena_tdata_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#ifdef JEMALLOC_ARENA_STRUCTS_A
+struct arena_run_s {
+ /* Index of bin this run is associated with. */
+ szind_t binind;
+
+ /* Number of free regions in run. */
+ unsigned nfree;
+
+ /* Per region allocated/deallocated bitmap. */
+ bitmap_t bitmap[BITMAP_GROUPS_MAX];
+};
+
+/* Each element of the chunk map corresponds to one page within the chunk. */
+struct arena_chunk_map_bits_s {
+ /*
+ * Run address (or size) and various flags are stored together. The bit
+ * layout looks like (assuming 32-bit system):
+ *
+ * ???????? ???????? ???nnnnn nnndumla
+ *
+ * ? : Unallocated: Run address for first/last pages, unset for internal
+ * pages.
+ * Small: Run page offset.
+ * Large: Run page count for first page, unset for trailing pages.
+ * n : binind for small size class, BININD_INVALID for large size class.
+ * d : dirty?
+ * u : unzeroed?
+ * m : decommitted?
+ * l : large?
+ * a : allocated?
+ *
+ * Following are example bit patterns for the three types of runs.
+ *
+ * p : run page offset
+ * s : run size
+ * n : binind for size class; large objects set these to BININD_INVALID
+ * x : don't care
+ * - : 0
+ * + : 1
+ * [DUMLA] : bit set
+ * [dumla] : bit unset
+ *
+ * Unallocated (clean):
+ * ssssssss ssssssss sss+++++ +++dum-a
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
+ * ssssssss ssssssss sss+++++ +++dUm-a
+ *
+ * Unallocated (dirty):
+ * ssssssss ssssssss sss+++++ +++D-m-a
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * ssssssss ssssssss sss+++++ +++D-m-a
+ *
+ * Small:
+ * pppppppp pppppppp pppnnnnn nnnd---A
+ * pppppppp pppppppp pppnnnnn nnn----A
+ * pppppppp pppppppp pppnnnnn nnnd---A
+ *
+ * Large:
+ * ssssssss ssssssss sss+++++ +++D--LA
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * -------- -------- ---+++++ +++D--LA
+ *
+ * Large (sampled, size <= LARGE_MINCLASS):
+ * ssssssss ssssssss sssnnnnn nnnD--LA
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * -------- -------- ---+++++ +++D--LA
+ *
+ * Large (not sampled, size == LARGE_MINCLASS):
+ * ssssssss ssssssss sss+++++ +++D--LA
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * -------- -------- ---+++++ +++D--LA
+ */
+ size_t bits;
+#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
+#define CHUNK_MAP_LARGE ((size_t)0x02U)
+#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)
+
+#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
+#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
+#define CHUNK_MAP_DIRTY ((size_t)0x10U)
+#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)
+
+#define CHUNK_MAP_BININD_SHIFT 5
+#define BININD_INVALID ((size_t)0xffU)
+#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
+#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
+
+#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
+#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
+#define CHUNK_MAP_SIZE_MASK \
+ (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
+};
+
+struct arena_runs_dirty_link_s {
+ qr(arena_runs_dirty_link_t) rd_link;
+};
+
+/*
+ * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
+ * like arena_chunk_map_bits_t. Two separate arrays are stored within each
+ * chunk header in order to improve cache locality.
+ */
+struct arena_chunk_map_misc_s {
+ /*
+ * Linkage for run heaps. There are two disjoint uses:
+ *
+ * 1) arena_t's runs_avail heaps.
+ * 2) arena_run_t conceptually uses this linkage for in-use non-full
+ * runs, rather than directly embedding linkage.
+ */
+ phn(arena_chunk_map_misc_t) ph_link;
+
+ union {
+ /* Linkage for list of dirty runs. */
+ arena_runs_dirty_link_t rd;
+
+ /* Profile counters, used for large object runs. */
+ union {
+ void *prof_tctx_pun;
+ prof_tctx_t *prof_tctx;
+ };
+
+ /* Small region run metadata. */
+ arena_run_t run;
+ };
+};
+typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
+#endif /* JEMALLOC_ARENA_STRUCTS_A */
+
+#ifdef JEMALLOC_ARENA_STRUCTS_B
+/* Arena chunk header. */
+struct arena_chunk_s {
+ /*
+ * A pointer to the arena that owns the chunk is stored within the node.
+ * This field as a whole is used by chunks_rtree to support both
+ * ivsalloc() and core-based debugging.
+ */
+ extent_node_t node;
+
+ /*
+ * Map of pages within chunk that keeps track of free/large/small. The
+ * first map_bias entries are omitted, since the chunk header does not
+ * need to be tracked in the map. This omission saves a header page
+ * for common chunk sizes (e.g. 4 MiB).
+ */
+ arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
+};
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each run has the following layout:
+ *
+ * /--------------------\
+ * | pad? |
+ * |--------------------|
+ * | redzone |
+ * reg0_offset | region 0 |
+ * | redzone |
+ * |--------------------| \
+ * | redzone | |
+ * | region 1 | > reg_interval
+ * | redzone | /
+ * |--------------------|
+ * | ... |
+ * | ... |
+ * | ... |
+ * |--------------------|
+ * | redzone |
+ * | region nregs-1 |
+ * | redzone |
+ * |--------------------|
+ * | alignment pad? |
+ * \--------------------/
+ *
+ * reg_interval has at least the same minimum alignment as reg_size; this
+ * preserves the alignment constraint that sa2u() depends on. Alignment pad is
+ * either 0 or redzone_size; it is present only if needed to align reg0_offset.
+ */
+struct arena_bin_info_s {
+ /* Size of regions in a run for this bin's size class. */
+ size_t reg_size;
+
+ /* Redzone size. */
+ size_t redzone_size;
+
+ /* Interval between regions (reg_size + (redzone_size << 1)). */
+ size_t reg_interval;
+
+ /* Total size of a run for this bin's size class. */
+ size_t run_size;
+
+ /* Total number of regions in a run for this bin's size class. */
+ uint32_t nregs;
+
+ /*
+ * Metadata used to manipulate bitmaps for runs associated with this
+ * bin.
+ */
+ bitmap_info_t bitmap_info;
+
+ /* Offset of first region in a run for this bin's size class. */
+ uint32_t reg0_offset;
+};
+
+struct arena_decay_s {
+ /*
+ * Approximate time in seconds from the creation of a set of unused
+ * dirty pages until an equivalent set of unused dirty pages is purged
+ * and/or reused.
+ */
+ ssize_t time;
+ /* time / SMOOTHSTEP_NSTEPS. */
+ nstime_t interval;
+ /*
+ * Time at which the current decay interval logically started. We do
+ * not actually advance to a new epoch until sometime after it starts
+ * because of scheduling and computation delays, and it is even possible
+ * to completely skip epochs. In all cases, during epoch advancement we
+ * merge all relevant activity into the most recently recorded epoch.
+ */
+ nstime_t epoch;
+ /* Deadline randomness generator. */
+ uint64_t jitter_state;
+ /*
+ * Deadline for current epoch. This is the sum of interval and per
+ * epoch jitter which is a uniform random variable in [0..interval).
+ * Epochs always advance by precise multiples of interval, but we
+ * randomize the deadline to reduce the likelihood of arenas purging in
+ * lockstep.
+ */
+ nstime_t deadline;
+ /*
+ * Number of dirty pages at beginning of current epoch. During epoch
+ * advancement we use the delta between arena->decay.ndirty and
+ * arena->ndirty to determine how many dirty pages, if any, were
+ * generated.
+ */
+ size_t ndirty;
+ /*
+ * Trailing log of how many unused dirty pages were generated during
+ * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+ * element is the most recent epoch. Corresponding epoch times are
+ * relative to epoch.
+ */
+ size_t backlog[SMOOTHSTEP_NSTEPS];
+};
+
+struct arena_bin_s {
+ /*
+ * All operations on runcur, runs, and stats require that lock be
+ * locked. Run allocation/deallocation are protected by the arena lock,
+ * which may be acquired while holding one or more bin locks, but not
+ * vice versa.
+ */
+ malloc_mutex_t lock;
+
+ /*
+ * Current run being used to service allocations of this bin's size
+ * class.
+ */
+ arena_run_t *runcur;
+
+ /*
+ * Heap of non-full runs. This heap is used when looking for an
+ * existing run when runcur is no longer usable. We choose the
+ * non-full run that is lowest in memory; this policy tends to keep
+ * objects packed well, and it can also help reduce the number of
+ * almost-empty chunks.
+ */
+ arena_run_heap_t runs;
+
+ /* Bin statistics. */
+ malloc_bin_stats_t stats;
+};
+
+struct arena_s {
+ /* This arena's index within the arenas array. */
+ unsigned ind;
+
+ /*
+ * Number of threads currently assigned to this arena, synchronized via
+ * atomic operations. Each thread has two distinct assignments, one for
+ * application-serving allocation, and the other for internal metadata
+ * allocation. Internal metadata must not be allocated from arenas
+ * created via the arenas.extend mallctl, because the arena.<i>.reset
+ * mallctl indiscriminately discards all allocations for the affected
+ * arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
+ */
+ unsigned nthreads[2];
+
+ /*
+ * There are three classes of arena operations from a locking
+ * perspective:
+ * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
+ * 2) Bin-related operations are protected by bin locks.
+ * 3) Chunk- and run-related operations are protected by this mutex.
+ */
+ malloc_mutex_t lock;
+
+ arena_stats_t stats;
+ /*
+ * List of tcaches for extant threads associated with this arena.
+ * Stats from these are merged incrementally, and at exit if
+ * opt_stats_print is enabled.
+ */
+ ql_head(tcache_t) tcache_ql;
+
+ uint64_t prof_accumbytes;
+
+ /*
+ * PRNG state for cache index randomization of large allocation base
+ * pointers.
+ */
+ size_t offset_state;
+
+ dss_prec_t dss_prec;
+
+ /* Extant arena chunks. */
+ ql_head(extent_node_t) achunks;
+
+ /*
+ * In order to avoid rapid chunk allocation/deallocation when an arena
+ * oscillates right on the cusp of needing a new chunk, cache the most
+ * recently freed chunk. The spare is left in the arena's chunk trees
+ * until it is deleted.
+ *
+ * There is one spare chunk per arena, rather than one spare total, in
+ * order to avoid interactions between multiple threads that could make
+ * a single spare inadequate.
+ */
+ arena_chunk_t *spare;
+
+ /* Minimum ratio (log base 2) of nactive:ndirty. */
+ ssize_t lg_dirty_mult;
+
+ /* True if a thread is currently executing arena_purge_to_limit(). */
+ bool purging;
+
+ /* Number of pages in active runs and huge regions. */
+ size_t nactive;
+
+ /*
+ * Current count of pages within unused runs that are potentially
+ * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
+ * By tracking this, we can institute a limit on how much dirty unused
+ * memory is mapped for each arena.
+ */
+ size_t ndirty;
+
+ /*
+ * Unused dirty memory this arena manages. Dirty memory is conceptually
+ * tracked as an arbitrarily interleaved LRU of dirty runs and cached
+ * chunks, but the list linkage is actually semi-duplicated in order to
+ * avoid extra arena_chunk_map_misc_t space overhead.
+ *
+ * LRU-----------------------------------------------------------MRU
+ *
+ * /-- arena ---\
+ * | |
+ * | |
+ * |------------| /- chunk -\
+ * ...->|chunks_cache|<--------------------------->| /----\ |<--...
+ * |------------| | |node| |
+ * | | | | | |
+ * | | /- run -\ /- run -\ | | | |
+ * | | | | | | | | | |
+ * | | | | | | | | | |
+ * |------------| |-------| |-------| | |----| |
+ * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
+ * |------------| |-------| |-------| | |----| |
+ * | | | | | | | | | |
+ * | | | | | | | \----/ |
+ * | | \-------/ \-------/ | |
+ * | | | |
+ * | | | |
+ * \------------/ \---------/
+ */
+ arena_runs_dirty_link_t runs_dirty;
+ extent_node_t chunks_cache;
+
+ /* Decay-based purging state. */
+ arena_decay_t decay;
+
+ /* Extant huge allocations. */
+ ql_head(extent_node_t) huge;
+ /* Synchronizes all huge allocation/update/deallocation. */
+ malloc_mutex_t huge_mtx;
+
+ /*
+ * Trees of chunks that were previously allocated (trees differ only in
+ * node ordering). These are used when allocating chunks, in an attempt
+ * to re-use address space. Depending on function, different tree
+ * orderings are needed, which is why there are two trees with the same
+ * contents.
+ */
+ extent_tree_t chunks_szad_cached;
+ extent_tree_t chunks_ad_cached;
+ extent_tree_t chunks_szad_retained;
+ extent_tree_t chunks_ad_retained;
+
+ malloc_mutex_t chunks_mtx;
+ /* Cache of nodes that were allocated via base_alloc(). */
+ ql_head(extent_node_t) node_cache;
+ malloc_mutex_t node_cache_mtx;
+
+ /* User-configurable chunk hook functions. */
+ chunk_hooks_t chunk_hooks;
+
+ /* bins is used to store trees of free regions. */
+ arena_bin_t bins[NBINS];
+
+ /*
+ * Size-segregated address-ordered heaps of this arena's available runs,
+ * used for first-best-fit run allocation. Runs are quantized, i.e.
+ * they reside in the last heap which corresponds to a size class less
+ * than or equal to the run size.
+ */
+ arena_run_heap_t runs_avail[NPSIZES];
+};
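The runs_dirty / chunks_cache diagram inside the struct above describes a single circular, doubly linked ring whose link nodes are embedded in two different owner types (dirty runs and cached chunks), with the arena's own fields acting as sentinels; owners are recovered from links by subtracting the member offset, exactly as arena_rd_to_miscelm() does further down. A hedged, generic sketch of that intrusive-ring pattern (not jemalloc's actual qr/ql macros):

    #include <stddef.h>
    #include <stdio.h>

    /* Intrusive link node, embeddable in any owner structure. */
    typedef struct link_s {
    	struct link_s *prev, *next;
    } link_t;

    static void
    link_init(link_t *l)
    {
    	l->prev = l->next = l;
    }

    /* Insert n just before the sentinel, i.e. at the MRU end of the ring. */
    static void
    link_insert_mru(link_t *sentinel, link_t *n)
    {
    	n->prev = sentinel->prev;
    	n->next = sentinel;
    	sentinel->prev->next = n;
    	sentinel->prev = n;
    }

    static void
    link_remove(link_t *n)
    {
    	n->prev->next = n->next;
    	n->next->prev = n->prev;
    	n->prev = n->next = n;
    }

    typedef struct { int id; link_t rd; } fake_run_t;

    int
    main(void)
    {
    	link_t runs_dirty;              /* sentinel, like arena->runs_dirty */
    	fake_run_t a = {1}, b = {2}, c = {3};

    	link_init(&runs_dirty);
    	link_init(&a.rd); link_init(&b.rd); link_init(&c.rd);
    	link_insert_mru(&runs_dirty, &a.rd);    /* LRU end ... */
    	link_insert_mru(&runs_dirty, &b.rd);
    	link_insert_mru(&runs_dirty, &c.rd);    /* ... MRU end */
    	link_remove(&b.rd);

    	/* Walk LRU -> MRU, recovering the owner with offsetof(). */
    	for (link_t *l = runs_dirty.next; l != &runs_dirty; l = l->next) {
    		fake_run_t *r =
    		    (fake_run_t *)((char *)l - offsetof(fake_run_t, rd));
    		printf("run %d\n", r->id);
    	}
    	return 0;
    }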
+
+/* Used in conjunction with tsd for fast arena-related context lookup. */
+struct arena_tdata_s {
+ ticker_t decay_ticker;
+};
+#endif /* JEMALLOC_ARENA_STRUCTS_B */
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+static const size_t large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ PAGE
+#else
+ 0
+#endif
+ ;
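When JEMALLOC_CACHE_OBLIVIOUS is defined, large_pad reserves one extra page per large run; combined with the per-arena offset_state PRNG above, that slack lets large allocation base pointers start at a random cache-line offset so equally sized allocations do not all land on the same cache sets. A hedged sketch of that offset computation, with the page size, cache-line size, and LCG standing in for jemalloc's real constants and PRNG:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ   4096u                 /* assumed page size */
    #define CACHELINE 64u                   /* assumed cache-line size */

    /* Tiny LCG standing in for the offset_state PRNG. */
    static uint64_t
    prng_next(uint64_t *state)
    {
    	*state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
    	return *state;
    }

    /* Random cache-line-aligned offset within the padding page, in [0, PAGE_SZ). */
    static uintptr_t
    random_large_offset(uint64_t *state)
    {
    	uint64_t r = prng_next(state) >> 33;    /* prefer the high bits */
    	return (uintptr_t)((r % (PAGE_SZ / CACHELINE)) * CACHELINE);
    }

    int
    main(void)
    {
    	uint64_t state = 42;
    	for (int i = 0; i < 4; i++)
    		printf("base offset: %lu\n",
    		    (unsigned long)random_large_offset(&state));
    	return 0;
    }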
+
+extern purge_mode_t opt_purge;
+extern const char *purge_mode_names[];
+extern ssize_t opt_lg_dirty_mult;
+extern ssize_t opt_decay_time;
+
+extern arena_bin_info_t arena_bin_info[NBINS];
+
+extern size_t map_bias; /* Number of arena chunk header pages. */
+extern size_t map_misc_offset;
+extern size_t arena_maxrun; /* Max run size for arenas. */
+extern size_t large_maxclass; /* Max large size class. */
+extern unsigned nlclasses; /* Number of large size classes. */
+extern unsigned nhclasses; /* Number of huge size classes. */
+
+#ifdef JEMALLOC_JET
+typedef size_t (run_quantize_t)(size_t);
+extern run_quantize_t *run_quantize_floor;
+extern run_quantize_t *run_quantize_ceil;
+#endif
+void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+ bool cache);
+void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+ bool cache);
+extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
+void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t usize);
+void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t lg_dirty_mult);
+ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
+void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
+void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
+ tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
+ bool zero);
+#ifdef JEMALLOC_JET
+typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
+ uint8_t);
+extern arena_redzone_corruption_t *arena_redzone_corruption;
+typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
+extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
+#else
+void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+#endif
+void arena_quarantine_junk_small(void *ptr, size_t usize);
+void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
+ bool zero);
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+ szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache);
+void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind);
+#ifdef JEMALLOC_JET
+typedef void (arena_dalloc_junk_large_t)(void *, size_t);
+extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#else
+void arena_dalloc_junk_large(void *ptr, size_t usize);
+#endif
+void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr);
+void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr);
+#ifdef JEMALLOC_JET
+typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
+extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
+#endif
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t size, size_t extra, bool zero);
+void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache);
+dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
+ssize_t arena_lg_dirty_mult_default_get(void);
+bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
+ssize_t arena_decay_time_default_get(void);
+bool arena_decay_time_default_set(ssize_t decay_time);
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+ unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
+ ssize_t *decay_time, size_t *nactive, size_t *ndirty);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
+void arena_boot(void);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
+ size_t pageind);
+const arena_chunk_map_bits_t *arena_bitselm_get_const(
+ const arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
+ size_t pageind);
+const arena_chunk_map_misc_t *arena_miscelm_get_const(
+ const arena_chunk_t *chunk, size_t pageind);
+size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
+arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
+arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
+size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbitsp_read(const size_t *mapbitsp);
+size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_size_decode(size_t mapbits);
+size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+ size_t pageind);
+szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
+void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
+size_t arena_mapbits_size_encode(size_t size);
+void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size, size_t flags);
+void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size);
+void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
+ size_t flags);
+void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size, size_t flags);
+void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
+ szind_t binind);
+void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
+ size_t runind, szind_t binind, size_t flags);
+void arena_metadata_allocated_add(arena_t *arena, size_t size);
+void arena_metadata_allocated_sub(arena_t *arena, size_t size);
+size_t arena_metadata_allocated_get(arena_t *arena);
+bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
+szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
+size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+ const void *ptr);
+prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *old_tctx);
+void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
+void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
+void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero, tcache_t *tcache, bool slow_path);
+arena_t *arena_aalloc(const void *ptr);
+size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
+void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
+# ifdef JEMALLOC_ARENA_INLINE_A
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
+arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+{
+
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
+
+ return (&chunk->map_bits[pageind-map_bias]);
+}
+
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
+arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+{
+
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
+
+ return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
+ (uintptr_t)map_misc_offset) + pageind-map_bias);
+}
+
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
+arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
+ map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
+
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
+
+ return (pageind);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+
+ return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
+}
+
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
+{
+ arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
+ *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
+
+ assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
+ assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
+
+ return (miscelm);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_run_to_miscelm(arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
+ *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
+
+ assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
+ assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
+
+ return (miscelm);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t *
+arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
+}
+
+JEMALLOC_ALWAYS_INLINE const size_t *
+arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbitsp_read(const size_t *mapbitsp)
+{
+
+ return (*mapbitsp);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_decode(size_t mapbits)
+{
+ size_t size;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+ size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+ size = mapbits & CHUNK_MAP_SIZE_MASK;
+#else
+ size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+ return (size);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ return (arena_mapbits_size_decode(mapbits));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+ (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
+ return (arena_mapbits_size_decode(mapbits));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+ CHUNK_MAP_ALLOCATED);
+ return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+ szind_t binind;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+ assert(binind < NBINS || binind == BININD_INVALID);
+ return (binind);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ return (mapbits & CHUNK_MAP_DIRTY);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ return (mapbits & CHUNK_MAP_UNZEROED);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ return (mapbits & CHUNK_MAP_DECOMMITTED);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_LARGE);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_ALLOCATED);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
+{
+
+ *mapbitsp = mapbits;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_encode(size_t size)
+{
+ size_t mapbits;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+ mapbits = size << CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+ mapbits = size;
+#else
+ mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+ assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
+ return (mapbits);
+}
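arena_mapbits_size_encode() and arena_mapbits_size_decode() above are inverses under whichever of the three CHUNK_MAP_SIZE_SHIFT cases is compiled in. A small round-trip check under assumed values for the shift and mask (placeholders chosen for illustration; the real values come from the chunk-map bit layout):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define LG_PAGE_SK        12                            /* assumed 4 KiB pages */
    #define SIZE_SHIFT_SK     1                             /* assumed positive shift */
    #define SIZE_MASK_SK      (~(size_t)0 << SIZE_SHIFT_SK) /* assumed size-field mask */

    static size_t encode_sk(size_t size) { return size << SIZE_SHIFT_SK; }
    static size_t decode_sk(size_t bits) { return (bits & SIZE_MASK_SK) >> SIZE_SHIFT_SK; }

    int
    main(void)
    {
    	for (size_t npages = 1; npages <= 1024; npages++) {
    		size_t size = npages << LG_PAGE_SK;     /* page-aligned, as asserted above */
    		size_t bits = encode_sk(size);
    		assert((bits & ~SIZE_MASK_SK) == 0);    /* mirrors the encode assertion */
    		assert(decode_sk(bits) == size);        /* decode inverts encode */
    	}
    	printf("encode/decode round-trip holds\n");
    	return 0;
    }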
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
+ size_t flags)
+{
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+
+ assert((size & PAGE_MASK) == 0);
+ assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
+ assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ CHUNK_MAP_BININD_INVALID | flags);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size)
+{
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
+
+ assert((size & PAGE_MASK) == 0);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ (mapbits & ~CHUNK_MAP_SIZE_MASK));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
+{
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+
+ assert((flags & CHUNK_MAP_UNZEROED) == flags);
+ arena_mapbitsp_write(mapbitsp, flags);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
+ size_t flags)
+{
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+
+ assert((size & PAGE_MASK) == 0);
+ assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
+ assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
+ szind_t binind)
+{
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
+
+ assert(binind <= BININD_INVALID);
+ assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
+ large_pad);
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
+ (binind << CHUNK_MAP_BININD_SHIFT));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
+ szind_t binind, size_t flags)
+{
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+
+ assert(binind < BININD_INVALID);
+ assert(pageind - runind >= map_bias);
+ assert((flags & CHUNK_MAP_UNZEROED) == flags);
+ arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
+ (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
+}
+
+JEMALLOC_INLINE void
+arena_metadata_allocated_add(arena_t *arena, size_t size)
+{
+
+ atomic_add_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE void
+arena_metadata_allocated_sub(arena_t *arena, size_t size)
+{
+
+ atomic_sub_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE size_t
+arena_metadata_allocated_get(arena_t *arena)
+{
+
+ return (atomic_read_z(&arena->stats.metadata_allocated));
+}
+
+JEMALLOC_INLINE bool
+arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
+{
+
+ cassert(config_prof);
+ assert(prof_interval != 0);
+
+ arena->prof_accumbytes += accumbytes;
+ if (arena->prof_accumbytes >= prof_interval) {
+ arena->prof_accumbytes -= prof_interval;
+ return (true);
+ }
+ return (false);
+}
+
+JEMALLOC_INLINE bool
+arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
+{
+
+ cassert(config_prof);
+
+ if (likely(prof_interval == 0))
+ return (false);
+ return (arena_prof_accum_impl(arena, accumbytes));
+}
+
+JEMALLOC_INLINE bool
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
+{
+
+ cassert(config_prof);
+
+ if (likely(prof_interval == 0))
+ return (false);
+
+ {
+ bool ret;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ret = arena_prof_accum_impl(arena, accumbytes);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (ret);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
+{
+ szind_t binind;
+
+ binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+
+ if (config_debug) {
+ arena_chunk_t *chunk;
+ arena_t *arena;
+ size_t pageind;
+ size_t actual_mapbits;
+ size_t rpages_ind;
+ const arena_run_t *run;
+ arena_bin_t *bin;
+ szind_t run_binind, actual_binind;
+ arena_bin_info_t *bin_info;
+ const arena_chunk_map_misc_t *miscelm;
+ const void *rpages;
+
+ assert(binind != BININD_INVALID);
+ assert(binind < NBINS);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = extent_node_arena_get(&chunk->node);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ actual_mapbits = arena_mapbits_get(chunk, pageind);
+ assert(mapbits == actual_mapbits);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
+ pageind);
+ miscelm = arena_miscelm_get_const(chunk, rpages_ind);
+ run = &miscelm->run;
+ run_binind = run->binind;
+ bin = &arena->bins[run_binind];
+ actual_binind = (szind_t)(bin - arena->bins);
+ assert(run_binind == actual_binind);
+ bin_info = &arena_bin_info[actual_binind];
+ rpages = arena_miscelm_to_rpages(miscelm);
+ assert(((uintptr_t)ptr - ((uintptr_t)rpages +
+ (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+ == 0);
+ }
+
+ return (binind);
+}
+# endif /* JEMALLOC_ARENA_INLINE_A */
+
+# ifdef JEMALLOC_ARENA_INLINE_B
+JEMALLOC_INLINE szind_t
+arena_bin_index(arena_t *arena, arena_bin_t *bin)
+{
+ szind_t binind = (szind_t)(bin - arena->bins);
+ assert(binind < NBINS);
+ return (binind);
+}
+
+JEMALLOC_INLINE size_t
+arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
+{
+ size_t diff, interval, shift, regind;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ void *rpages = arena_miscelm_to_rpages(miscelm);
+
+ /*
+ * Freeing a pointer lower than region zero can cause assertion
+ * failure.
+ */
+ assert((uintptr_t)ptr >= (uintptr_t)rpages +
+ (uintptr_t)bin_info->reg0_offset);
+
+ /*
+ * Avoid doing division with a variable divisor if possible. Using
+ * actual division here can reduce allocator throughput by over 20%!
+ */
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
+ bin_info->reg0_offset);
+
+ /* Rescale (factor powers of 2 out of the numerator and denominator). */
+ interval = bin_info->reg_interval;
+ shift = ffs_zu(interval) - 1;
+ diff >>= shift;
+ interval >>= shift;
+
+ if (interval == 1) {
+ /* The divisor was a power of 2. */
+ regind = diff;
+ } else {
+ /*
+ * To divide by a number D that is not a power of two we
+ * multiply by (2^21 / D) and then right shift by 21 positions.
+ *
+ * X / D
+ *
+ * becomes
+ *
+ * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
+ *
+ * We can omit the first three elements, because we never
+ * divide by 0, and 1 and 2 are both powers of two, which are
+ * handled above.
+ */
+#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
+#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
+ static const size_t interval_invs[] = {
+ SIZE_INV(3),
+ SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+ SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+ SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+ SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+ SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+ SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+ SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+ };
+
+ if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
+ + 2))) {
+ regind = (diff * interval_invs[interval - 3]) >>
+ SIZE_INV_SHIFT;
+ } else
+ regind = diff / interval;
+#undef SIZE_INV
+#undef SIZE_INV_SHIFT
+ }
+ assert(diff == regind * interval);
+ assert(regind < bin_info->nregs);
+
+ return (regind);
+}
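The interval_invs[] trick above replaces the variable division diff / interval with a multiply and shift: for a divisor D that is not a power of two, precompute inv(D) = floor(2^21 / D) + 1 and take (diff * inv(D)) >> 21, which is exact whenever diff is a multiple of D and the region index stays well below 2^21 / D. A standalone check of that identity, assuming the comment's shift of 21 and a region-count bound of 2048 (i.e. LG_RUN_MAXREGS == 11):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT 21u                                       /* matches the comment above */
    #define INV(d) ((((uint64_t)1 << SHIFT) / (d)) + 1)

    int
    main(void)
    {
    	/* For every divisor covered by interval_invs[] and every region index
    	 * a run can hold, the multiply-and-shift equals true division. */
    	for (uint64_t d = 3; d <= 31; d++) {
    		uint64_t inv = INV(d);
    		for (uint64_t regind = 0; regind < 2048; regind++) {
    			uint64_t diff = regind * d;     /* ptr lies on a region boundary */
    			assert(((diff * inv) >> SHIFT) == regind);
    		}
    	}
    	printf("multiply-and-shift division verified\n");
    	return 0;
    }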
+
+JEMALLOC_INLINE prof_tctx_t *
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+{
+ prof_tctx_t *ret;
+ arena_chunk_t *chunk;
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
+ ret = (prof_tctx_t *)(uintptr_t)1U;
+ else {
+ arena_chunk_map_misc_t *elm =
+ arena_miscelm_get_mutable(chunk, pageind);
+ ret = atomic_read_p(&elm->prof_tctx_pun);
+ }
+ } else
+ ret = huge_prof_tctx_get(tsdn, ptr);
+
+ return (ret);
+}
+
+JEMALLOC_INLINE void
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx)
+{
+ arena_chunk_t *chunk;
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+
+ if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
+ (uintptr_t)1U)) {
+ arena_chunk_map_misc_t *elm;
+
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+ elm = arena_miscelm_get_mutable(chunk, pageind);
+ atomic_write_p(&elm->prof_tctx_pun, tctx);
+ } else {
+ /*
+ * tctx must always be initialized for large runs.
+ * Assert that the surrounding conditional logic is
+ * equivalent to checking whether ptr refers to a large
+ * run.
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ }
+ } else
+ huge_prof_tctx_set(tsdn, ptr, tctx);
+}
+
+JEMALLOC_INLINE void
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *old_tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
+ (uintptr_t)old_tctx > (uintptr_t)1U))) {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ size_t pageind;
+ arena_chunk_map_misc_t *elm;
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) !=
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+ elm = arena_miscelm_get_mutable(chunk, pageind);
+ atomic_write_p(&elm->prof_tctx_pun,
+ (prof_tctx_t *)(uintptr_t)1U);
+ } else
+ huge_prof_tctx_reset(tsdn, ptr);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
+{
+ tsd_t *tsd;
+ ticker_t *decay_ticker;
+
+ if (unlikely(tsdn_null(tsdn)))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ decay_ticker = decay_ticker_get(tsd, arena->ind);
+ if (unlikely(decay_ticker == NULL))
+ return;
+ if (unlikely(ticker_ticks(decay_ticker, nticks)))
+ arena_purge(tsdn, arena, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
+{
+
+ arena_decay_ticks(tsdn, arena, 1);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
+ tcache_t *tcache, bool slow_path)
+{
+
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(size != 0);
+
+ if (likely(tcache != NULL)) {
+ if (likely(size <= SMALL_MAXCLASS)) {
+ return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path));
+ }
+ if (likely(size <= tcache_maxclass)) {
+ return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path));
+ }
+ /* (size > tcache_maxclass) case falls through. */
+ assert(size > tcache_maxclass);
+ }
+
+ return (arena_malloc_hard(tsdn, arena, size, ind, zero));
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(const void *ptr)
+{
+ arena_chunk_t *chunk;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr))
+ return (extent_node_arena_get(&chunk->node));
+ else
+ return (huge_aalloc(ptr));
+}
+
+/* Return the size of the allocation pointed to by ptr. */
+JEMALLOC_ALWAYS_INLINE size_t
+arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+ size_t pageind;
+ szind_t binind;
+
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ binind = arena_mapbits_binind_get(chunk, pageind);
+ if (unlikely(binind == BININD_INVALID || (config_prof && !demote
+ && arena_mapbits_large_get(chunk, pageind) != 0))) {
+ /*
+ * Large allocation. In the common case (demote), and
+ * as this is an inline function, most callers will only
+ * end up looking at binind to determine that ptr is a
+ * small allocation.
+ */
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+ ret = arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad;
+ assert(ret != 0);
+ assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
+ chunk_npages);
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk,
+ pageind+((ret+large_pad)>>LG_PAGE)-1));
+ } else {
+ /*
+ * Small allocation (possibly promoted to a large
+ * object).
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+ arena_ptr_small_binind_get(ptr,
+ arena_mapbits_get(chunk, pageind)) == binind);
+ ret = index2size(binind);
+ }
+ } else
+ ret = huge_salloc(tsdn, ptr);
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
+ /* Small allocation. */
+ if (likely(tcache != NULL)) {
+ szind_t binind = arena_ptr_small_binind_get(ptr,
+ mapbits);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
+ } else {
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
+ }
+ } else {
+ size_t size = arena_mapbits_large_size_get(chunk,
+ pageind);
+
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+
+ if (likely(tcache != NULL) && size - large_pad <=
+ tcache_maxclass) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size - large_pad, slow_path);
+ } else {
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
+ }
+ }
+ } else
+ huge_dalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path)
+{
+ arena_chunk_t *chunk;
+
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ if (config_prof && opt_prof) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) !=
+ 0);
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ /*
+ * Make sure to use promoted size, not request
+ * size.
+ */
+ size = arena_mapbits_large_size_get(chunk,
+ pageind) - large_pad;
+ }
+ }
+ assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
+
+ if (likely(size <= SMALL_MAXCLASS)) {
+ /* Small allocation. */
+ if (likely(tcache != NULL)) {
+ szind_t binind = size2index(size);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
+ } else {
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
+ }
+ } else {
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+
+ if (likely(tcache != NULL) && size <= tcache_maxclass) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size, slow_path);
+ } else {
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
+ }
+ }
+ } else
+ huge_dalloc(tsdn, ptr);
+}
+# endif /* JEMALLOC_ARENA_INLINE_B */
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/assert.h b/memory/jemalloc/src/include/jemalloc/internal/assert.h
new file mode 100644
index 000000000..6f8f7eb93
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/assert.h
@@ -0,0 +1,45 @@
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define assert(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+#endif
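The do { ... } while (0) wrapper on these macros is what makes a multi-statement macro behave like a single statement, so it stays safe as the unbraced body of an if/else. A small, self-contained illustration with a hypothetical macro of the same shape:

    #include <stdio.h>

    /* Same shape as the assert()/not_reached() macros above. */
    #define check_positive(x) do {					\
    	if ((x) <= 0)							\
    		printf("check failed: %d\n", (x));			\
    } while (0)

    int
    main(void)
    {
    	int v = -3;

    	/* Because the macro expands to a single statement, the else still
    	 * binds to this if; a bare { ... } block would break here because
    	 * of the trailing semicolon. */
    	if (v != 0)
    		check_positive(v);
    	else
    		printf("zero\n");
    	return 0;
    }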
+
+#ifndef not_reached
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) \
+ not_implemented(); \
+} while (0)
+#endif
+
+
diff --git a/memory/jemalloc/src/include/jemalloc/internal/atomic.h b/memory/jemalloc/src/include/jemalloc/internal/atomic.h
new file mode 100644
index 000000000..3f15ea149
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/atomic.h
@@ -0,0 +1,651 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
+#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
+#define atomic_read_p(p) atomic_add_p(p, NULL)
+#define atomic_read_z(p) atomic_add_z(p, 0)
+#define atomic_read_u(p) atomic_add_u(p, 0)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+/*
+ * All arithmetic functions return the arithmetic result of the atomic
+ * operation. Some atomic operation APIs return the value prior to mutation, in
+ * which case the following functions must redundantly compute the result so
+ * that it can be returned. These functions are normally inlined, so the extra
+ * operations can be optimized away if the return values aren't used by the
+ * callers.
+ *
+ * <t> atomic_read_<t>(<t> *p) { return (*p); }
+ * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
+ * bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
+ * {
+ * if (*p != c)
+ * return (true);
+ * *p = s;
+ * return (false);
+ * }
+ * void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
+ */
+
+#ifndef JEMALLOC_ENABLE_INLINE
+uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
+uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
+void atomic_write_uint64(uint64_t *p, uint64_t x);
+uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
+uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
+void atomic_write_uint32(uint32_t *p, uint32_t x);
+void *atomic_add_p(void **p, void *x);
+void *atomic_sub_p(void **p, void *x);
+bool atomic_cas_p(void **p, void *c, void *s);
+void atomic_write_p(void **p, const void *x);
+size_t atomic_add_z(size_t *p, size_t x);
+size_t atomic_sub_z(size_t *p, size_t x);
+bool atomic_cas_z(size_t *p, size_t c, size_t s);
+void atomic_write_z(size_t *p, size_t x);
+unsigned atomic_add_u(unsigned *p, unsigned x);
+unsigned atomic_sub_u(unsigned *p, unsigned x);
+bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
+void atomic_write_u(unsigned *p, unsigned x);
+#endif
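Given the conventions documented above (reads built from add-of-zero, and CAS returning false on success), any read-modify-write operation that is not provided directly can be composed as a read-then-CAS retry loop; the OSAtomic fallback below uses exactly that pattern to emulate an atomic write. A hedged standalone sketch of an "atomic max" on size_t, using the __sync builtins that the generic fallback paths in this header rely on:

    #include <stddef.h>
    #include <stdio.h>

    /* CAS wrapper following the convention above: returns false on success. */
    static int
    cas_z(size_t *p, size_t expected, size_t desired)
    {
    	return (!__sync_bool_compare_and_swap(p, expected, desired));
    }

    /* Atomically raise *p to at least x, retrying until the CAS lands. */
    static void
    atomic_max_z(size_t *p, size_t x)
    {
    	size_t cur;

    	do {
    		cur = *p;               /* read the current value */
    		if (cur >= x)
    			return;         /* nothing to do */
    	} while (cas_z(p, cur, x));     /* retry if another thread raced us */
    }

    int
    main(void)
    {
    	size_t watermark = 10;

    	atomic_max_z(&watermark, 42);
    	atomic_max_z(&watermark, 7);
    	printf("%zu\n", watermark);     /* prints 42 */
    	return 0;
    }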
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
+/******************************************************************************/
+/* 64-bit operations. */
+#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+# if (defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+ uint64_t t = x;
+
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+ uint64_t t;
+
+ x = (uint64_t)(-(int64_t)x);
+ t = x;
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+ uint8_t success;
+
+ asm volatile (
+ "lock; cmpxchgq %4, %0;"
+ "sete %1;"
+ : "=m" (*p), "=a" (success) /* Outputs. */
+ : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+
+ return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ asm volatile (
+ "xchgq %1, %0;" /* Lock is implied by xchgq. */
+ : "=m" (*p), "+r" (x) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+# elif (defined(JEMALLOC_C11ATOMICS))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ return (atomic_fetch_add(a, x) + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ return (atomic_fetch_sub(a, x) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ return (!atomic_compare_exchange_strong(a, &c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ atomic_store(a, x);
+}
+# elif (defined(JEMALLOC_ATOMIC9))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+ /*
+ * atomic_fetchadd_64() doesn't exist, but we only ever use this
+ * function on LP64 systems, so atomic_fetchadd_long() will do.
+ */
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ return (atomic_fetchadd_long(p, (unsigned long)x) + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ atomic_store_rel_long(p, x);
+}
+# elif (defined(JEMALLOC_OSATOMIC))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+ return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+ return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+ return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+ uint64_t o;
+
+	/* The documented OSAtomic*() API does not expose an atomic exchange. */
+ do {
+ o = atomic_read_uint64(p);
+ } while (atomic_cas_uint64(p, o, x));
+}
+# elif (defined(_MSC_VER))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+ return (InterlockedExchangeAdd64(p, x) + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+ return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+ uint64_t o;
+
+ o = InterlockedCompareExchange64(p, s, c);
+ return (o != c);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ InterlockedExchange64(p, x);
+}
+# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
+ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+ return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+ return (__sync_sub_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+ return (!__sync_bool_compare_and_swap(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ __sync_lock_test_and_set(p, x);
+}
+# else
+# error "Missing implementation for 64-bit atomic operations"
+# endif
+#endif
+
+/******************************************************************************/
+/* 32-bit operations. */
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+ uint32_t t = x;
+
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+ uint32_t t;
+
+ x = (uint32_t)(-(int32_t)x);
+ t = x;
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+ uint8_t success;
+
+ asm volatile (
+ "lock; cmpxchgl %4, %0;"
+ "sete %1;"
+ : "=m" (*p), "=a" (success) /* Outputs. */
+ : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+ : "memory"
+ );
+
+ return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ asm volatile (
+ "xchgl %1, %0;" /* Lock is implied by xchgl. */
+ : "=m" (*p), "+r" (x) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+#elif (defined(JEMALLOC_C11ATOMICS))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ return (atomic_fetch_add(a, x) + x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ return (atomic_fetch_sub(a, x) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ return (!atomic_compare_exchange_strong(a, &c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ atomic_store(a, x);
+}
+#elif (defined(JEMALLOC_ATOMIC9))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (atomic_fetchadd_32(p, x) + x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+ return (!atomic_cmpset_32(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ atomic_store_rel_32(p, x);
+}
+#elif (defined(JEMALLOC_OSATOMIC))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+ return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+ uint32_t o;
+
+	/* The documented OSAtomic*() API does not expose an atomic exchange. */
+ do {
+ o = atomic_read_uint32(p);
+ } while (atomic_cas_uint32(p, o, x));
+}
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (InterlockedExchangeAdd(p, x) + x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+ uint32_t o;
+
+ o = InterlockedCompareExchange(p, s, c);
+ return (o != c);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ InterlockedExchange(p, x);
+}
+#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
+ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+ return (__sync_sub_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+ return (!__sync_bool_compare_and_swap(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ __sync_lock_test_and_set(p, x);
+}
+#else
+# error "Missing implementation for 32-bit atomic operations"
+#endif
+
+/******************************************************************************/
+/* Pointer operations. */
+JEMALLOC_INLINE void *
+atomic_add_p(void **p, void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_PTR == 2)
+ return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE void *
+atomic_sub_p(void **p, void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return ((void *)atomic_add_uint64((uint64_t *)p,
+ (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_PTR == 2)
+ return ((void *)atomic_add_uint32((uint32_t *)p,
+ (uint32_t)-((int32_t)x)));
+#endif
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_p(void **p, void *c, void *s)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_PTR == 2)
+ return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_p(void **p, const void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 2)
+ atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+/******************************************************************************/
+/* size_t operations. */
+JEMALLOC_INLINE size_t
+atomic_add_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_PTR == 2)
+ return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE size_t
+atomic_sub_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return ((size_t)atomic_add_uint64((uint64_t *)p,
+ (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_PTR == 2)
+ return ((size_t)atomic_add_uint32((uint32_t *)p,
+ (uint32_t)-((int32_t)x)));
+#endif
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_z(size_t *p, size_t c, size_t s)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_PTR == 2)
+ return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 2)
+ atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+/******************************************************************************/
+/* unsigned operations. */
+JEMALLOC_INLINE unsigned
+atomic_add_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+ return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_INT == 2)
+ return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE unsigned
+atomic_sub_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+ return ((unsigned)atomic_add_uint64((uint64_t *)p,
+ (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_INT == 2)
+ return ((unsigned)atomic_add_uint32((uint32_t *)p,
+ (uint32_t)-((int32_t)x)));
+#endif
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_u(unsigned *p, unsigned c, unsigned s)
+{
+
+#if (LG_SIZEOF_INT == 3)
+ return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_INT == 2)
+ return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+ atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_INT == 2)
+ atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+/******************************************************************************/
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/base.h b/memory/jemalloc/src/include/jemalloc/internal/base.h
new file mode 100644
index 000000000..d6b81e162
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/base.h
@@ -0,0 +1,25 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *base_alloc(tsdn_t *tsdn, size_t size);
+void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+ size_t *mapped);
+bool base_boot(void);
+void base_prefork(tsdn_t *tsdn);
+void base_postfork_parent(tsdn_t *tsdn);
+void base_postfork_child(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/bitmap.h b/memory/jemalloc/src/include/jemalloc/internal/bitmap.h
new file mode 100644
index 000000000..36f38b59c
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/bitmap.h
@@ -0,0 +1,274 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
+#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+
+typedef struct bitmap_level_s bitmap_level_t;
+typedef struct bitmap_info_s bitmap_info_t;
+typedef unsigned long bitmap_t;
+#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
+
+/* Number of bits per group. */
+#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
+#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
+#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
+
+/*
+ * Decide whether the bitmap is large enough to warrant a tree: if a brute-
+ * force linear search would require more than 2^3 calls to ffs_lu(), use a
+ * tree instead.
+ */
+#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
+# define USE_TREE
+#endif
+
+/* Number of groups required to store a given number of bits. */
+#define BITMAP_BITS2GROUPS(nbits) \
+ ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
+
+/*
+ * Number of groups required at a particular level for a given number of bits.
+ */
+#define BITMAP_GROUPS_L0(nbits) \
+ BITMAP_BITS2GROUPS(nbits)
+#define BITMAP_GROUPS_L1(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
+#define BITMAP_GROUPS_L2(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
+#define BITMAP_GROUPS_L3(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
+ BITMAP_BITS2GROUPS((nbits)))))
+
+/*
+ * Total number of groups required for a given number of bits, assuming a
+ * fixed number of levels.
+ */
+#define BITMAP_GROUPS_1_LEVEL(nbits) \
+ BITMAP_GROUPS_L0(nbits)
+#define BITMAP_GROUPS_2_LEVEL(nbits) \
+ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
+#define BITMAP_GROUPS_3_LEVEL(nbits) \
+ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
+#define BITMAP_GROUPS_4_LEVEL(nbits) \
+ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
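As a worked example of the macros above: with 64-bit groups (LG_BITMAP_GROUP_NBITS == 6) and an assumed LG_BITMAP_MAXBITS of 11, a 2048-bit bitmap needs 32 level-0 groups plus 1 level-1 summary group, and the tree representation is chosen because 11 - 6 > 3. A small sketch mirroring those computations:

    #include <stdio.h>

    #define LG_GROUP_NBITS 6u                       /* assumes 64-bit bitmap_t */
    #define GROUP_NBITS    (1u << LG_GROUP_NBITS)
    #define BITS2GROUPS(n) (((n) + GROUP_NBITS - 1) >> LG_GROUP_NBITS)

    int
    main(void)
    {
    	unsigned lg_maxbits = 11;               /* assumed LG_BITMAP_MAXBITS */
    	unsigned nbits = 1u << lg_maxbits;
    	unsigned l0 = BITS2GROUPS(nbits);       /* 32 groups of 64 bits */
    	unsigned l1 = BITS2GROUPS(l0);          /* 1 summary group */

    	printf("L0 groups: %u, L1 groups: %u, total: %u\n", l0, l1, l0 + l1);
    	printf("use tree: %s\n",
    	    (lg_maxbits - LG_GROUP_NBITS > 3) ? "yes" : "no");
    	return 0;
    }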
+
+/*
+ * Maximum number of groups required to support LG_BITMAP_MAXBITS.
+ */
+#ifdef USE_TREE
+
+#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
+#else
+# error "Unsupported bitmap size"
+#endif
+
+/* Maximum number of levels possible. */
+#define BITMAP_MAX_LEVELS \
+ (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+
+#else /* USE_TREE */
+
+#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
+
+#endif /* USE_TREE */
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct bitmap_level_s {
+ /* Offset of this level's groups within the array of groups. */
+ size_t group_offset;
+};
+
+struct bitmap_info_s {
+ /* Logical number of bits in bitmap (stored at bottom level). */
+ size_t nbits;
+
+#ifdef USE_TREE
+ /* Number of levels necessary for nbits. */
+ unsigned nlevels;
+
+ /*
+ * Only the first (nlevels+1) elements are used, and levels are ordered
+ * bottom to top (e.g. the bottom level is stored in levels[0]).
+ */
+ bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
+#else /* USE_TREE */
+ /* Number of groups necessary for nbits. */
+ size_t ngroups;
+#endif /* USE_TREE */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
+size_t bitmap_size(const bitmap_info_t *binfo);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
+bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
+void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
+size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
+void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
+JEMALLOC_INLINE bool
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+#ifdef USE_TREE
+ size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+ bitmap_t rg = bitmap[rgoff];
+ /* The bitmap is full iff the root group is 0. */
+ return (rg == 0);
+#else
+ size_t i;
+
+ for (i = 0; i < binfo->ngroups; i++) {
+ if (bitmap[i] != 0)
+ return (false);
+ }
+ return (true);
+#endif
+}
+
+JEMALLOC_INLINE bool
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
+{
+ size_t goff;
+ bitmap_t g;
+
+ assert(bit < binfo->nbits);
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ g = bitmap[goff];
+ return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
+}
+
+JEMALLOC_INLINE void
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
+{
+ size_t goff;
+ bitmap_t *gp;
+ bitmap_t g;
+
+ assert(bit < binfo->nbits);
+ assert(!bitmap_get(bitmap, binfo, bit));
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[goff];
+ g = *gp;
+ assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ assert(bitmap_get(bitmap, binfo, bit));
+#ifdef USE_TREE
+ /* Propagate group state transitions up the tree. */
+ if (g == 0) {
+ unsigned i;
+ for (i = 1; i < binfo->nlevels; i++) {
+ bit = goff;
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[binfo->levels[i].group_offset + goff];
+ g = *gp;
+ assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ if (g != 0)
+ break;
+ }
+ }
+#endif
+}
+
+/* sfu: set first unset. */
+JEMALLOC_INLINE size_t
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t bit;
+ bitmap_t g;
+ unsigned i;
+
+ assert(!bitmap_full(bitmap, binfo));
+
+#ifdef USE_TREE
+ i = binfo->nlevels - 1;
+ g = bitmap[binfo->levels[i].group_offset];
+ bit = ffs_lu(g) - 1;
+ while (i > 0) {
+ i--;
+ g = bitmap[binfo->levels[i].group_offset + bit];
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+ }
+#else
+ i = 0;
+ g = bitmap[0];
+ while ((bit = ffs_lu(g)) == 0) {
+ i++;
+ g = bitmap[i];
+ }
+ bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+#endif
+ bitmap_set(bitmap, binfo, bit);
+ return (bit);
+}
+
+JEMALLOC_INLINE void
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
+{
+ size_t goff;
+ bitmap_t *gp;
+ bitmap_t g;
+ UNUSED bool propagate;
+
+ assert(bit < binfo->nbits);
+ assert(bitmap_get(bitmap, binfo, bit));
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[goff];
+ g = *gp;
+ propagate = (g == 0);
+ assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ assert(!bitmap_get(bitmap, binfo, bit));
+#ifdef USE_TREE
+ /* Propagate group state transitions up the tree. */
+ if (propagate) {
+ unsigned i;
+ for (i = 1; i < binfo->nlevels; i++) {
+ bit = goff;
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[binfo->levels[i].group_offset + goff];
+ g = *gp;
+ propagate = (g == 0);
+ assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
+ == 0);
+ g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ if (!propagate)
+ break;
+ }
+ }
+#endif /* USE_TREE */
+}
+
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
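A point that is easy to miss in the inlines above: the stored bit sense is inverted. bitmap_get() treats a stored 0 as "set" and a stored 1 as "unset", which is why a completely full bitmap has a root group of 0 and why bitmap_sfu() searches for the lowest stored 1 with ffs_lu(). Below is a minimal standalone model of the flat (non-tree) variant, with a hypothetical 64-bit group type and a plain loop in place of ffs_lu(); it is an illustrative sketch, not the jemalloc implementation.

/*
 * Minimal standalone model of the flat (non-tree) bitmap above.  Stored bits
 * are inverted: 1 = unset (free), 0 = set (in use).
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GROUP_NBITS 64
typedef uint64_t group_t;

static void demo_init(group_t *groups, size_t ngroups) {
	memset(groups, 0xff, ngroups * sizeof(group_t)); /* everything unset */
}

static bool demo_get(const group_t *groups, size_t bit) {
	return !(groups[bit / GROUP_NBITS] & ((group_t)1 << (bit % GROUP_NBITS)));
}

static void demo_set(group_t *groups, size_t bit) {
	groups[bit / GROUP_NBITS] &= ~((group_t)1 << (bit % GROUP_NBITS));
}

static void demo_unset(group_t *groups, size_t bit) {
	groups[bit / GROUP_NBITS] |= (group_t)1 << (bit % GROUP_NBITS);
}

/* "Set first unset": find the lowest stored 1, clear it, return its index. */
static size_t demo_sfu(group_t *groups, size_t ngroups) {
	for (size_t i = 0; i < ngroups; i++) {
		if (groups[i] != 0) {
			size_t off = 0;
			while (!(groups[i] & ((group_t)1 << off)))
				off++;               /* plain loop instead of ffs_lu() */
			size_t bit = i * GROUP_NBITS + off;
			demo_set(groups, bit);
			return bit;
		}
	}
	assert(false && "bitmap is full");
	return 0;
}

int main(void) {
	group_t groups[2];
	demo_init(groups, 2);
	assert(demo_sfu(groups, 2) == 0);    /* lowest free position first */
	assert(demo_sfu(groups, 2) == 1);
	assert(demo_get(groups, 1));         /* position 1 is now "set" */
	demo_unset(groups, 0);
	assert(demo_sfu(groups, 2) == 0);    /* a freed slot is reused first */
	printf("flat bitmap model OK\n");
	return 0;
}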
diff --git a/memory/jemalloc/src/include/jemalloc/internal/chunk.h b/memory/jemalloc/src/include/jemalloc/internal/chunk.h
new file mode 100644
index 000000000..38c9a012d
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/chunk.h
@@ -0,0 +1,96 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * Size and alignment of memory chunks that are allocated by the OS's virtual
+ * memory system.
+ */
+#define LG_CHUNK_DEFAULT 21
+
+/* Return the chunk address for allocation address a. */
+#define CHUNK_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~chunksize_mask))
+
+/* Return the chunk offset of address a. */
+#define CHUNK_ADDR2OFFSET(a) \
+ ((size_t)((uintptr_t)(a) & chunksize_mask))
+
+/* Return the smallest chunk multiple that is >= s. */
+#define CHUNK_CEILING(s) \
+ (((s) + chunksize_mask) & ~chunksize_mask)
+
+#define CHUNK_HOOKS_INITIALIZER { \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL \
+}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern size_t opt_lg_chunk;
+extern const char *opt_dss;
+
+extern rtree_t chunks_rtree;
+
+extern size_t chunksize;
+extern size_t chunksize_mask; /* (chunksize - 1). */
+extern size_t chunk_npages;
+
+extern const chunk_hooks_t chunk_hooks_default;
+
+chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
+chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
+ const chunk_hooks_t *chunk_hooks);
+
+bool chunk_register(tsdn_t *tsdn, const void *chunk,
+ const extent_node_t *node);
+void chunk_deregister(const void *chunk, const extent_node_t *node);
+void *chunk_alloc_base(size_t size);
+void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit, bool dalloc_node);
+void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit);
+void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
+void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+ bool committed);
+bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
+ size_t length);
+bool chunk_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+extent_node_t *chunk_lookup(const void *chunk, bool dependent);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
+JEMALLOC_INLINE extent_node_t *
+chunk_lookup(const void *ptr, bool dependent)
+{
+
+ return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
+#include "jemalloc/internal/chunk_dss.h"
+#include "jemalloc/internal/chunk_mmap.h"
diff --git a/memory/jemalloc/src/include/jemalloc/internal/chunk_dss.h b/memory/jemalloc/src/include/jemalloc/internal/chunk_dss.h
new file mode 100644
index 000000000..da8511ba0
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/chunk_dss.h
@@ -0,0 +1,37 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef enum {
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
+
+ dss_prec_limit = 3
+} dss_prec_t;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+extern const char *dss_prec_names[];
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+dss_prec_t chunk_dss_prec_get(void);
+bool chunk_dss_prec_set(dss_prec_t dss_prec);
+void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit);
+bool chunk_in_dss(void *chunk);
+bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
+void chunk_dss_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
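The precedence setting orders the two chunk backends, the sbrk-managed data segment (dss) and mmap: "primary" tries dss before mmap, "secondary" (the default above) tries mmap first, and "disabled" never touches dss. The chooser below is hypothetical and exists only to make that ordering explicit; only the enum values mirror the header.

/* Conceptual sketch of how a dss_prec_t value orders the backends. */
#include <stdio.h>

typedef enum {
	dss_prec_disabled  = 0,  /* never use sbrk */
	dss_prec_primary   = 1,  /* try sbrk first, then mmap */
	dss_prec_secondary = 2,  /* try mmap first, then sbrk (the default) */
	dss_prec_limit     = 3
} dss_prec_t;

static const char *backend_order(dss_prec_t prec) {
	switch (prec) {
	case dss_prec_disabled:  return "mmap only";
	case dss_prec_primary:   return "dss, then mmap";
	case dss_prec_secondary: return "mmap, then dss";
	default:                 return "invalid";
	}
}

int main(void) {
	printf("default (\"secondary\"): %s\n", backend_order(dss_prec_secondary));
	return 0;
}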
diff --git a/memory/jemalloc/src/include/jemalloc/internal/chunk_mmap.h b/memory/jemalloc/src/include/jemalloc/internal/chunk_mmap.h
new file mode 100644
index 000000000..6f2d0ac2e
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/chunk_mmap.h
@@ -0,0 +1,21 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit);
+bool chunk_dalloc_mmap(void *chunk, size_t size);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/ckh.h b/memory/jemalloc/src/include/jemalloc/internal/ckh.h
new file mode 100644
index 000000000..f75ad90b7
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/ckh.h
@@ -0,0 +1,86 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct ckh_s ckh_t;
+typedef struct ckhc_s ckhc_t;
+
+/* Typedefs to allow easy function pointer passing. */
+typedef void ckh_hash_t (const void *, size_t[2]);
+typedef bool ckh_keycomp_t (const void *, const void *);
+
+/* Maintain counters used to get an idea of performance. */
+/* #define CKH_COUNT */
+/* Print counter values in ckh_delete() (requires CKH_COUNT). */
+/* #define CKH_VERBOSE */
+
+/*
+ * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
+ * one bucket per L1 cache line.
+ */
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Hash table cell. */
+struct ckhc_s {
+ const void *key;
+ const void *data;
+};
+
+struct ckh_s {
+#ifdef CKH_COUNT
+ /* Counters used to get an idea of performance. */
+ uint64_t ngrows;
+ uint64_t nshrinks;
+ uint64_t nshrinkfails;
+ uint64_t ninserts;
+ uint64_t nrelocs;
+#endif
+
+ /* Used for pseudo-random number generation. */
+ uint64_t prng_state;
+
+ /* Total number of items. */
+ size_t count;
+
+ /*
+ * Minimum and current number of hash table buckets. There are
+ * 2^LG_CKH_BUCKET_CELLS cells per bucket.
+ */
+ unsigned lg_minbuckets;
+ unsigned lg_curbuckets;
+
+ /* Hash and comparison functions. */
+ ckh_hash_t *hash;
+ ckh_keycomp_t *keycomp;
+
+ /* Hash table with 2^lg_curbuckets buckets. */
+ ckhc_t *tab;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp);
+void ckh_delete(tsd_t *tsd, ckh_t *ckh);
+size_t ckh_count(ckh_t *ckh);
+bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
+bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data);
+bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
+void ckh_string_hash(const void *key, size_t r_hash[2]);
+bool ckh_string_keycomp(const void *k1, const void *k2);
+void ckh_pointer_hash(const void *key, size_t r_hash[2]);
+bool ckh_pointer_keycomp(const void *k1, const void *k2);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
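LG_CKH_BUCKET_CELLS is chosen so that one bucket of ckhc_t cells fills exactly one L1 cache line: with LG_CACHELINE of 6 (64-byte lines) and LG_SIZEOF_PTR of 3 on typical 64-bit targets, a bucket has 2^(6-3-1) = 4 cells of two pointers each, i.e. 4 * 16 = 64 bytes. A standalone check of that arithmetic; the two LG_ constants are assumed values, not taken from a configured build:

/* Verify the bucket-per-cacheline sizing, assuming a 64-byte cache line and
 * 8-byte pointers. */
#include <assert.h>
#include <stdio.h>

#define LG_CACHELINE        6
#define LG_SIZEOF_PTR       3
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

struct ckhc_demo {            /* same shape as ckhc_s: key + data pointers */
	const void *key;
	const void *data;
};

int main(void) {
	unsigned cells_per_bucket = 1u << LG_CKH_BUCKET_CELLS;
	unsigned bucket_bytes = cells_per_bucket * (unsigned)sizeof(struct ckhc_demo);

	assert(cells_per_bucket == 4);
	/* Holds on LP64 targets where sizeof(void *) == 8. */
	assert(bucket_bytes == (1u << LG_CACHELINE));

	printf("%u cells/bucket, %u bytes/bucket\n", cells_per_bucket, bucket_bytes);
	return 0;
}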
diff --git a/memory/jemalloc/src/include/jemalloc/internal/ctl.h b/memory/jemalloc/src/include/jemalloc/internal/ctl.h
new file mode 100644
index 000000000..af0f6d7c5
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/ctl.h
@@ -0,0 +1,118 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct ctl_node_s ctl_node_t;
+typedef struct ctl_named_node_s ctl_named_node_t;
+typedef struct ctl_indexed_node_s ctl_indexed_node_t;
+typedef struct ctl_arena_stats_s ctl_arena_stats_t;
+typedef struct ctl_stats_s ctl_stats_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct ctl_node_s {
+ bool named;
+};
+
+struct ctl_named_node_s {
+ struct ctl_node_s node;
+ const char *name;
+ /* If (nchildren == 0), this is a terminal node. */
+ unsigned nchildren;
+ const ctl_node_t *children;
+ int (*ctl)(tsd_t *, const size_t *, size_t, void *,
+ size_t *, void *, size_t);
+};
+
+struct ctl_indexed_node_s {
+ struct ctl_node_s node;
+ const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
+ size_t);
+};
+
+struct ctl_arena_stats_s {
+ bool initialized;
+ unsigned nthreads;
+ const char *dss;
+ ssize_t lg_dirty_mult;
+ ssize_t decay_time;
+ size_t pactive;
+ size_t pdirty;
+
+ /* The remainder are only populated if config_stats is true. */
+
+ arena_stats_t astats;
+
+ /* Aggregate stats for small size classes, based on bin stats. */
+ size_t allocated_small;
+ uint64_t nmalloc_small;
+ uint64_t ndalloc_small;
+ uint64_t nrequests_small;
+
+ malloc_bin_stats_t bstats[NBINS];
+ malloc_large_stats_t *lstats; /* nlclasses elements. */
+ malloc_huge_stats_t *hstats; /* nhclasses elements. */
+};
+
+struct ctl_stats_s {
+ size_t allocated;
+ size_t active;
+ size_t metadata;
+ size_t resident;
+ size_t mapped;
+ size_t retained;
+ unsigned narenas;
+ ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen);
+int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+ size_t *miblenp);
+
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
+bool ctl_boot(void);
+void ctl_prefork(tsdn_t *tsdn);
+void ctl_postfork_parent(tsdn_t *tsdn);
+void ctl_postfork_child(tsdn_t *tsdn);
+
+#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
+ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
+ != 0) { \
+ malloc_printf( \
+ "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
+ name); \
+ abort(); \
+ } \
+} while (0)
+
+#define xmallctlnametomib(name, mibp, miblenp) do { \
+ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
+ malloc_printf("<jemalloc>: Failure in " \
+ "xmallctlnametomib(\"%s\", ...)\n", name); \
+ abort(); \
+ } \
+} while (0)
+
+#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
+ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
+ newlen) != 0) { \
+ malloc_write( \
+ "<jemalloc>: Failure in xmallctlbymib()\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
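The xmallctl*() wrappers above are abort-on-failure shims over jemalloc's mallctl interface. The sketch below shows the underlying call pattern; it assumes a standalone jemalloc build that installs <jemalloc/jemalloc.h> with the unprefixed public symbol names, and "stats.allocated" is the documented statistics name used as the example key.

/* Minimal usage sketch of the call pattern that xmallctl() wraps. */
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	size_t allocated;
	size_t sz = sizeof(allocated);

	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
		fprintf(stderr,
		    "<jemalloc>: Failure in mallctl(\"stats.allocated\", ...)\n");
		abort();
	}
	printf("allocated: %zu bytes\n", allocated);
	return 0;
}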
diff --git a/memory/jemalloc/src/include/jemalloc/internal/extent.h b/memory/jemalloc/src/include/jemalloc/internal/extent.h
new file mode 100644
index 000000000..49d76a57f
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/extent.h
@@ -0,0 +1,239 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct extent_node_s extent_node_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Tree of extents. Use accessor functions for en_* fields. */
+struct extent_node_s {
+ /* Arena from which this extent came, if any. */
+ arena_t *en_arena;
+
+ /* Pointer to the extent that this tree node is responsible for. */
+ void *en_addr;
+
+ /* Total region size. */
+ size_t en_size;
+
+ /*
+ * The zeroed flag is used by chunk recycling code to track whether
+ * memory is zero-filled.
+ */
+ bool en_zeroed;
+
+ /*
+ * True if physical memory is committed to the extent, whether
+ * explicitly or implicitly as on a system that overcommits and
+ * satisfies physical memory needs on demand via soft page faults.
+ */
+ bool en_committed;
+
+ /*
+ * The achunk flag is used to validate that huge allocation lookups
+ * don't return arena chunks.
+ */
+ bool en_achunk;
+
+ /* Profile counters, used for huge objects. */
+ prof_tctx_t *en_prof_tctx;
+
+ /* Linkage for arena's runs_dirty and chunks_cache rings. */
+ arena_runs_dirty_link_t rd;
+ qr(extent_node_t) cc_link;
+
+ union {
+ /* Linkage for the size/address-ordered tree. */
+ rb_node(extent_node_t) szad_link;
+
+ /* Linkage for arena's achunks, huge, and node_cache lists. */
+ ql_elm(extent_node_t) ql_link;
+ };
+
+ /* Linkage for the address-ordered tree. */
+ rb_node(extent_node_t) ad_link;
+};
+typedef rb_tree(extent_node_t) extent_tree_t;
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+
+rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_t *extent_node_arena_get(const extent_node_t *node);
+void *extent_node_addr_get(const extent_node_t *node);
+size_t extent_node_size_get(const extent_node_t *node);
+bool extent_node_zeroed_get(const extent_node_t *node);
+bool extent_node_committed_get(const extent_node_t *node);
+bool extent_node_achunk_get(const extent_node_t *node);
+prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
+void extent_node_arena_set(extent_node_t *node, arena_t *arena);
+void extent_node_addr_set(extent_node_t *node, void *addr);
+void extent_node_size_set(extent_node_t *node, size_t size);
+void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
+void extent_node_committed_set(extent_node_t *node, bool committed);
+void extent_node_achunk_set(extent_node_t *node, bool achunk);
+void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
+void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
+ size_t size, bool zeroed, bool committed);
+void extent_node_dirty_linkage_init(extent_node_t *node);
+void extent_node_dirty_insert(extent_node_t *node,
+ arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
+void extent_node_dirty_remove(extent_node_t *node);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
+JEMALLOC_INLINE arena_t *
+extent_node_arena_get(const extent_node_t *node)
+{
+
+ return (node->en_arena);
+}
+
+JEMALLOC_INLINE void *
+extent_node_addr_get(const extent_node_t *node)
+{
+
+ return (node->en_addr);
+}
+
+JEMALLOC_INLINE size_t
+extent_node_size_get(const extent_node_t *node)
+{
+
+ return (node->en_size);
+}
+
+JEMALLOC_INLINE bool
+extent_node_zeroed_get(const extent_node_t *node)
+{
+
+ return (node->en_zeroed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_committed_get(const extent_node_t *node)
+{
+
+ assert(!node->en_achunk);
+ return (node->en_committed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_achunk_get(const extent_node_t *node)
+{
+
+ return (node->en_achunk);
+}
+
+JEMALLOC_INLINE prof_tctx_t *
+extent_node_prof_tctx_get(const extent_node_t *node)
+{
+
+ return (node->en_prof_tctx);
+}
+
+JEMALLOC_INLINE void
+extent_node_arena_set(extent_node_t *node, arena_t *arena)
+{
+
+ node->en_arena = arena;
+}
+
+JEMALLOC_INLINE void
+extent_node_addr_set(extent_node_t *node, void *addr)
+{
+
+ node->en_addr = addr;
+}
+
+JEMALLOC_INLINE void
+extent_node_size_set(extent_node_t *node, size_t size)
+{
+
+ node->en_size = size;
+}
+
+JEMALLOC_INLINE void
+extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+{
+
+ node->en_zeroed = zeroed;
+}
+
+JEMALLOC_INLINE void
+extent_node_committed_set(extent_node_t *node, bool committed)
+{
+
+ node->en_committed = committed;
+}
+
+JEMALLOC_INLINE void
+extent_node_achunk_set(extent_node_t *node, bool achunk)
+{
+
+ node->en_achunk = achunk;
+}
+
+JEMALLOC_INLINE void
+extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+{
+
+ node->en_prof_tctx = tctx;
+}
+
+JEMALLOC_INLINE void
+extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
+ bool zeroed, bool committed)
+{
+
+ extent_node_arena_set(node, arena);
+ extent_node_addr_set(node, addr);
+ extent_node_size_set(node, size);
+ extent_node_zeroed_set(node, zeroed);
+ extent_node_committed_set(node, committed);
+ extent_node_achunk_set(node, false);
+ if (config_prof)
+ extent_node_prof_tctx_set(node, NULL);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_linkage_init(extent_node_t *node)
+{
+
+ qr_new(&node->rd, rd_link);
+ qr_new(node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+ arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+
+ qr_meld(runs_dirty, &node->rd, rd_link);
+ qr_meld(chunks_dirty, node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+
+ qr_remove(&node->rd, rd_link);
+ qr_remove(node, cc_link);
+}
+
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/memory/jemalloc/src/include/jemalloc/internal/hash.h b/memory/jemalloc/src/include/jemalloc/internal/hash.h
new file mode 100644
index 000000000..1ff2d9a05
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/hash.h
@@ -0,0 +1,357 @@
+/*
+ * The following hash function is based on MurmurHash3, placed into the public
+ * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
+ * details.
+ */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
+void hash_x86_128(const void *key, const int len, uint32_t seed,
+ uint64_t r_out[2]);
+void hash_x64_128(const void *key, const int len, const uint32_t seed,
+ uint64_t r_out[2]);
+void hash(const void *key, size_t len, const uint32_t seed,
+ size_t r_hash[2]);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
+/******************************************************************************/
+/* Internal implementation. */
+JEMALLOC_INLINE uint32_t
+hash_rotl_32(uint32_t x, int8_t r)
+{
+
+ return ((x << r) | (x >> (32 - r)));
+}
+
+JEMALLOC_INLINE uint64_t
+hash_rotl_64(uint64_t x, int8_t r)
+{
+
+ return ((x << r) | (x >> (64 - r)));
+}
+
+JEMALLOC_INLINE uint32_t
+hash_get_block_32(const uint32_t *p, int i)
+{
+
+ /* Handle unaligned read. */
+ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
+ uint32_t ret;
+
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
+ return (ret);
+ }
+
+ return (p[i]);
+}
+
+JEMALLOC_INLINE uint64_t
+hash_get_block_64(const uint64_t *p, int i)
+{
+
+ /* Handle unaligned read. */
+ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
+ uint64_t ret;
+
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
+ return (ret);
+ }
+
+ return (p[i]);
+}
+
+JEMALLOC_INLINE uint32_t
+hash_fmix_32(uint32_t h)
+{
+
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return (h);
+}
+
+JEMALLOC_INLINE uint64_t
+hash_fmix_64(uint64_t k)
+{
+
+ k ^= k >> 33;
+ k *= KQU(0xff51afd7ed558ccd);
+ k ^= k >> 33;
+ k *= KQU(0xc4ceb9fe1a85ec53);
+ k ^= k >> 33;
+
+ return (k);
+}
+
+JEMALLOC_INLINE uint32_t
+hash_x86_32(const void *key, int len, uint32_t seed)
+{
+ const uint8_t *data = (const uint8_t *) key;
+ const int nblocks = len / 4;
+
+ uint32_t h1 = seed;
+
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
+
+ /* body */
+ {
+ const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
+ int i;
+
+ for (i = -nblocks; i; i++) {
+ uint32_t k1 = hash_get_block_32(blocks, i);
+
+ k1 *= c1;
+ k1 = hash_rotl_32(k1, 15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = hash_rotl_32(h1, 13);
+ h1 = h1*5 + 0xe6546b64;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
+
+ uint32_t k1 = 0;
+
+ switch (len & 3) {
+ case 3: k1 ^= tail[2] << 16;
+ case 2: k1 ^= tail[1] << 8;
+ case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
+ k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len;
+
+ h1 = hash_fmix_32(h1);
+
+ return (h1);
+}
+
+UNUSED JEMALLOC_INLINE void
+hash_x86_128(const void *key, const int len, uint32_t seed,
+ uint64_t r_out[2])
+{
+ const uint8_t * data = (const uint8_t *) key;
+ const int nblocks = len / 16;
+
+ uint32_t h1 = seed;
+ uint32_t h2 = seed;
+ uint32_t h3 = seed;
+ uint32_t h4 = seed;
+
+ const uint32_t c1 = 0x239b961b;
+ const uint32_t c2 = 0xab0e9789;
+ const uint32_t c3 = 0x38b34ae5;
+ const uint32_t c4 = 0xa1e38b93;
+
+ /* body */
+ {
+ const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
+ int i;
+
+ for (i = -nblocks; i; i++) {
+ uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
+ uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
+ uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
+ uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
+
+ k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+
+ h1 = hash_rotl_32(h1, 19); h1 += h2;
+ h1 = h1*5 + 0x561ccd1b;
+
+ k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+
+ h2 = hash_rotl_32(h2, 17); h2 += h3;
+ h2 = h2*5 + 0x0bcaa747;
+
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+
+ h3 = hash_rotl_32(h3, 15); h3 += h4;
+ h3 = h3*5 + 0x96cd1c35;
+
+ k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+
+ h4 = hash_rotl_32(h4, 13); h4 += h1;
+ h4 = h4*5 + 0x32ac3b17;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
+ uint32_t k1 = 0;
+ uint32_t k2 = 0;
+ uint32_t k3 = 0;
+ uint32_t k4 = 0;
+
+ switch (len & 15) {
+ case 15: k4 ^= tail[14] << 16;
+ case 14: k4 ^= tail[13] << 8;
+ case 13: k4 ^= tail[12] << 0;
+ k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+
+ case 12: k3 ^= tail[11] << 24;
+ case 11: k3 ^= tail[10] << 16;
+ case 10: k3 ^= tail[ 9] << 8;
+ case 9: k3 ^= tail[ 8] << 0;
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+
+ case 8: k2 ^= tail[ 7] << 24;
+ case 7: k2 ^= tail[ 6] << 16;
+ case 6: k2 ^= tail[ 5] << 8;
+ case 5: k2 ^= tail[ 4] << 0;
+ k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+
+ case 4: k1 ^= tail[ 3] << 24;
+ case 3: k1 ^= tail[ 2] << 16;
+ case 2: k1 ^= tail[ 1] << 8;
+ case 1: k1 ^= tail[ 0] << 0;
+ k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ h1 = hash_fmix_32(h1);
+ h2 = hash_fmix_32(h2);
+ h3 = hash_fmix_32(h3);
+ h4 = hash_fmix_32(h4);
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ r_out[0] = (((uint64_t) h2) << 32) | h1;
+ r_out[1] = (((uint64_t) h4) << 32) | h3;
+}
+
+UNUSED JEMALLOC_INLINE void
+hash_x64_128(const void *key, const int len, const uint32_t seed,
+ uint64_t r_out[2])
+{
+ const uint8_t *data = (const uint8_t *) key;
+ const int nblocks = len / 16;
+
+ uint64_t h1 = seed;
+ uint64_t h2 = seed;
+
+ const uint64_t c1 = KQU(0x87c37b91114253d5);
+ const uint64_t c2 = KQU(0x4cf5ad432745937f);
+
+ /* body */
+ {
+ const uint64_t *blocks = (const uint64_t *) (data);
+ int i;
+
+ for (i = 0; i < nblocks; i++) {
+ uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
+ uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
+
+ k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+
+ h1 = hash_rotl_64(h1, 27); h1 += h2;
+ h1 = h1*5 + 0x52dce729;
+
+ k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+
+ h2 = hash_rotl_64(h2, 31); h2 += h1;
+ h2 = h2*5 + 0x38495ab5;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
+ uint64_t k1 = 0;
+ uint64_t k2 = 0;
+
+ switch (len & 15) {
+ case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
+ case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
+ case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
+ case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
+ case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
+ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
+ case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
+ k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+
+ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
+ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
+ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
+ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
+ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
+ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
+ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
+ case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
+ k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len; h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = hash_fmix_64(h1);
+ h2 = hash_fmix_64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ r_out[0] = h1;
+ r_out[1] = h2;
+}
+
+/******************************************************************************/
+/* API. */
+JEMALLOC_INLINE void
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
+{
+
+ assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
+
+#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
+ hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
+#else
+ {
+ uint64_t hashes[2];
+ hash_x86_128(key, (int)len, seed, hashes);
+ r_hash[0] = (size_t)hashes[0];
+ r_hash[1] = (size_t)hashes[1];
+ }
+#endif
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
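The hash_fmix_64() mixer above is the self-contained finalization step of MurmurHash3: a few shift-xor-multiply rounds that give small input changes a full-width avalanche. A standalone restatement with the same constants, compilable in isolation:

/* Standalone copy of the 64-bit finalization mixer (hash_fmix_64 above). */
#include <stdint.h>
#include <stdio.h>

static uint64_t fmix64(uint64_t k) {
	k ^= k >> 33;
	k *= UINT64_C(0xff51afd7ed558ccd);
	k ^= k >> 33;
	k *= UINT64_C(0xc4ceb9fe1a85ec53);
	k ^= k >> 33;
	return k;
}

int main(void) {
	/* Nearby inputs diverge to very different outputs (avalanche effect). */
	printf("fmix64(1) = 0x%016llx\n", (unsigned long long)fmix64(1));
	printf("fmix64(2) = 0x%016llx\n", (unsigned long long)fmix64(2));
	return 0;
}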
diff --git a/memory/jemalloc/src/include/jemalloc/internal/huge.h b/memory/jemalloc/src/include/jemalloc/internal/huge.h
new file mode 100644
index 000000000..22184d9bb
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/huge.h
@@ -0,0 +1,35 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero);
+bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize_min, size_t usize_max, bool zero);
+void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
+void huge_dalloc(tsdn_t *tsdn, void *ptr);
+arena_t *huge_aalloc(const void *ptr);
+size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in
new file mode 100644
index 000000000..fdc8fef9d
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in
@@ -0,0 +1,1288 @@
+#ifndef JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc@install_suffix@.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "../jemalloc@install_suffix@.h"
+#endif
+#include "jemalloc/internal/private_namespace.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+ true
+#else
+ false
+#endif
+ ;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_munmap =
+#ifdef JEMALLOC_MUNMAP
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_valgrind =
+#ifdef JEMALLOC_VALGRIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+
+#ifdef JEMALLOC_C11ATOMICS
+#include <stdatomic.h>
+#endif
+
+#ifdef JEMALLOC_ATOMIC9
+#include <machine/atomic.h>
+#endif
+
+#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <malloc/malloc.h>
+#endif
+
+#include "jemalloc/internal/ph.h"
+#ifndef __PGI
+#define RB_COMPACT
+#endif
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/qr.h"
+#include "jemalloc/internal/ql.h"
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation. In order to reduce the effect on
+ * visual code flow, read the header files in multiple passes, with one of the
+ * following cpp variables defined during each pass:
+ *
+ * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
+ * types.
+ * JEMALLOC_H_STRUCTS : Data structures.
+ * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ * JEMALLOC_H_INLINES : Inline functions.
+ */
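The four-pass scheme is easier to see on a toy header. The sketch below uses hypothetical file names (widget.h, internal.h) and contents; only the guard-and-include pattern mirrors what this umbrella header does for each of its component headers.

/* ---- widget.h: all four kinds of content, behind the pass guards ---- */
#ifdef JEMALLOC_H_TYPES
typedef struct widget_s widget_t;        /* pass 1: constants and types */
#endif
#ifdef JEMALLOC_H_STRUCTS
struct widget_s { int refcount; };       /* pass 2: full struct layouts */
#endif
#ifdef JEMALLOC_H_EXTERNS
widget_t *widget_new(void);              /* pass 3: prototypes */
#endif
#ifdef JEMALLOC_H_INLINES
static inline int widget_refs(const widget_t *w) { return w->refcount; }
#endif                                   /* pass 4: inline functions */

/* ---- internal.h: include the same header once per pass, in order ---- */
#define JEMALLOC_H_TYPES
#include "widget.h"
#undef JEMALLOC_H_TYPES

#define JEMALLOC_H_STRUCTS
#include "widget.h"
#undef JEMALLOC_H_STRUCTS

#define JEMALLOC_H_EXTERNS
#include "widget.h"
#undef JEMALLOC_H_EXTERNS

#define JEMALLOC_H_INLINES
#include "widget.h"
#undef JEMALLOC_H_INLINES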
+/******************************************************************************/
+#define JEMALLOC_H_TYPES
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_MASK ((int)~0xfffff)
+#define MALLOCX_ARENA_MAX 0xffe
+#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
+#define MALLOCX_TCACHE_MAX 0xffd
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+ ((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+ (((unsigned)(((unsigned)flags) >> 20)) - 1)
+
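The *_GET() decoders above imply the matching encoders: the low 6 bits carry a log2 alignment, bit 6 is the zero flag, bits 8..19 carry the tcache index offset by 2, and the top bits carry the arena index offset by 1. These correspond to jemalloc's public MALLOCX_LG_ALIGN()/MALLOCX_ZERO/MALLOCX_TCACHE()/MALLOCX_ARENA() macros, assumed here rather than included. A standalone round-trip check of the layout:

/* Round-trip check of the flags layout documented above.  DEMO_* encoders are
 * written locally to match that layout; decoders are copied from the header. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_LG_ALIGN(la) ((int)(la))                 /* bits 0..5  */
#define DEMO_ZERO         ((int)0x40)                 /* bit 6      */
#define DEMO_TCACHE(tc)   ((int)(((tc) + 2) << 8))    /* bits 8..19 */
#define DEMO_ARENA(a)     ((int)(((a) + 1) << 20))    /* bits 20..31 */

#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define DEMO_ALIGN_GET(flags)  ((size_t)1 << ((flags) & MALLOCX_LG_ALIGN_MASK))
#define DEMO_ZERO_GET(flags)   (((flags) & DEMO_ZERO) != 0)
#define DEMO_TCACHE_GET(flags) ((unsigned)(((flags) & (int)~0xfff000ffU) >> 8) - 2)
#define DEMO_ARENA_GET(flags)  ((unsigned)(((unsigned)(flags)) >> 20) - 1)

int main(void) {
	int flags = DEMO_LG_ALIGN(4) | DEMO_ZERO | DEMO_TCACHE(7) | DEMO_ARENA(3);

	assert(DEMO_ALIGN_GET(flags) == 16);   /* 2^4-byte alignment */
	assert(DEMO_ZERO_GET(flags));
	assert(DEMO_TCACHE_GET(flags) == 7);
	assert(DEMO_ARENA_GET(flags) == 3);

	printf("flags = 0x%08x decode OK\n", (unsigned)flags);
	return 0;
}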
+/* Smallest size class to support. */
+#define TINY_MIN (1U << LG_TINY_MIN)
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#ifndef LG_QUANTUM
+# if (defined(__i386__) || defined(_M_IX86))
+# define LG_QUANTUM 4
+# endif
+# ifdef __ia64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __alpha__
+# define LG_QUANTUM 4
+# endif
+# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+# define LG_QUANTUM 4
+# endif
+# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
+# define LG_QUANTUM 4
+# endif
+# ifdef __arm__
+# define LG_QUANTUM 3
+# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __hppa__
+# define LG_QUANTUM 4
+# endif
+# ifdef __mips__
+# define LG_QUANTUM 3
+# endif
+# ifdef __or1k__
+# define LG_QUANTUM 3
+# endif
+# ifdef __powerpc__
+# define LG_QUANTUM 4
+# endif
+# ifdef __riscv__
+# define LG_QUANTUM 4
+# endif
+# ifdef __s390__
+# define LG_QUANTUM 4
+# endif
+# ifdef __SH4__
+# define LG_QUANTUM 4
+# endif
+# ifdef __tile__
+# define LG_QUANTUM 4
+# endif
+# ifdef __le32__
+# define LG_QUANTUM 4
+# endif
+# ifndef LG_QUANTUM
+# error "Unknown minimum alignment for architecture; specify via "
+ "--with-lg-quantum"
+# endif
+#endif
+
+#define QUANTUM ((size_t)(1U << LG_QUANTUM))
+#define QUANTUM_MASK (QUANTUM - 1)
+
+/* Return the smallest quantum multiple that is >= a. */
+#define QUANTUM_CEILING(a) \
+ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
+
+#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
+#define LONG_MASK (LONG - 1)
+
+/* Return the smallest long multiple that is >= a. */
+#define LONG_CEILING(a) \
+ (((a) + LONG_MASK) & ~LONG_MASK)
+
+#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
+#define PTR_MASK (SIZEOF_PTR - 1)
+
+/* Return the smallest (void *) multiple that is >= a. */
+#define PTR_CEILING(a) \
+ (((a) + PTR_MASK) & ~PTR_MASK)
+
+/*
+ * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
+ * In addition, this controls the spacing of cacheline-spaced size classes.
+ *
+ * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
+ * only handle raw constants.
+ */
+#define LG_CACHELINE 6
+#define CACHELINE 64
+#define CACHELINE_MASK (CACHELINE - 1)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define CACHELINE_CEILING(s) \
+ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
+
+/* Page size. LG_PAGE is determined by the configure script. */
+#ifdef PAGE_MASK
+# undef PAGE_MASK
+#endif
+#define PAGE ((size_t)(1U << LG_PAGE))
+#define PAGE_MASK ((size_t)(PAGE - 1))
+
+/* Return the page base address for the page containing address a. */
+#define PAGE_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~PAGE_MASK))
+
+/* Return the smallest pagesize multiple that is >= s. */
+#define PAGE_CEILING(s) \
+ (((s) + PAGE_MASK) & ~PAGE_MASK)
+
+/* Return the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
+ ((void *)((uintptr_t)(a) & (-(alignment))))
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define ALIGNMENT_CEILING(s, alignment) \
+ (((s) + (alignment - 1)) & (-(alignment)))
+
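These are the standard power-of-two mask tricks: for a power-of-two alignment, (alignment - 1) isolates the low bits and -(alignment) clears them. A standalone worked check, with the macros copied verbatim, assuming a 64-byte alignment and arbitrary sample values:

/* Worked check of the ALIGNMENT_* macros above for a 64-byte alignment. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_ADDR2BASE(a, alignment) \
	((void *)((uintptr_t)(a) & (-(alignment))))
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
	((size_t)((uintptr_t)(a) & (alignment - 1)))
#define ALIGNMENT_CEILING(s, alignment) \
	(((s) + (alignment - 1)) & (-(alignment)))

int main(void) {
	size_t alignment = 64;
	uintptr_t addr = 0x1234;             /* 0x1234 = 0x1200 + 0x34 */

	assert((uintptr_t)ALIGNMENT_ADDR2BASE(addr, alignment) == 0x1200);
	assert(ALIGNMENT_ADDR2OFFSET(addr, alignment) == 0x34);

	assert(ALIGNMENT_CEILING((size_t)100, alignment) == 128);
	assert(ALIGNMENT_CEILING((size_t)128, alignment) == 128);

	printf("alignment macro arithmetic OK\n");
	return 0;
}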
+/* Declare a variable-length array. */
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# ifdef JEMALLOC_HAS_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_TYPES
+/******************************************************************************/
+#define JEMALLOC_H_STRUCTS
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#define JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#include "jemalloc/internal/tsd.h"
+
+#undef JEMALLOC_H_STRUCTS
+/******************************************************************************/
+#define JEMALLOC_H_EXTERNS
+
+extern bool opt_abort;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern size_t opt_quarantine;
+extern bool opt_redzone;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+extern bool in_valgrind;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern arena_t **arenas;
+
+/*
+ * pind2sz_tab encodes the same information as could be computed by
+ * pind2sz_compute().
+ */
+extern size_t const pind2sz_tab[NPSIZES];
+/*
+ * index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by index2size_compute().
+ */
+extern size_t const index2size_tab[NSIZES];
+/*
+ * size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via size2index().
+ */
+extern uint8_t const size2index_tab[];
+
+arena_t *a0get(void);
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
+unsigned narenas_total_get(void);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
+arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void thread_allocated_cleanup(tsd_t *tsd);
+void thread_deallocated_cleanup(tsd_t *tsd);
+void iarena_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_tdata_cleanup(tsd_t *tsd);
+void narenas_tdata_cleanup(tsd_t *tsd);
+void arenas_tdata_bypass_cleanup(tsd_t *tsd);
+void jemalloc_prefork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+#include "jemalloc/internal/tsd.h"
+
+#undef JEMALLOC_H_EXTERNS
+/******************************************************************************/
+#define JEMALLOC_H_INLINES
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+
+#ifndef JEMALLOC_ENABLE_INLINE
+pszind_t psz2ind(size_t psz);
+size_t pind2sz_compute(pszind_t pind);
+size_t pind2sz_lookup(pszind_t pind);
+size_t pind2sz(pszind_t pind);
+size_t psz2u(size_t psz);
+szind_t size2index_compute(size_t size);
+szind_t size2index_lookup(size_t size);
+szind_t size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
+size_t s2u_compute(size_t size);
+size_t s2u_lookup(size_t size);
+size_t s2u(size_t size);
+size_t sa2u(size_t size, size_t alignment);
+arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
+arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
+arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
+ bool refresh_if_missing);
+arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
+ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_INLINE pszind_t
+psz2ind(size_t psz)
+{
+
+ if (unlikely(psz > HUGE_MAXCLASS))
+ return (NPSIZES);
+ {
+ pszind_t x = lg_floor((psz<<1)-1);
+ pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
+ (LG_SIZE_CLASS_GROUP + LG_PAGE);
+ pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZI(-1) << lg_delta;
+ pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ pszind_t ind = grp + mod;
+ return (ind);
+ }
+}
+
+JEMALLOC_INLINE size_t
+pind2sz_compute(pszind_t pind)
+{
+
+ {
+ size_t grp = pind >> LG_SIZE_CLASS_GROUP;
+ size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_PAGE +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_PAGE-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t sz = grp_size + mod_size;
+ return (sz);
+ }
+}
+
+JEMALLOC_INLINE size_t
+pind2sz_lookup(pszind_t pind)
+{
+ size_t ret = (size_t)pind2sz_tab[pind];
+ assert(ret == pind2sz_compute(pind));
+ return (ret);
+}
+
+JEMALLOC_INLINE size_t
+pind2sz(pszind_t pind)
+{
+
+ assert(pind < NPSIZES);
+ return (pind2sz_lookup(pind));
+}
+
+JEMALLOC_INLINE size_t
+psz2u(size_t psz)
+{
+
+ if (unlikely(psz > HUGE_MAXCLASS))
+ return (0);
+ {
+ size_t x = lg_floor((psz<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (psz + delta_mask) & ~delta_mask;
+ return (usize);
+ }
+}
+
+JEMALLOC_INLINE szind_t
+size2index_compute(size_t size)
+{
+
+ if (unlikely(size > HUGE_MAXCLASS))
+ return (NSIZES);
+#if (NTBINS != 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+ }
+#endif
+ {
+ szind_t x = lg_floor((size<<1)-1);
+ szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+ x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
+ szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZI(-1) << lg_delta;
+ szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ szind_t index = NTBINS + grp + mod;
+ return (index);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+size2index_lookup(size_t size)
+{
+
+ assert(size <= LOOKUP_MAXCLASS);
+ {
+ szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
+ assert(ret == size2index_compute(size));
+ return (ret);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+size2index(size_t size)
+{
+
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS))
+ return (size2index_lookup(size));
+ return (size2index_compute(size));
+}
+
+JEMALLOC_INLINE size_t
+index2size_compute(szind_t index)
+{
+
+#if (NTBINS > 0)
+ if (index < NTBINS)
+ return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+#endif
+ {
+ size_t reduced_index = index - NTBINS;
+ size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
+ size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
+ 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_QUANTUM-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t usize = grp_size + mod_size;
+ return (usize);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+index2size_lookup(szind_t index)
+{
+ size_t ret = (size_t)index2size_tab[index];
+ assert(ret == index2size_compute(index));
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+index2size(szind_t index)
+{
+
+ assert(index < NSIZES);
+ return (index2size_lookup(index));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+s2u_compute(size_t size)
+{
+
+ if (unlikely(size > HUGE_MAXCLASS))
+ return (0);
+#if (NTBINS > 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+ (ZU(1) << lg_ceil));
+ }
+#endif
+ {
+ size_t x = lg_floor((size<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (size + delta_mask) & ~delta_mask;
+ return (usize);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+s2u_lookup(size_t size)
+{
+ size_t ret = index2size_lookup(size2index_lookup(size));
+
+ assert(ret == s2u_compute(size));
+ return (ret);
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+s2u(size_t size)
+{
+
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS))
+ return (s2u_lookup(size));
+ return (s2u_compute(size));
+}
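Putting numbers to s2u_compute(): with LG_QUANTUM of 4 (as on x86-64 above) and the usual LG_SIZE_CLASS_GROUP of 2, each power-of-two range is split into four classes, so a 100-byte request rounds up to the 112-byte class and a 200-byte request to 224. The sketch below restates only the non-tiny branch of the formula; the two LG_ constants are assumed values, and tiny/huge/overflow handling is omitted.

/* Standalone restatement of the non-tiny branch of s2u_compute() above,
 * assuming LG_QUANTUM == 4 and LG_SIZE_CLASS_GROUP == 2. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_QUANTUM          4
#define LG_SIZE_CLASS_GROUP 2

static size_t lg_floor(size_t x) {        /* index of the highest set bit */
	size_t lg = 0;
	while (x >>= 1)
		lg++;
	return lg;
}

static size_t demo_s2u(size_t size) {
	size_t x = lg_floor((size << 1) - 1);
	size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ?
	    LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;
	return (size + delta_mask) & ~delta_mask;
}

int main(void) {
	assert(demo_s2u(1) == 16);      /* without tiny classes, rounds to the quantum */
	assert(demo_s2u(100) == 112);   /* classes ..., 96, 112, 128 */
	assert(demo_s2u(200) == 224);   /* classes 160, 192, 224, 256 */
	assert(demo_s2u(224) == 224);   /* exact class sizes are unchanged */
	printf("size class rounding OK\n");
	return 0;
}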
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size and alignment.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sa2u(size_t size, size_t alignment)
+{
+ size_t usize;
+
+ assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
+ /* Try for a small size class. */
+ if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+ usize = s2u(ALIGNMENT_CEILING(size, alignment));
+ if (usize < LARGE_MINCLASS)
+ return (usize);
+ }
+
+ /* Try for a large size class. */
+ if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
+ /*
+ * We can't achieve subpage alignment, so round up alignment
+ * to the minimum that can actually be supported.
+ */
+ alignment = PAGE_CEILING(alignment);
+
+ /* Make sure result is a large size class. */
+ usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
+
+ /*
+ * Calculate the size of the over-size run that arena_palloc()
+ * would need to allocate in order to guarantee the alignment.
+ */
+ if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+ return (usize);
+ }
+
+ /* Huge size class. Beware of overflow. */
+
+ if (unlikely(alignment > HUGE_MAXCLASS))
+ return (0);
+
+ /*
+ * We can't achieve subchunk alignment, so round up alignment to the
+ * minimum that can actually be supported.
+ */
+ alignment = CHUNK_CEILING(alignment);
+
+ /* Make sure result is a huge size class. */
+ if (size <= chunksize)
+ usize = chunksize;
+ else {
+ usize = s2u(size);
+ if (usize < size) {
+ /* size_t overflow. */
+ return (0);
+ }
+ }
+
+ /*
+ * Calculate the multi-chunk mapping that huge_palloc() would need in
+ * order to guarantee the alignment.
+ */
+ if (usize + alignment - PAGE < usize) {
+ /* size_t overflow. */
+ return (0);
+ }
+ return (usize);
+}
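+
+/*
+ * Illustrative example (editor's sketch, not part of upstream jemalloc):
+ * assuming 4 KiB pages and LG_QUANTUM == 4, sa2u(100, 64) first rounds the
+ * size up to a multiple of the alignment (128), then maps that to the
+ * 128-byte small size class, whose natural alignment (128) satisfies the
+ * requested 64-byte alignment, so the result is 128.
+ */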
+
+/* Choose an arena based on a per-thread value. */
+JEMALLOC_INLINE arena_t *
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
+{
+ arena_t *ret;
+
+ if (arena != NULL)
+ return (arena);
+
+ ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+ if (unlikely(ret == NULL))
+ ret = arena_choose_hard(tsd, internal);
+
+ return (ret);
+}
+
+JEMALLOC_INLINE arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena)
+{
+
+ return (arena_choose_impl(tsd, arena, false));
+}
+
+JEMALLOC_INLINE arena_t *
+arena_ichoose(tsd_t *tsd, arena_t *arena)
+{
+
+ return (arena_choose_impl(tsd, arena, true));
+}
+
+JEMALLOC_INLINE arena_tdata_t *
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
+{
+ arena_tdata_t *tdata;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+
+ if (unlikely(arenas_tdata == NULL)) {
+ /* arenas_tdata hasn't been initialized yet. */
+ return (arena_tdata_get_hard(tsd, ind));
+ }
+ if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+ /*
+		 * ind is invalid, the cache is stale (too small), or the
+		 * tdata has not been initialized yet.
+ */
+ return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
+ NULL);
+ }
+
+ tdata = &arenas_tdata[ind];
+ if (likely(tdata != NULL) || !refresh_if_missing)
+ return (tdata);
+ return (arena_tdata_get_hard(tsd, ind));
+}
+
+JEMALLOC_INLINE arena_t *
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
+{
+ arena_t *ret;
+
+ assert(ind <= MALLOCX_ARENA_MAX);
+
+ ret = arenas[ind];
+ if (unlikely(ret == NULL)) {
+ ret = atomic_read_p((void *)&arenas[ind]);
+ if (init_if_missing && unlikely(ret == NULL))
+ ret = arena_init(tsdn, ind);
+ }
+ return (ret);
+}
+
+JEMALLOC_INLINE ticker_t *
+decay_ticker_get(tsd_t *tsd, unsigned ind)
+{
+ arena_tdata_t *tdata;
+
+ tdata = arena_tdata_get(tsd, ind, true);
+ if (unlikely(tdata == NULL))
+ return (NULL);
+ return (&tdata->decay_ticker);
+}
+#endif
+
+#include "jemalloc/internal/bitmap.h"
+/*
+ * Include portions of arena.h interleaved with tcache.h in order to resolve
+ * circular dependencies.
+ */
+#define JEMALLOC_ARENA_INLINE_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_A
+#include "jemalloc/internal/tcache.h"
+#define JEMALLOC_ARENA_INLINE_B
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_B
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_t *iaalloc(const void *ptr);
+size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
+ tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
+void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
+ bool slow_path);
+void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, bool is_metadata, arena_t *arena);
+void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena);
+void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
+size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+size_t u2rz(size_t usize);
+size_t p2rz(tsdn_t *tsdn, const void *ptr);
+void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
+ bool slow_path);
+void idalloc(tsd_t *tsd, void *ptr);
+void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
+void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
+void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache,
+ arena_t *arena);
+void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
+void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero);
+bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(const void *ptr)
+{
+
+ assert(ptr != NULL);
+
+ return (arena_aalloc(ptr));
+}
+
+/*
+ * Typical usage:
+ * tsdn_t *tsdn = [...]
+ * void *ptr = [...]
+ * size_t sz = isalloc(tsdn, ptr, config_prof);
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+{
+
+ assert(ptr != NULL);
+ /* Demotion only makes sense if config_prof is true. */
+ assert(config_prof || !demote);
+
+ return (arena_salloc(tsdn, ptr, demote));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+ bool is_metadata, arena_t *arena, bool slow_path)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
+
+ ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+ if (config_stats && is_metadata && likely(ret != NULL)) {
+ arena_metadata_allocated_add(iaalloc(ret),
+ isalloc(tsdn, ret, config_prof));
+ }
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
+{
+
+ return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
+ false, NULL, slow_path));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, bool is_metadata, arena_t *arena)
+{
+ void *ret;
+
+ assert(usize != 0);
+ assert(usize == sa2u(usize, alignment));
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
+
+ ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+ assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+ if (config_stats && is_metadata && likely(ret != NULL)) {
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
+ config_prof));
+ }
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
+{
+
+ return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
+{
+
+ return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
+ tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+{
+ extent_node_t *node;
+
+ /* Return 0 if ptr is not within a chunk managed by jemalloc. */
+ node = chunk_lookup(ptr, false);
+ if (node == NULL)
+ return (0);
+ /* Only arena chunks should be looked up via interior pointers. */
+ assert(extent_node_addr_get(node) == ptr ||
+ extent_node_achunk_get(node));
+
+ return (isalloc(tsdn, ptr, demote));
+}
+
+JEMALLOC_INLINE size_t
+u2rz(size_t usize)
+{
+ size_t ret;
+
+ if (usize <= SMALL_MAXCLASS) {
+ szind_t binind = size2index(usize);
+ ret = arena_bin_info[binind].redzone_size;
+ } else
+ ret = 0;
+
+ return (ret);
+}
+
+JEMALLOC_INLINE size_t
+p2rz(tsdn_t *tsdn, const void *ptr)
+{
+ size_t usize = isalloc(tsdn, ptr, false);
+
+ return (u2rz(usize));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
+ bool slow_path)
+{
+
+ assert(ptr != NULL);
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
+ if (config_stats && is_metadata) {
+ arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
+ config_prof));
+ }
+
+ arena_dalloc(tsdn, ptr, tcache, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloc(tsd_t *tsd, void *ptr)
+{
+
+ idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+{
+
+ if (slow_path && config_fill && unlikely(opt_quarantine))
+ quarantine(tsd, ptr);
+ else
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path)
+{
+
+ arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
+{
+
+ if (slow_path && config_fill && unlikely(opt_quarantine))
+ quarantine(tsd, ptr);
+ else
+ isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
+{
+ void *p;
+ size_t usize, copysize;
+
+ usize = sa2u(size + extra, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ return (NULL);
+ p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
+ if (p == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, without extra this time. */
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ return (NULL);
+ p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
+ arena);
+ if (p == NULL)
+ return (NULL);
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache, arena_t *arena)
+{
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /*
+ * Existing object alignment is inadequate; allocate new space
+ * and copy.
+ */
+ return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
+ zero, tcache, arena));
+ }
+
+ return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
+ tcache));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero)
+{
+
+ return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
+ tcache_get(tsd, true), NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero)
+{
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
+}
+#endif
+
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_INLINES
+/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_H */
diff --git a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_decls.h b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_decls.h
new file mode 100644
index 000000000..c907d9109
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -0,0 +1,75 @@
+#ifndef JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H
+
+#include <math.h>
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+
+#else
+# include <sys/param.h>
+# include <sys/mman.h>
+# if !defined(__pnacl__) && !defined(__native_client__)
+# include <sys/syscall.h>
+# if !defined(SYS_write) && defined(__NR_write)
+# define SYS_write __NR_write
+# endif
+# include <sys/uio.h>
+# endif
+# include <pthread.h>
+# ifdef JEMALLOC_OS_UNFAIR_LOCK
+# include <os/lock.h>
+# endif
+# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
+# include <sched.h>
+# endif
+# include <errno.h>
+# include <sys/time.h>
+# include <time.h>
+# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+# include <mach/mach_time.h>
+# endif
+#endif
+#include <sys/types.h>
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
+#endif
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#ifdef _MSC_VER
+# include <io.h>
+typedef intptr_t ssize_t;
+# define PATH_MAX 1024
+# define STDERR_FILENO 2
+# define __func__ __FUNCTION__
+# ifdef JEMALLOC_HAS_RESTRICT
+# define restrict __restrict
+# endif
+/* Disable warnings about deprecated system functions. */
+# pragma warning(disable: 4996)
+#if _MSC_VER < 1800
+static int
+isblank(int c)
+{
+
+ return (c == '\t' || c == ' ');
+}
+#endif
+#else
+# include <unistd.h>
+#endif
+#include <fcntl.h>
+
+#endif /* JEMALLOC_INTERNAL_DECLS_H */
diff --git a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_defs.h.in b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_defs.h.in
new file mode 100644
index 000000000..9b3dca504
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -0,0 +1,307 @@
+#ifndef JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed. This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+#undef JEMALLOC_PREFIX
+#undef JEMALLOC_CPREFIX
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#undef JEMALLOC_PRIVATE_NAMESPACE
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#undef CPU_SPINWAIT
+
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11ATOMICS
+
+/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
+#undef JEMALLOC_ATOMIC9
+
+/*
+ * Defined if OSAtomic*() functions are available, as provided by Darwin, and
+ * documented in the atomic(3) manual page.
+ */
+#undef JEMALLOC_OSATOMIC
+
+/*
+ * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
+ * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines).
+ */
+#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
+
+/*
+ * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
+ * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines).
+ */
+#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
+
+/*
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
+ */
+#undef JEMALLOC_HAVE_BUILTIN_CLZ
+
+/*
+ * Defined if madvise(2) is available.
+ */
+#undef JEMALLOC_HAVE_MADVISE
+
+/*
+ * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ */
+#undef JEMALLOC_OS_UNFAIR_LOCK
+
+/*
+ * Defined if OSSpin*() functions are available, as provided by Darwin, and
+ * documented in the spinlock(3) manual page.
+ */
+#undef JEMALLOC_OSSPIN
+
+/* Defined if syscall(2) is available. */
+#undef JEMALLOC_HAVE_SYSCALL
+
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#undef JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+#undef JEMALLOC_HAVE_ISSETUGID
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
+
+/*
+ * Defined if mach_absolute_time() is available.
+ */
+#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+
+/*
+ * Defined if _malloc_thread_cleanup() exists. At least in the case of
+ * FreeBSD, pthread_key_create() allocates, which if used during malloc
+ * bootstrapping will cause recursion into the pthreads library. Therefore, if
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
+ * malloc_tsd.
+ */
+#undef JEMALLOC_MALLOC_THREAD_CLEANUP
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+#undef JEMALLOC_MUTEX_INIT_CB
+
+/* Non-empty if the tls_model attribute is supported. */
+#undef JEMALLOC_TLS_MODEL
+
+/* JEMALLOC_CC_SILENCE enables code that silences spurious compiler warnings. */
+#undef JEMALLOC_CC_SILENCE
+
+/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
+#undef JEMALLOC_CODE_COVERAGE
+
+/*
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+#undef JEMALLOC_DEBUG
+
+/* JEMALLOC_STATS enables statistics calculation. */
+#undef JEMALLOC_STATS
+
+/* JEMALLOC_PROF enables allocation profiling. */
+#undef JEMALLOC_PROF
+
+/* Use libunwind for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBUNWIND
+
+/* Use libgcc for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBGCC
+
+/* Use gcc intrinsics for profile backtracing if defined. */
+#undef JEMALLOC_PROF_GCC
+
+/*
+ * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
+ * This makes it possible to allocate/deallocate objects without any locking
+ * when the cache is in the steady state.
+ */
+#undef JEMALLOC_TCACHE
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * segment (DSS).
+ */
+#undef JEMALLOC_DSS
+
+/* Support memory filling (junk/zero/quarantine/redzone). */
+#undef JEMALLOC_FILL
+
+/* Support utrace(2)-based tracing. */
+#undef JEMALLOC_UTRACE
+
+/* Support Valgrind. */
+#undef JEMALLOC_VALGRIND
+
+/* Support optional abort() on OOM. */
+#undef JEMALLOC_XMALLOC
+
+/* Support lazy locking (avoid locking unless a second thread is launched). */
+#undef JEMALLOC_LAZY_LOCK
+
+/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
+#undef LG_TINY_MIN
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#undef LG_QUANTUM
+
+/* One page is 2^LG_PAGE bytes. */
+#undef LG_PAGE
+
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
+/*
+ * If defined, use munmap() to unmap freed chunks, rather than storing them for
+ * later reuse. This is disabled by default on Linux because common sequences
+ * of mmap()/munmap() calls will cause virtual memory map holes.
+ */
+#undef JEMALLOC_MUNMAP
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#undef JEMALLOC_TLS
+
+/*
+ * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
+ * Don't use this directly; instead use unreachable() from util.h.
+ */
+#undef JEMALLOC_INTERNAL_UNREACHABLE
+
+/*
+ * ffs*() functions to use for bitmapping. Don't use these directly; instead,
+ * use ffs_*() from util.h.
+ */
+#undef JEMALLOC_INTERNAL_FFSLL
+#undef JEMALLOC_INTERNAL_FFSL
+#undef JEMALLOC_INTERNAL_FFS
+
+/*
+ * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
+ * within jemalloc-owned chunks before dereferencing them.
+ */
+#undef JEMALLOC_IVSALLOC
+
+/*
+ * If defined, explicitly attempt to more uniformly distribute large allocation
+ * pointer alignments across all cache indices.
+ */
+#undef JEMALLOC_CACHE_OBLIVIOUS
+
+/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
+ */
+#undef JEMALLOC_ZONE
+#undef JEMALLOC_ZONE_VERSION
+
+/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ *				 /proc/sys/vm/overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched.
+ * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
+ * unused, such that they will be discarded rather
+ * than swapped out.
+ */
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_MADVISE_FREE
+
+/* Define if operating system has alloca.h header. */
+#undef JEMALLOC_HAS_ALLOCA_H
+
+/* C99 restrict keyword supported. */
+#undef JEMALLOC_HAS_RESTRICT
+
+/* For use by hash code. */
+#undef JEMALLOC_BIG_ENDIAN
+
+/* sizeof(int) == 2^LG_SIZEOF_INT. */
+#undef LG_SIZEOF_INT
+
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */
+#undef LG_SIZEOF_LONG
+
+/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
+#undef LG_SIZEOF_LONG_LONG
+
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
+#undef LG_SIZEOF_INTMAX_T
+
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+#undef JEMALLOC_GLIBC_MALLOC_HOOK
+
+/* glibc memalign hook. */
+#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* Adaptive mutex support in pthreads. */
+#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+#undef JEMALLOC_EXPORT
+
+/* config.malloc_conf options string. */
+#undef JEMALLOC_CONFIG_MALLOC_CONF
+
+#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_macros.h b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_macros.h
new file mode 100644
index 000000000..a08ba772e
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -0,0 +1,57 @@
+/*
+ * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
+ * functions that are static inline functions if inlining is enabled, and
+ * single-definition library-private functions if inlining is disabled.
+ *
+ * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
+ * which case the denoted functions are always static, regardless of whether
+ * inlining is enabled.
+ */
+#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
+ /* Disable inlining to make debugging/profiling easier. */
+# define JEMALLOC_ALWAYS_INLINE
+# define JEMALLOC_ALWAYS_INLINE_C static
+# define JEMALLOC_INLINE
+# define JEMALLOC_INLINE_C static
+# define inline
+#else
+# define JEMALLOC_ENABLE_INLINE
+# ifdef JEMALLOC_HAVE_ATTR
+# define JEMALLOC_ALWAYS_INLINE \
+ static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
+# define JEMALLOC_ALWAYS_INLINE_C \
+ static inline JEMALLOC_ATTR(always_inline)
+# else
+# define JEMALLOC_ALWAYS_INLINE static inline
+# define JEMALLOC_ALWAYS_INLINE_C static inline
+# endif
+# define JEMALLOC_INLINE static inline
+# define JEMALLOC_INLINE_C static inline
+# ifdef _MSC_VER
+# define inline _inline
+# endif
+#endif
+
+#ifdef JEMALLOC_CC_SILENCE
+# define UNUSED JEMALLOC_ATTR(unused)
+#else
+# define UNUSED
+#endif
+
+#define ZU(z) ((size_t)z)
+#define ZI(z) ((ssize_t)z)
+#define QU(q) ((uint64_t)q)
+#define QI(q) ((int64_t)q)
+
+#define KZU(z) ZU(z##ULL)
+#define KZI(z) ZI(z##LL)
+#define KQU(q) QU(q##ULL)
+#define KQI(q) QI(q##LL)
+
+#ifndef __DECONST
+# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+
+#ifndef JEMALLOC_HAS_RESTRICT
+# define restrict
+#endif
diff --git a/memory/jemalloc/src/include/jemalloc/internal/mb.h b/memory/jemalloc/src/include/jemalloc/internal/mb.h
new file mode 100644
index 000000000..5384728fd
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/mb.h
@@ -0,0 +1,115 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void mb_write(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
+#ifdef __i386__
+/*
+ * According to the Intel Architecture Software Developer's Manual, current
+ * processors execute instructions in order from the perspective of other
+ * processors in a multiprocessor system, but 1) Intel reserves the right to
+ * change that, and 2) the compiler's optimizer could re-order instructions if
+ * there weren't some form of barrier. Therefore, even if running on an
+ * architecture that does not need memory barriers (everything through at least
+ * i686), an "optimizer barrier" is necessary.
+ */
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+# if 0
+ /* This is a true memory barrier. */
+ asm volatile ("pusha;"
+ "xor %%eax,%%eax;"
+ "cpuid;"
+ "popa;"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+# else
+ /*
+ * This is hopefully enough to keep the compiler from reordering
+ * instructions around this one.
+ */
+ asm volatile ("nop;"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+# endif
+}
+#elif (defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+ asm volatile ("sfence"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+#elif defined(__powerpc__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+ asm volatile ("eieio"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+#elif defined(__sparc64__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+ asm volatile ("membar #StoreStore"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+#elif defined(__tile__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+ __sync_synchronize();
+}
+#else
+/*
+ * This is much slower than a simple memory barrier, but the semantics of mutex
+ * unlock make this work.
+ */
+JEMALLOC_INLINE void
+mb_write(void)
+{
+ malloc_mutex_t mtx;
+
+ malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
+ malloc_mutex_lock(TSDN_NULL, &mtx);
+ malloc_mutex_unlock(TSDN_NULL, &mtx);
+}
+#endif
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/mutex.h b/memory/jemalloc/src/include/jemalloc/internal/mutex.h
new file mode 100644
index 000000000..b442d2d4e
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/mutex.h
@@ -0,0 +1,147 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct malloc_mutex_s malloc_mutex_t;
+
+#ifdef _WIN32
+# define MALLOC_MUTEX_INITIALIZER
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+# define MALLOC_MUTEX_INITIALIZER \
+ {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#elif (defined(JEMALLOC_OSSPIN))
+# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+#else
+# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
+ defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
+# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
+ WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# else
+# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# endif
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct malloc_mutex_s {
+#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ SRWLOCK lock;
+# else
+ CRITICAL_SECTION lock;
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock lock;
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLock lock;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+ pthread_mutex_t lock;
+ malloc_mutex_t *postponed_next;
+#else
+ pthread_mutex_t lock;
+#endif
+ witness_t witness;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#ifdef JEMALLOC_LAZY_LOCK
+extern bool isthreaded;
+#else
+# undef isthreaded /* Undo private_namespace.h definition. */
+# define isthreaded true
+#endif
+
+bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank);
+void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
+JEMALLOC_INLINE void
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded) {
+ witness_assert_not_owner(tsdn, &mutex->witness);
+#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ AcquireSRWLockExclusive(&mutex->lock);
+# else
+ EnterCriticalSection(&mutex->lock);
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_lock(&mutex->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLockLock(&mutex->lock);
+#else
+ pthread_mutex_lock(&mutex->lock);
+#endif
+ witness_lock(tsdn, &mutex->witness);
+ }
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded) {
+ witness_unlock(tsdn, &mutex->witness);
+#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ ReleaseSRWLockExclusive(&mutex->lock);
+# else
+ LeaveCriticalSection(&mutex->lock);
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_unlock(&mutex->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLockUnlock(&mutex->lock);
+#else
+ pthread_mutex_unlock(&mutex->lock);
+#endif
+ }
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded)
+ witness_assert_owner(tsdn, &mutex->witness);
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded)
+ witness_assert_not_owner(tsdn, &mutex->witness);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/nstime.h b/memory/jemalloc/src/include/jemalloc/internal/nstime.h
new file mode 100644
index 000000000..93b27dc80
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/nstime.h
@@ -0,0 +1,48 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct nstime_s nstime_t;
+
+/* Maximum supported number of seconds (~584 years). */
+#define NSTIME_SEC_MAX KQU(18446744072)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct nstime_s {
+ uint64_t ns;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void nstime_init(nstime_t *time, uint64_t ns);
+void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
+uint64_t nstime_ns(const nstime_t *time);
+uint64_t nstime_sec(const nstime_t *time);
+uint64_t nstime_nsec(const nstime_t *time);
+void nstime_copy(nstime_t *time, const nstime_t *source);
+int nstime_compare(const nstime_t *a, const nstime_t *b);
+void nstime_add(nstime_t *time, const nstime_t *addend);
+void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
+void nstime_imultiply(nstime_t *time, uint64_t multiplier);
+void nstime_idivide(nstime_t *time, uint64_t divisor);
+uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
+#ifdef JEMALLOC_JET
+typedef bool (nstime_monotonic_t)(void);
+extern nstime_monotonic_t *nstime_monotonic;
+typedef bool (nstime_update_t)(nstime_t *);
+extern nstime_update_t *nstime_update;
+#else
+bool nstime_monotonic(void);
+bool nstime_update(nstime_t *time);
+#endif
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/memory/jemalloc/src/include/jemalloc/internal/pages.h b/memory/jemalloc/src/include/jemalloc/internal/pages.h
new file mode 100644
index 000000000..e21effd14
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/pages.h
@@ -0,0 +1,27 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *pages_map(void *addr, size_t size, bool *commit);
+void pages_unmap(void *addr, size_t size);
+void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
+ size_t size, bool *commit);
+bool pages_commit(void *addr, size_t size);
+bool pages_decommit(void *addr, size_t size);
+bool pages_purge(void *addr, size_t size);
+void pages_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/memory/jemalloc/src/include/jemalloc/internal/ph.h b/memory/jemalloc/src/include/jemalloc/internal/ph.h
new file mode 100644
index 000000000..4f91c333f
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/ph.h
@@ -0,0 +1,345 @@
+/*
+ * A Pairing Heap implementation.
+ *
+ * "The Pairing Heap: A New Form of Self-Adjusting Heap"
+ * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
+ *
+ * With an auxiliary two-pass list, described in a follow-on paper.
+ *
+ * "Pairing Heaps: Experiments and Analysis"
+ * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
+ *
+ *******************************************************************************
+ */
+
+#ifndef PH_H_
+#define PH_H_
+
+/* Node structure. */
+#define phn(a_type) \
+struct { \
+ a_type *phn_prev; \
+ a_type *phn_next; \
+ a_type *phn_lchild; \
+}
+
+/* Root structure. */
+#define ph(a_type) \
+struct { \
+ a_type *ph_root; \
+}
+
+/* Internal utility macros. */
+#define phn_lchild_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_lchild)
+#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
+ a_phn->a_field.phn_lchild = a_lchild; \
+} while (0)
+
+#define phn_next_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_next)
+#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
+ a_phn->a_field.phn_prev = a_prev; \
+} while (0)
+
+#define phn_prev_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_prev)
+#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
+ a_phn->a_field.phn_next = a_next; \
+} while (0)
+
+#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
+ a_type *phn0child; \
+ \
+ assert(a_phn0 != NULL); \
+ assert(a_phn1 != NULL); \
+ assert(a_cmp(a_phn0, a_phn1) <= 0); \
+ \
+ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
+ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
+ phn_next_set(a_type, a_field, a_phn1, phn0child); \
+ if (phn0child != NULL) \
+ phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
+} while (0)
+
+#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
+ if (a_phn0 == NULL) \
+ r_phn = a_phn1; \
+ else if (a_phn1 == NULL) \
+ r_phn = a_phn0; \
+ else if (a_cmp(a_phn0, a_phn1) < 0) { \
+ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
+ a_cmp); \
+ r_phn = a_phn0; \
+ } else { \
+ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
+ a_cmp); \
+ r_phn = a_phn1; \
+ } \
+} while (0)
+
+#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *head = NULL; \
+ a_type *tail = NULL; \
+ a_type *phn0 = a_phn; \
+ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
+ \
+ /* \
+ * Multipass merge, wherein the first two elements of a FIFO \
+ * are repeatedly merged, and each result is appended to the \
+ * singly linked FIFO, until the FIFO contains only a single \
+ * element. We start with a sibling list but no reference to \
+ * its tail, so we do a single pass over the sibling list to \
+ * populate the FIFO. \
+ */ \
+ if (phn1 != NULL) { \
+ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
+ if (phnrest != NULL) \
+ phn_prev_set(a_type, a_field, phnrest, NULL); \
+ phn_prev_set(a_type, a_field, phn0, NULL); \
+ phn_next_set(a_type, a_field, phn0, NULL); \
+ phn_prev_set(a_type, a_field, phn1, NULL); \
+ phn_next_set(a_type, a_field, phn1, NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
+ head = tail = phn0; \
+ phn0 = phnrest; \
+ while (phn0 != NULL) { \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ phnrest = phn_next_get(a_type, a_field, \
+ phn1); \
+ if (phnrest != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phnrest, NULL); \
+ } \
+ phn_prev_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_prev_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = phnrest; \
+ } else { \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = NULL; \
+ } \
+ } \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ while (true) { \
+ head = phn_next_get(a_type, a_field, \
+ phn1); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn0) == NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn1) == NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ if (head == NULL) \
+ break; \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, \
+ phn0); \
+ } \
+ } \
+ } \
+ r_phn = phn0; \
+} while (0)
+
+#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
+ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
+ if (phn != NULL) { \
+ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_prev_set(a_type, a_field, phn, NULL); \
+ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
+ assert(phn_next_get(a_type, a_field, phn) == NULL); \
+ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
+ a_ph->ph_root); \
+ } \
+} while (0)
+
+#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
+ if (lchild == NULL) \
+ r_phn = NULL; \
+ else { \
+ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
+ r_phn); \
+ } \
+} while (0)
+
+/*
+ * The ph_proto() macro generates function prototypes that correspond to the
+ * functions generated by an equivalently parameterized call to ph_gen().
+ */
+#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
+a_attr void a_prefix##new(a_ph_type *ph); \
+a_attr bool a_prefix##empty(a_ph_type *ph); \
+a_attr a_type *a_prefix##first(a_ph_type *ph); \
+a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
+a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
+a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
+
+/*
+ * The ph_gen() macro generates a type-specific pairing heap implementation,
+ * based on the above cpp macros.
+ */
+#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_ph_type *ph) \
+{ \
+ \
+ memset(ph, 0, sizeof(ph(a_type))); \
+} \
+a_attr bool \
+a_prefix##empty(a_ph_type *ph) \
+{ \
+ \
+ return (ph->ph_root == NULL); \
+} \
+a_attr a_type * \
+a_prefix##first(a_ph_type *ph) \
+{ \
+ \
+ if (ph->ph_root == NULL) \
+ return (NULL); \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ return (ph->ph_root); \
+} \
+a_attr void \
+a_prefix##insert(a_ph_type *ph, a_type *phn) \
+{ \
+ \
+ memset(&phn->a_field, 0, sizeof(phn(a_type))); \
+ \
+ /* \
+ * Treat the root as an aux list during insertion, and lazily \
+ * merge during a_prefix##remove_first(). For elements that \
+ * are inserted, then removed via a_prefix##remove() before the \
+ * aux list is ever processed, this makes insert/remove \
+ * constant-time, whereas eager merging would make insert \
+ * O(log n). \
+ */ \
+ if (ph->ph_root == NULL) \
+ ph->ph_root = phn; \
+ else { \
+ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
+ a_field, ph->ph_root)); \
+ if (phn_next_get(a_type, a_field, ph->ph_root) != \
+ NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, ph->ph_root), \
+ phn); \
+ } \
+ phn_prev_set(a_type, a_field, phn, ph->ph_root); \
+ phn_next_set(a_type, a_field, ph->ph_root, phn); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##remove_first(a_ph_type *ph) \
+{ \
+ a_type *ret; \
+ \
+ if (ph->ph_root == NULL) \
+ return (NULL); \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ \
+ ret = ph->ph_root; \
+ \
+ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+ ph->ph_root); \
+ \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##remove(a_ph_type *ph, a_type *phn) \
+{ \
+ a_type *replace, *parent; \
+ \
+ /* \
+ * We can delete from aux list without merging it, but we need \
+ * to merge if we are dealing with the root node. \
+ */ \
+ if (ph->ph_root == phn) { \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ if (ph->ph_root == phn) { \
+ ph_merge_children(a_type, a_field, ph->ph_root, \
+ a_cmp, ph->ph_root); \
+ return; \
+ } \
+ } \
+ \
+ /* Get parent (if phn is leftmost child) before mutating. */ \
+ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
+ if (phn_lchild_get(a_type, a_field, parent) != phn) \
+ parent = NULL; \
+ } \
+ /* Find a possible replacement node, and link to parent. */ \
+ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
+ /* Set next/prev for sibling linked list. */ \
+ if (replace != NULL) { \
+ if (parent != NULL) { \
+ phn_prev_set(a_type, a_field, replace, parent); \
+ phn_lchild_set(a_type, a_field, parent, \
+ replace); \
+ } else { \
+ phn_prev_set(a_type, a_field, replace, \
+ phn_prev_get(a_type, a_field, phn)); \
+ if (phn_prev_get(a_type, a_field, phn) != \
+ NULL) { \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } \
+ phn_next_set(a_type, a_field, replace, \
+ phn_next_get(a_type, a_field, phn)); \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } else { \
+ if (parent != NULL) { \
+ a_type *next = phn_next_get(a_type, a_field, \
+ phn); \
+ phn_lchild_set(a_type, a_field, parent, next); \
+ if (next != NULL) { \
+ phn_prev_set(a_type, a_field, next, \
+ parent); \
+ } \
+ } else { \
+ assert(phn_prev_get(a_type, a_field, phn) != \
+ NULL); \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ phn_next_get(a_type, a_field, phn)); \
+ } \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ phn_prev_get(a_type, a_field, phn)); \
+ } \
+ } \
+}
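+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the upstream
+ * header); node_t, its "link" field, and node_cmp() are hypothetical:
+ *
+ *   typedef struct node_s node_t;
+ *   struct node_s {
+ *           uint64_t        key;
+ *           phn(node_t)     link;
+ *   };
+ *   typedef ph(node_t) heap_t;
+ *
+ *   static int
+ *   node_cmp(const node_t *a, const node_t *b)
+ *   {
+ *           return ((a->key > b->key) - (a->key < b->key));
+ *   }
+ *
+ *   ph_proto(static, heap_, heap_t, node_t)
+ *   ph_gen(static, heap_, heap_t, node_t, link, node_cmp)
+ */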
+
+#endif /* PH_H_ */
diff --git a/memory/jemalloc/src/include/jemalloc/internal/private_namespace.sh b/memory/jemalloc/src/include/jemalloc/internal/private_namespace.sh
new file mode 100755
index 000000000..cd25eb306
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/private_namespace.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+for symbol in `cat $1` ; do
+ echo "#define ${symbol} JEMALLOC_N(${symbol})"
+done
diff --git a/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt b/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt
new file mode 100644
index 000000000..87c8c9b71
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt
@@ -0,0 +1,626 @@
+a0dalloc
+a0get
+a0malloc
+arena_aalloc
+arena_alloc_junk_small
+arena_basic_stats_merge
+arena_bin_index
+arena_bin_info
+arena_bitselm_get_const
+arena_bitselm_get_mutable
+arena_boot
+arena_choose
+arena_choose_hard
+arena_choose_impl
+arena_chunk_alloc_huge
+arena_chunk_cache_maybe_insert
+arena_chunk_cache_maybe_remove
+arena_chunk_dalloc_huge
+arena_chunk_ralloc_huge_expand
+arena_chunk_ralloc_huge_shrink
+arena_chunk_ralloc_huge_similar
+arena_cleanup
+arena_dalloc
+arena_dalloc_bin
+arena_dalloc_bin_junked_locked
+arena_dalloc_junk_large
+arena_dalloc_junk_small
+arena_dalloc_large
+arena_dalloc_large_junked_locked
+arena_dalloc_small
+arena_decay_tick
+arena_decay_ticks
+arena_decay_time_default_get
+arena_decay_time_default_set
+arena_decay_time_get
+arena_decay_time_set
+arena_dss_prec_get
+arena_dss_prec_set
+arena_get
+arena_ichoose
+arena_init
+arena_lg_dirty_mult_default_get
+arena_lg_dirty_mult_default_set
+arena_lg_dirty_mult_get
+arena_lg_dirty_mult_set
+arena_malloc
+arena_malloc_hard
+arena_malloc_large
+arena_mapbits_allocated_get
+arena_mapbits_binind_get
+arena_mapbits_decommitted_get
+arena_mapbits_dirty_get
+arena_mapbits_get
+arena_mapbits_internal_set
+arena_mapbits_large_binind_set
+arena_mapbits_large_get
+arena_mapbits_large_set
+arena_mapbits_large_size_get
+arena_mapbits_size_decode
+arena_mapbits_size_encode
+arena_mapbits_small_runind_get
+arena_mapbits_small_set
+arena_mapbits_unallocated_set
+arena_mapbits_unallocated_size_get
+arena_mapbits_unallocated_size_set
+arena_mapbits_unzeroed_get
+arena_mapbitsp_get_const
+arena_mapbitsp_get_mutable
+arena_mapbitsp_read
+arena_mapbitsp_write
+arena_maxrun
+arena_maybe_purge
+arena_metadata_allocated_add
+arena_metadata_allocated_get
+arena_metadata_allocated_sub
+arena_migrate
+arena_miscelm_get_const
+arena_miscelm_get_mutable
+arena_miscelm_to_pageind
+arena_miscelm_to_rpages
+arena_new
+arena_node_alloc
+arena_node_dalloc
+arena_nthreads_dec
+arena_nthreads_get
+arena_nthreads_inc
+arena_palloc
+arena_postfork_child
+arena_postfork_parent
+arena_prefork0
+arena_prefork1
+arena_prefork2
+arena_prefork3
+arena_prof_accum
+arena_prof_accum_impl
+arena_prof_accum_locked
+arena_prof_promoted
+arena_prof_tctx_get
+arena_prof_tctx_reset
+arena_prof_tctx_set
+arena_ptr_small_binind_get
+arena_purge
+arena_quarantine_junk_small
+arena_ralloc
+arena_ralloc_junk_large
+arena_ralloc_no_move
+arena_rd_to_miscelm
+arena_redzone_corruption
+arena_reset
+arena_run_regind
+arena_run_to_miscelm
+arena_salloc
+arena_sdalloc
+arena_stats_merge
+arena_tcache_fill_small
+arena_tdata_get
+arena_tdata_get_hard
+arenas
+arenas_tdata_bypass_cleanup
+arenas_tdata_cleanup
+atomic_add_p
+atomic_add_u
+atomic_add_uint32
+atomic_add_uint64
+atomic_add_z
+atomic_cas_p
+atomic_cas_u
+atomic_cas_uint32
+atomic_cas_uint64
+atomic_cas_z
+atomic_sub_p
+atomic_sub_u
+atomic_sub_uint32
+atomic_sub_uint64
+atomic_sub_z
+atomic_write_p
+atomic_write_u
+atomic_write_uint32
+atomic_write_uint64
+atomic_write_z
+base_alloc
+base_boot
+base_postfork_child
+base_postfork_parent
+base_prefork
+base_stats_get
+bitmap_full
+bitmap_get
+bitmap_info_init
+bitmap_init
+bitmap_set
+bitmap_sfu
+bitmap_size
+bitmap_unset
+bootstrap_calloc
+bootstrap_free
+bootstrap_malloc
+bt_init
+buferror
+chunk_alloc_base
+chunk_alloc_cache
+chunk_alloc_dss
+chunk_alloc_mmap
+chunk_alloc_wrapper
+chunk_boot
+chunk_dalloc_cache
+chunk_dalloc_mmap
+chunk_dalloc_wrapper
+chunk_deregister
+chunk_dss_boot
+chunk_dss_mergeable
+chunk_dss_prec_get
+chunk_dss_prec_set
+chunk_hooks_default
+chunk_hooks_get
+chunk_hooks_set
+chunk_in_dss
+chunk_lookup
+chunk_npages
+chunk_purge_wrapper
+chunk_register
+chunks_rtree
+chunksize
+chunksize_mask
+ckh_count
+ckh_delete
+ckh_insert
+ckh_iter
+ckh_new
+ckh_pointer_hash
+ckh_pointer_keycomp
+ckh_remove
+ckh_search
+ckh_string_hash
+ckh_string_keycomp
+ctl_boot
+ctl_bymib
+ctl_byname
+ctl_nametomib
+ctl_postfork_child
+ctl_postfork_parent
+ctl_prefork
+decay_ticker_get
+dss_prec_names
+extent_node_achunk_get
+extent_node_achunk_set
+extent_node_addr_get
+extent_node_addr_set
+extent_node_arena_get
+extent_node_arena_set
+extent_node_committed_get
+extent_node_committed_set
+extent_node_dirty_insert
+extent_node_dirty_linkage_init
+extent_node_dirty_remove
+extent_node_init
+extent_node_prof_tctx_get
+extent_node_prof_tctx_set
+extent_node_size_get
+extent_node_size_set
+extent_node_zeroed_get
+extent_node_zeroed_set
+extent_tree_ad_destroy
+extent_tree_ad_destroy_recurse
+extent_tree_ad_empty
+extent_tree_ad_first
+extent_tree_ad_insert
+extent_tree_ad_iter
+extent_tree_ad_iter_recurse
+extent_tree_ad_iter_start
+extent_tree_ad_last
+extent_tree_ad_new
+extent_tree_ad_next
+extent_tree_ad_nsearch
+extent_tree_ad_prev
+extent_tree_ad_psearch
+extent_tree_ad_remove
+extent_tree_ad_reverse_iter
+extent_tree_ad_reverse_iter_recurse
+extent_tree_ad_reverse_iter_start
+extent_tree_ad_search
+extent_tree_szad_destroy
+extent_tree_szad_destroy_recurse
+extent_tree_szad_empty
+extent_tree_szad_first
+extent_tree_szad_insert
+extent_tree_szad_iter
+extent_tree_szad_iter_recurse
+extent_tree_szad_iter_start
+extent_tree_szad_last
+extent_tree_szad_new
+extent_tree_szad_next
+extent_tree_szad_nsearch
+extent_tree_szad_prev
+extent_tree_szad_psearch
+extent_tree_szad_remove
+extent_tree_szad_reverse_iter
+extent_tree_szad_reverse_iter_recurse
+extent_tree_szad_reverse_iter_start
+extent_tree_szad_search
+ffs_llu
+ffs_lu
+ffs_u
+ffs_u32
+ffs_u64
+ffs_zu
+get_errno
+hash
+hash_fmix_32
+hash_fmix_64
+hash_get_block_32
+hash_get_block_64
+hash_rotl_32
+hash_rotl_64
+hash_x64_128
+hash_x86_128
+hash_x86_32
+huge_aalloc
+huge_dalloc
+huge_dalloc_junk
+huge_malloc
+huge_palloc
+huge_prof_tctx_get
+huge_prof_tctx_reset
+huge_prof_tctx_set
+huge_ralloc
+huge_ralloc_no_move
+huge_salloc
+iaalloc
+ialloc
+iallocztm
+iarena_cleanup
+idalloc
+idalloctm
+in_valgrind
+index2size
+index2size_compute
+index2size_lookup
+index2size_tab
+ipalloc
+ipalloct
+ipallocztm
+iqalloc
+iralloc
+iralloct
+iralloct_realign
+isalloc
+isdalloct
+isqalloc
+isthreaded
+ivsalloc
+ixalloc
+jemalloc_postfork_child
+jemalloc_postfork_parent
+jemalloc_prefork
+large_maxclass
+lg_floor
+lg_prof_sample
+malloc_cprintf
+malloc_mutex_assert_not_owner
+malloc_mutex_assert_owner
+malloc_mutex_boot
+malloc_mutex_init
+malloc_mutex_lock
+malloc_mutex_postfork_child
+malloc_mutex_postfork_parent
+malloc_mutex_prefork
+malloc_mutex_unlock
+malloc_printf
+malloc_snprintf
+malloc_strtoumax
+malloc_tsd_boot0
+malloc_tsd_boot1
+malloc_tsd_cleanup_register
+malloc_tsd_dalloc
+malloc_tsd_malloc
+malloc_tsd_no_cleanup
+malloc_vcprintf
+malloc_vsnprintf
+malloc_write
+map_bias
+map_misc_offset
+mb_write
+narenas_auto
+narenas_tdata_cleanup
+narenas_total_get
+ncpus
+nhbins
+nhclasses
+nlclasses
+nstime_add
+nstime_compare
+nstime_copy
+nstime_divide
+nstime_idivide
+nstime_imultiply
+nstime_init
+nstime_init2
+nstime_monotonic
+nstime_ns
+nstime_nsec
+nstime_sec
+nstime_subtract
+nstime_update
+opt_abort
+opt_decay_time
+opt_dss
+opt_junk
+opt_junk_alloc
+opt_junk_free
+opt_lg_chunk
+opt_lg_dirty_mult
+opt_lg_prof_interval
+opt_lg_prof_sample
+opt_lg_tcache_max
+opt_narenas
+opt_prof
+opt_prof_accum
+opt_prof_active
+opt_prof_final
+opt_prof_gdump
+opt_prof_leak
+opt_prof_prefix
+opt_prof_thread_active_init
+opt_purge
+opt_quarantine
+opt_redzone
+opt_stats_print
+opt_tcache
+opt_utrace
+opt_xmalloc
+opt_zero
+p2rz
+pages_boot
+pages_commit
+pages_decommit
+pages_map
+pages_purge
+pages_trim
+pages_unmap
+pind2sz
+pind2sz_compute
+pind2sz_lookup
+pind2sz_tab
+pow2_ceil_u32
+pow2_ceil_u64
+pow2_ceil_zu
+prng_lg_range_u32
+prng_lg_range_u64
+prng_lg_range_zu
+prng_range_u32
+prng_range_u64
+prng_range_zu
+prng_state_next_u32
+prng_state_next_u64
+prng_state_next_zu
+prof_active
+prof_active_get
+prof_active_get_unlocked
+prof_active_set
+prof_alloc_prep
+prof_alloc_rollback
+prof_backtrace
+prof_boot0
+prof_boot1
+prof_boot2
+prof_bt_count
+prof_dump_header
+prof_dump_open
+prof_free
+prof_free_sampled_object
+prof_gdump
+prof_gdump_get
+prof_gdump_get_unlocked
+prof_gdump_set
+prof_gdump_val
+prof_idump
+prof_interval
+prof_lookup
+prof_malloc
+prof_malloc_sample_object
+prof_mdump
+prof_postfork_child
+prof_postfork_parent
+prof_prefork0
+prof_prefork1
+prof_realloc
+prof_reset
+prof_sample_accum_update
+prof_sample_threshold_update
+prof_tctx_get
+prof_tctx_reset
+prof_tctx_set
+prof_tdata_cleanup
+prof_tdata_count
+prof_tdata_get
+prof_tdata_init
+prof_tdata_reinit
+prof_thread_active_get
+prof_thread_active_init_get
+prof_thread_active_init_set
+prof_thread_active_set
+prof_thread_name_get
+prof_thread_name_set
+psz2ind
+psz2u
+purge_mode_names
+quarantine
+quarantine_alloc_hook
+quarantine_alloc_hook_work
+quarantine_cleanup
+rtree_child_read
+rtree_child_read_hard
+rtree_child_tryread
+rtree_delete
+rtree_get
+rtree_new
+rtree_node_valid
+rtree_set
+rtree_start_level
+rtree_subkey
+rtree_subtree_read
+rtree_subtree_read_hard
+rtree_subtree_tryread
+rtree_val_read
+rtree_val_write
+run_quantize_ceil
+run_quantize_floor
+s2u
+s2u_compute
+s2u_lookup
+sa2u
+set_errno
+size2index
+size2index_compute
+size2index_lookup
+size2index_tab
+spin_adaptive
+spin_init
+stats_cactive
+stats_cactive_add
+stats_cactive_get
+stats_cactive_sub
+stats_print
+tcache_alloc_easy
+tcache_alloc_large
+tcache_alloc_small
+tcache_alloc_small_hard
+tcache_arena_reassociate
+tcache_bin_flush_large
+tcache_bin_flush_small
+tcache_bin_info
+tcache_boot
+tcache_cleanup
+tcache_create
+tcache_dalloc_large
+tcache_dalloc_small
+tcache_enabled_cleanup
+tcache_enabled_get
+tcache_enabled_set
+tcache_event
+tcache_event_hard
+tcache_flush
+tcache_get
+tcache_get_hard
+tcache_maxclass
+tcache_salloc
+tcache_stats_merge
+tcaches
+tcaches_create
+tcaches_destroy
+tcaches_flush
+tcaches_get
+thread_allocated_cleanup
+thread_deallocated_cleanup
+ticker_copy
+ticker_init
+ticker_read
+ticker_tick
+ticker_ticks
+tsd_arena_get
+tsd_arena_set
+tsd_arenap_get
+tsd_arenas_tdata_bypass_get
+tsd_arenas_tdata_bypass_set
+tsd_arenas_tdata_bypassp_get
+tsd_arenas_tdata_get
+tsd_arenas_tdata_set
+tsd_arenas_tdatap_get
+tsd_boot
+tsd_boot0
+tsd_boot1
+tsd_booted
+tsd_booted_get
+tsd_cleanup
+tsd_cleanup_wrapper
+tsd_fetch
+tsd_fetch_impl
+tsd_get
+tsd_get_allocates
+tsd_iarena_get
+tsd_iarena_set
+tsd_iarenap_get
+tsd_initialized
+tsd_init_check_recursion
+tsd_init_finish
+tsd_init_head
+tsd_narenas_tdata_get
+tsd_narenas_tdata_set
+tsd_narenas_tdatap_get
+tsd_wrapper_get
+tsd_wrapper_set
+tsd_nominal
+tsd_prof_tdata_get
+tsd_prof_tdata_set
+tsd_prof_tdatap_get
+tsd_quarantine_get
+tsd_quarantine_set
+tsd_quarantinep_get
+tsd_set
+tsd_tcache_enabled_get
+tsd_tcache_enabled_set
+tsd_tcache_enabledp_get
+tsd_tcache_get
+tsd_tcache_set
+tsd_tcachep_get
+tsd_thread_allocated_get
+tsd_thread_allocated_set
+tsd_thread_allocatedp_get
+tsd_thread_deallocated_get
+tsd_thread_deallocated_set
+tsd_thread_deallocatedp_get
+tsd_tls
+tsd_tsd
+tsd_tsdn
+tsd_witness_fork_get
+tsd_witness_fork_set
+tsd_witness_forkp_get
+tsd_witnesses_get
+tsd_witnesses_set
+tsd_witnessesp_get
+tsdn_fetch
+tsdn_null
+tsdn_tsd
+u2rz
+valgrind_freelike_block
+valgrind_make_mem_defined
+valgrind_make_mem_noaccess
+valgrind_make_mem_undefined
+witness_assert_lockless
+witness_assert_not_owner
+witness_assert_owner
+witness_fork_cleanup
+witness_init
+witness_lock
+witness_lock_error
+witness_lockless_error
+witness_not_owner_error
+witness_owner
+witness_owner_error
+witness_postfork_child
+witness_postfork_parent
+witness_prefork
+witness_unlock
+witnesses_cleanup
+zone_register
diff --git a/memory/jemalloc/src/include/jemalloc/internal/private_unnamespace.sh b/memory/jemalloc/src/include/jemalloc/internal/private_unnamespace.sh
new file mode 100755
index 000000000..23fed8e80
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/private_unnamespace.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+for symbol in `cat $1` ; do
+ echo "#undef ${symbol}"
+done
diff --git a/memory/jemalloc/src/include/jemalloc/internal/prng.h b/memory/jemalloc/src/include/jemalloc/internal/prng.h
new file mode 100644
index 000000000..c2bda19c6
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/prng.h
@@ -0,0 +1,207 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * Simple linear congruential pseudo-random number generator:
+ *
+ * x_{n+1} = (a*x_n + c) % m
+ *
+ * where the following constants ensure maximal period:
+ *
+ * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
+ * c == Odd number (relatively prime to 2^n).
+ * m == 2^32 (2^64 for the 64-bit and pointer-sized variants below)
+ *
+ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
+ *
+ * This choice of m has the disadvantage that the quality of the bits is
+ * proportional to bit position. For example, the lowest bit has a cycle of 2,
+ * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
+ * bits.
+ */
+
+#define PRNG_A_32 UINT32_C(1103515241)
+#define PRNG_C_32 UINT32_C(12347)
+
+#define PRNG_A_64 UINT64_C(6364136223846793005)
+#define PRNG_C_64 UINT64_C(1442695040888963407)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+uint32_t prng_state_next_u32(uint32_t state);
+uint64_t prng_state_next_u64(uint64_t state);
+size_t prng_state_next_zu(size_t state);
+
+uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
+ bool atomic);
+uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
+size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
+
+uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
+uint64_t prng_range_u64(uint64_t *state, uint64_t range);
+size_t prng_range_zu(size_t *state, size_t range, bool atomic);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
+JEMALLOC_ALWAYS_INLINE uint32_t
+prng_state_next_u32(uint32_t state)
+{
+
+ return ((state * PRNG_A_32) + PRNG_C_32);
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_state_next_u64(uint64_t state)
+{
+
+ return ((state * PRNG_A_64) + PRNG_C_64);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prng_state_next_zu(size_t state)
+{
+
+#if LG_SIZEOF_PTR == 2
+ return ((state * PRNG_A_32) + PRNG_C_32);
+#elif LG_SIZEOF_PTR == 3
+ return ((state * PRNG_A_64) + PRNG_C_64);
+#else
+#error Unsupported pointer size
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE uint32_t
+prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
+{
+ uint32_t ret, state1;
+
+ assert(lg_range > 0);
+ assert(lg_range <= 32);
+
+ if (atomic) {
+ uint32_t state0;
+
+ do {
+ state0 = atomic_read_uint32(state);
+ state1 = prng_state_next_u32(state0);
+ } while (atomic_cas_uint32(state, state0, state1));
+ } else {
+ state1 = prng_state_next_u32(*state);
+ *state = state1;
+ }
+ ret = state1 >> (32 - lg_range);
+
+ return (ret);
+}
+
+/* 64-bit atomic operations cannot be supported on all relevant platforms. */
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_lg_range_u64(uint64_t *state, unsigned lg_range)
+{
+ uint64_t ret, state1;
+
+ assert(lg_range > 0);
+ assert(lg_range <= 64);
+
+ state1 = prng_state_next_u64(*state);
+ *state = state1;
+ ret = state1 >> (64 - lg_range);
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
+{
+ size_t ret, state1;
+
+ assert(lg_range > 0);
+ assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
+
+ if (atomic) {
+ size_t state0;
+
+ do {
+ state0 = atomic_read_z(state);
+ state1 = prng_state_next_zu(state0);
+ } while (atomic_cas_z(state, state0, state1));
+ } else {
+ state1 = prng_state_next_zu(*state);
+ *state = state1;
+ }
+ ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE uint32_t
+prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
+{
+ uint32_t ret;
+ unsigned lg_range;
+
+ assert(range > 1);
+
+ /* Compute the ceiling of lg(range). */
+ lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
+
+ /* Generate a result in [0..range) via repeated trial. */
+ do {
+ ret = prng_lg_range_u32(state, lg_range, atomic);
+ } while (ret >= range);
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+prng_range_u64(uint64_t *state, uint64_t range)
+{
+ uint64_t ret;
+ unsigned lg_range;
+
+ assert(range > 1);
+
+ /* Compute the ceiling of lg(range). */
+ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+
+ /* Generate a result in [0..range) via repeated trial. */
+ do {
+ ret = prng_lg_range_u64(state, lg_range);
+ } while (ret >= range);
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prng_range_zu(size_t *state, size_t range, bool atomic)
+{
+ size_t ret;
+ unsigned lg_range;
+
+ assert(range > 1);
+
+ /* Compute the ceiling of lg(range). */
+ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+
+ /* Generate a result in [0..range) via repeated trial. */
+ do {
+ ret = prng_lg_range_zu(state, lg_range, atomic);
+ } while (ret >= range);
+
+ return (ret);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
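As a reference for the bounded-draw logic above, here is a minimal standalone sketch (plain C, not jemalloc code) of the same technique: advance the LCG, keep only the high-order lg(range) bits, and reject draws that fall outside [0..range). The constants mirror PRNG_A_64/PRNG_C_64; the helper names step(), lg_range_draw(), range_draw() and the main() driver are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define A64 UINT64_C(6364136223846793005)   /* Same multiplier as PRNG_A_64. */
#define C64 UINT64_C(1442695040888963407)   /* Same increment as PRNG_C_64. */

/* Advance the generator: x_{n+1} = (a*x_n + c) mod 2^64. */
static uint64_t
step(uint64_t state)
{
	return ((state * A64) + C64);
}

/* Keep the high-order lg_range bits; the low bits of an LCG have short cycles. */
static uint64_t
lg_range_draw(uint64_t *state, unsigned lg_range)
{
	assert(lg_range > 0 && lg_range <= 64);
	*state = step(*state);
	return (*state >> (64 - lg_range));
}

/* Uniform value in [0..range), mirroring the rejection loop in prng_range_u64(). */
static uint64_t
range_draw(uint64_t *state, uint64_t range)
{
	unsigned lg_range = 0;
	uint64_t ret;

	assert(range > 1);
	/* Ceiling of lg(range): smallest k such that 2^k >= range. */
	while (lg_range < 64 && (UINT64_C(1) << lg_range) < range)
		lg_range++;
	do {
		ret = lg_range_draw(state, lg_range);
	} while (ret >= range);	/* Reject out-of-range draws and retry. */
	return (ret);
}

int
main(void)
{
	uint64_t state = 42;	/* Arbitrary seed. */
	unsigned i;

	for (i = 0; i < 5; i++)
		printf("%llu\n", (unsigned long long)range_draw(&state, 10));
	return (0);
}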
diff --git a/memory/jemalloc/src/include/jemalloc/internal/prof.h b/memory/jemalloc/src/include/jemalloc/internal/prof.h
new file mode 100644
index 000000000..8293b71ed
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/prof.h
@@ -0,0 +1,547 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct prof_bt_s prof_bt_t;
+typedef struct prof_cnt_s prof_cnt_t;
+typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_gctx_s prof_gctx_t;
+typedef struct prof_tdata_s prof_tdata_t;
+
+/* Option defaults. */
+#ifdef JEMALLOC_PROF
+# define PROF_PREFIX_DEFAULT "jeprof"
+#else
+# define PROF_PREFIX_DEFAULT ""
+#endif
+#define LG_PROF_SAMPLE_DEFAULT 19
+#define LG_PROF_INTERVAL_DEFAULT -1
+
+/*
+ * Hard limit on stack backtrace depth. The version of prof_backtrace() that
+ * is based on __builtin_return_address() necessarily has a hard-coded number
+ * of backtrace frame handlers, and should be kept in sync with this setting.
+ */
+#define PROF_BT_MAX 128
+
+/* Initial hash table size. */
+#define PROF_CKH_MINITEMS 64
+
+/* Size of memory buffer to use when writing dump files. */
+#define PROF_DUMP_BUFSIZE 65536
+
+/* Size of stack-allocated buffer used by prof_printf(). */
+#define PROF_PRINTF_BUFSIZE 128
+
+/*
+ * Number of mutexes shared among all gctx's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NCTX_LOCKS 1024
+
+/*
+ * Number of mutexes shared among all tdata's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NTDATA_LOCKS 256
+
+/*
+ * prof_tdata pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
+#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
+#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct prof_bt_s {
+ /* Backtrace, stored as len program counters. */
+ void **vec;
+ unsigned len;
+};
+
+#ifdef JEMALLOC_PROF_LIBGCC
+/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
+typedef struct {
+ prof_bt_t *bt;
+ unsigned max;
+} prof_unwind_data_t;
+#endif
+
+struct prof_cnt_s {
+ /* Profiling counters. */
+ uint64_t curobjs;
+ uint64_t curbytes;
+ uint64_t accumobjs;
+ uint64_t accumbytes;
+};
+
+typedef enum {
+ prof_tctx_state_initializing,
+ prof_tctx_state_nominal,
+ prof_tctx_state_dumping,
+ prof_tctx_state_purgatory /* Dumper must finish destroying. */
+} prof_tctx_state_t;
+
+struct prof_tctx_s {
+ /* Thread data for thread that performed the allocation. */
+ prof_tdata_t *tdata;
+
+ /*
+ * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+ * defunct during teardown.
+ */
+ uint64_t thr_uid;
+ uint64_t thr_discrim;
+
+ /* Profiling counters, protected by tdata->lock. */
+ prof_cnt_t cnts;
+
+ /* Associated global context. */
+ prof_gctx_t *gctx;
+
+ /*
+ * UID that distinguishes multiple tctx's created by the same thread,
+ * but coexisting in gctx->tctxs. There are two ways that such
+ * coexistence can occur:
+ * - A dumper thread can cause a tctx to be retained in the purgatory
+ * state.
+ * - Although a single "producer" thread must create all tctx's which
+ * share the same thr_uid, multiple "consumers" can each concurrently
+ * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
+ * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
+ * threshold can be hit again before the first consumer finishes
+ * executing prof_tctx_destroy().
+ */
+ uint64_t tctx_uid;
+
+ /* Linkage into gctx's tctxs. */
+ rb_node(prof_tctx_t) tctx_link;
+
+ /*
+ * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
+ * sample vs destroy race.
+ */
+ bool prepared;
+
+ /* Current dump-related state, protected by gctx->lock. */
+ prof_tctx_state_t state;
+
+ /*
+ * Copy of cnts snapshotted during early dump phase, protected by
+ * dump_mtx.
+ */
+ prof_cnt_t dump_cnts;
+};
+typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
+
+struct prof_gctx_s {
+ /* Protects nlimbo, cnt_summed, and tctxs. */
+ malloc_mutex_t *lock;
+
+ /*
+ * Number of threads that currently cause this gctx to be in a state of
+ * limbo due to one of:
+ * - Initializing this gctx.
+ * - Initializing per thread counters associated with this gctx.
+ * - Preparing to destroy this gctx.
+ * - Dumping a heap profile that includes this gctx.
+ * nlimbo must be 1 (single destroyer) in order to safely destroy the
+ * gctx.
+ */
+ unsigned nlimbo;
+
+ /*
+ * Tree of profile counters, one for each thread that has allocated in
+ * this context.
+ */
+ prof_tctx_tree_t tctxs;
+
+ /* Linkage for tree of contexts to be dumped. */
+ rb_node(prof_gctx_t) dump_link;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Associated backtrace. */
+ prof_bt_t bt;
+
+ /* Backtrace vector, variable size, referred to by bt. */
+ void *vec[1];
+};
+typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
+
+struct prof_tdata_s {
+ malloc_mutex_t *lock;
+
+ /* Monotonically increasing unique thread identifier. */
+ uint64_t thr_uid;
+
+ /*
+ * Monotonically increasing discriminator among tdata structures
+ * associated with the same thr_uid.
+ */
+ uint64_t thr_discrim;
+
+ /* Included in heap profile dumps if non-NULL. */
+ char *thread_name;
+
+ bool attached;
+ bool expired;
+
+ rb_node(prof_tdata_t) tdata_link;
+
+ /*
+ * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
+ * necessary when incrementing this field, because only one thread ever
+ * does so.
+ */
+ uint64_t tctx_uid_next;
+
+ /*
+ * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
+ * backtraces for which it has non-zero allocation/deallocation counters
+ * associated with thread-specific prof_tctx_t objects. Other threads
+ * may write to prof_tctx_t contents when freeing associated objects.
+ */
+ ckh_t bt2tctx;
+
+ /* Sampling state. */
+ uint64_t prng_state;
+ uint64_t bytes_until_sample;
+
+ /* State used to avoid dumping while operating on prof internals. */
+ bool enq;
+ bool enq_idump;
+ bool enq_gdump;
+
+ /*
+ * Set to true during an early dump phase for tdata's which are
+ * currently being dumped. New threads' tdata's have this initialized
+ * to false so that they aren't accidentally included in later dump
+ * phases.
+ */
+ bool dumping;
+
+ /*
+ * True if profiling is active for this tdata's thread
+ * (thread.prof.active mallctl).
+ */
+ bool active;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Backtrace vector, used for calls to prof_backtrace(). */
+ void *vec[PROF_BT_MAX];
+};
+typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern bool opt_prof;
+extern bool opt_prof_active;
+extern bool opt_prof_thread_active_init;
+extern size_t	opt_lg_prof_sample;   /* lg(mean bytes between samples). */
+extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
+extern bool opt_prof_gdump; /* High-water memory dumping. */
+extern bool opt_prof_final; /* Final profile dumping. */
+extern bool opt_prof_leak; /* Dump leak summary at exit. */
+extern bool opt_prof_accum; /* Report cumulative bytes. */
+extern char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Accessed via prof_active_[gs]et{_unlocked,}(). */
+extern bool prof_active;
+
+/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
+extern bool prof_gdump_val;
+
+/*
+ * Profile dump interval, measured in bytes allocated. Each arena triggers a
+ * profile dump when it reaches this threshold. The effect is that the
+ * interval between profile dumps averages prof_interval, though the actual
+ * interval between dumps will tend to be sporadic, and the interval will be a
+ * maximum of approximately (prof_interval * narenas).
+ */
+extern uint64_t prof_interval;
+
+/*
+ * Initialized as opt_lg_prof_sample, and potentially modified during profiling
+ * resets.
+ */
+extern size_t lg_prof_sample;
+
+void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
+void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
+void bt_init(prof_bt_t *bt, void **vec);
+void prof_backtrace(prof_bt_t *bt);
+prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t prof_tdata_count(void);
+size_t prof_bt_count(void);
+const prof_cnt_t *prof_cnt_all(void);
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *prof_dump_open;
+typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
+extern prof_dump_header_t *prof_dump_header;
+#endif
+void prof_idump(tsdn_t *tsdn);
+bool prof_mdump(tsd_t *tsd, const char *filename);
+void prof_gdump(tsdn_t *tsdn);
+prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
+void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_tdata_cleanup(tsd_t *tsd);
+bool prof_active_get(tsdn_t *tsdn);
+bool prof_active_set(tsdn_t *tsdn, bool active);
+const char *prof_thread_name_get(tsd_t *tsd);
+int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
+bool prof_thread_active_get(tsd_t *tsd);
+bool prof_thread_active_set(tsd_t *tsd, bool active);
+bool prof_thread_active_init_get(tsdn_t *tsdn);
+bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
+bool prof_gdump_get(tsdn_t *tsdn);
+bool prof_gdump_set(tsdn_t *tsdn, bool active);
+void prof_boot0(void);
+void prof_boot1(void);
+bool prof_boot2(tsd_t *tsd);
+void prof_prefork0(tsdn_t *tsdn);
+void prof_prefork1(tsdn_t *tsdn);
+void prof_postfork_parent(tsdn_t *tsdn);
+void prof_postfork_child(tsdn_t *tsdn);
+void prof_sample_threshold_update(prof_tdata_t *tdata);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+bool prof_active_get_unlocked(void);
+bool prof_gdump_get_unlocked(void);
+prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
+prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *tctx);
+bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
+ prof_tdata_t **tdata_out);
+prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
+ bool update);
+void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
+ prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
+ size_t old_usize, prof_tctx_t *old_tctx);
+void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
+JEMALLOC_ALWAYS_INLINE bool
+prof_active_get_unlocked(void)
+{
+
+ /*
+ * Even if opt_prof is true, sampling can be temporarily disabled by
+ * setting prof_active to false. No locking is used when reading
+ * prof_active in the fast path, so there are no guarantees regarding
+ * how long it will take for all threads to notice state changes.
+ */
+ return (prof_active);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void)
+{
+
+ /*
+ * No locking is used when reading prof_gdump_val in the fast path, so
+ * there are no guarantees regarding how long it will take for all
+ * threads to notice state changes.
+ */
+ return (prof_gdump_val);
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tdata_t *
+prof_tdata_get(tsd_t *tsd, bool create)
+{
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (create) {
+ if (unlikely(tdata == NULL)) {
+ if (tsd_nominal(tsd)) {
+ tdata = prof_tdata_init(tsd);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ } else if (unlikely(tdata->expired)) {
+ tdata = prof_tdata_reinit(tsd, tdata);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ assert(tdata == NULL || tdata->attached);
+ }
+
+ return (tdata);
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ return (arena_prof_tctx_get(tsdn, ptr));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_set(tsdn, ptr, usize, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
+ prof_tdata_t **tdata_out)
+{
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, true);
+ if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
+ tdata = NULL;
+
+ if (tdata_out != NULL)
+ *tdata_out = tdata;
+
+ if (unlikely(tdata == NULL))
+ return (true);
+
+ if (likely(tdata->bytes_until_sample >= usize)) {
+ if (update)
+ tdata->bytes_until_sample -= usize;
+ return (true);
+ } else {
+ /* Compute new sample threshold. */
+ if (update)
+ prof_sample_threshold_update(tdata);
+ return (!tdata->active);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
+{
+ prof_tctx_t *ret;
+ prof_tdata_t *tdata;
+ prof_bt_t bt;
+
+ assert(usize == s2u(usize));
+
+ if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+ &tdata)))
+ ret = (prof_tctx_t *)(uintptr_t)1U;
+ else {
+ bt_init(&bt, tdata->vec);
+ prof_backtrace(&bt);
+ ret = prof_lookup(tsd, &bt);
+ }
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(usize == isalloc(tsdn, ptr, true));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ prof_malloc_sample_object(tsdn, ptr, usize, tctx);
+ else
+ prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
+ bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+ prof_tctx_t *old_tctx)
+{
+ bool sampled, old_sampled;
+
+ cassert(config_prof);
+ assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
+
+ if (prof_active && !updated && ptr != NULL) {
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
+ if (prof_sample_accum_update(tsd, usize, true, NULL)) {
+ /*
+ * Don't sample. The usize passed to prof_alloc_prep()
+ * was larger than what actually got allocated, so a
+ * backtrace was captured for this allocation, even
+ * though its actual usize was insufficient to cross the
+ * sample threshold.
+ */
+ prof_alloc_rollback(tsd, tctx, true);
+ tctx = (prof_tctx_t *)(uintptr_t)1U;
+ }
+ }
+
+ sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+ old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+
+ if (unlikely(sampled))
+ prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
+ else
+ prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
+
+ if (unlikely(old_sampled))
+ prof_free_sampled_object(tsd, old_usize, old_tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_free(tsd_t *tsd, const void *ptr, size_t usize)
+{
+ prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+
+ cassert(config_prof);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ prof_free_sampled_object(tsd, usize, tctx);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
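The sampling decision in prof_sample_accum_update() above reduces to a per-thread byte countdown: each allocation shrinks bytes_until_sample, and only the allocation that exhausts it gets a real backtrace, after which a new threshold is drawn. A rough standalone sketch of that policy (not jemalloc code; sampler_t, sample_check() and the fixed mean threshold are simplifications — the real threshold is drawn pseudo-randomly via prng_state in prof_sample_threshold_update() so sampling stays unbiased):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Per-thread sampling state, loosely modeled on prof_tdata_t. */
typedef struct {
	size_t bytes_until_sample;	/* Bytes left before the next sample. */
	size_t mean_sample_bytes;	/* Stand-in for 2^lg_prof_sample. */
} sampler_t;

/*
 * Return true if an allocation of usize bytes should be sampled (i.e.
 * receive a backtrace), mirroring the counter logic in
 * prof_sample_accum_update().
 */
static bool
sample_check(sampler_t *s, size_t usize)
{
	if (s->bytes_until_sample >= usize) {
		s->bytes_until_sample -= usize;	/* Fast path: no sample. */
		return (false);
	}
	/* Threshold crossed: sample this allocation, draw a new threshold. */
	s->bytes_until_sample = s->mean_sample_bytes;
	return (true);
}

int
main(void)
{
	sampler_t s = {1 << 19, 1 << 19};	/* ~512 KiB between samples. */
	size_t i;

	for (i = 0; i < 32; i++) {
		if (sample_check(&s, 64 * 1024))
			printf("allocation %zu sampled\n", i);
	}
	return (0);
}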
diff --git a/memory/jemalloc/src/include/jemalloc/internal/public_namespace.sh b/memory/jemalloc/src/include/jemalloc/internal/public_namespace.sh
new file mode 100755
index 000000000..362109f71
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/public_namespace.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+for nm in `cat $1` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "#define je_${n} JEMALLOC_N(${n})"
+done
diff --git a/memory/jemalloc/src/include/jemalloc/internal/public_unnamespace.sh b/memory/jemalloc/src/include/jemalloc/internal/public_unnamespace.sh
new file mode 100755
index 000000000..4239d1775
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/public_unnamespace.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+for nm in `cat $1` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "#undef je_${n}"
+done
diff --git a/memory/jemalloc/src/include/jemalloc/internal/ql.h b/memory/jemalloc/src/include/jemalloc/internal/ql.h
new file mode 100644
index 000000000..1834bb855
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/ql.h
@@ -0,0 +1,81 @@
+/* List definitions. */
+#define ql_head(a_type) \
+struct { \
+ a_type *qlh_first; \
+}
+
+#define ql_head_initializer(a_head) {NULL}
+
+#define ql_elm(a_type) qr(a_type)
+
+/* List functions. */
+#define ql_new(a_head) do { \
+ (a_head)->qlh_first = NULL; \
+} while (0)
+
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+
+#define ql_first(a_head) ((a_head)->qlh_first)
+
+#define ql_last(a_head, a_field) \
+ ((ql_first(a_head) != NULL) \
+ ? qr_prev(ql_first(a_head), a_field) : NULL)
+
+#define ql_next(a_head, a_elm, a_field) \
+ ((ql_last(a_head, a_field) != (a_elm)) \
+ ? qr_next((a_elm), a_field) : NULL)
+
+#define ql_prev(a_head, a_elm, a_field) \
+ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
+ : NULL)
+
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+ qr_before_insert((a_qlelm), (a_elm), a_field); \
+ if (ql_first(a_head) == (a_qlelm)) { \
+ ql_first(a_head) = (a_elm); \
+ } \
+} while (0)
+
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
+ qr_after_insert((a_qlelm), (a_elm), a_field)
+
+#define ql_head_insert(a_head, a_elm, a_field) do { \
+ if (ql_first(a_head) != NULL) { \
+ qr_before_insert(ql_first(a_head), (a_elm), a_field); \
+ } \
+ ql_first(a_head) = (a_elm); \
+} while (0)
+
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
+ if (ql_first(a_head) != NULL) { \
+ qr_before_insert(ql_first(a_head), (a_elm), a_field); \
+ } \
+ ql_first(a_head) = qr_next((a_elm), a_field); \
+} while (0)
+
+#define ql_remove(a_head, a_elm, a_field) do { \
+ if (ql_first(a_head) == (a_elm)) { \
+ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
+ } \
+ if (ql_first(a_head) != (a_elm)) { \
+ qr_remove((a_elm), a_field); \
+ } else { \
+ ql_first(a_head) = NULL; \
+ } \
+} while (0)
+
+#define ql_head_remove(a_head, a_type, a_field) do { \
+ a_type *t = ql_first(a_head); \
+ ql_remove((a_head), t, a_field); \
+} while (0)
+
+#define ql_tail_remove(a_head, a_type, a_field) do { \
+ a_type *t = ql_last(a_head, a_field); \
+ ql_remove((a_head), t, a_field); \
+} while (0)
+
+#define ql_foreach(a_var, a_head, a_field) \
+ qr_foreach((a_var), ql_first(a_head), a_field)
+
+#define ql_reverse_foreach(a_var, a_head, a_field) \
+ qr_reverse_foreach((a_var), ql_first(a_head), a_field)
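A small usage sketch of the list macros above (assuming qr.h and ql.h are on the include path; widget_t and its link field are invented for illustration):

#include <stdio.h>

#include "qr.h"
#include "ql.h"

/* A hypothetical element type carrying embedded list linkage. */
typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;
};

int
main(void)
{
	/* The head only stores a pointer to the first element. */
	ql_head(widget_t) widgets;
	widget_t a, b, c, *w;

	ql_new(&widgets);
	a.id = 1; b.id = 2; c.id = 3;
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_elm_new(&c, link);

	ql_tail_insert(&widgets, &a, link);
	ql_tail_insert(&widgets, &b, link);
	ql_head_insert(&widgets, &c, link);	/* List is now: c, a, b. */

	ql_foreach(w, &widgets, link)
		printf("%d\n", w->id);		/* Prints 3, 1, 2. */

	ql_remove(&widgets, &a, link);		/* List is now: c, b. */
	return (0);
}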
diff --git a/memory/jemalloc/src/include/jemalloc/internal/qr.h b/memory/jemalloc/src/include/jemalloc/internal/qr.h
new file mode 100644
index 000000000..0fbaec25e
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/qr.h
@@ -0,0 +1,69 @@
+/* Ring definitions. */
+#define qr(a_type) \
+struct { \
+ a_type *qre_next; \
+ a_type *qre_prev; \
+}
+
+/* Ring functions. */
+#define qr_new(a_qr, a_field) do { \
+ (a_qr)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
+ (a_qr)->a_field.qre_next = (a_qrelm); \
+ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
+ (a_qrelm)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
+ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
+ (a_qr)->a_field.qre_prev = (a_qrelm); \
+ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
+ (a_qrelm)->a_field.qre_next = (a_qr); \
+} while (0)
+
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+ void *t; \
+ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
+ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
+ t = (a_qr_a)->a_field.qre_prev; \
+ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
+ (a_qr_b)->a_field.qre_prev = t; \
+} while (0)
+
+/*
+ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code.
+ */
+#define qr_split(a_qr_a, a_qr_b, a_field) \
+ qr_meld((a_qr_a), (a_qr_b), a_field)
+
+#define qr_remove(a_qr, a_field) do { \
+ (a_qr)->a_field.qre_prev->a_field.qre_next \
+ = (a_qr)->a_field.qre_next; \
+ (a_qr)->a_field.qre_next->a_field.qre_prev \
+ = (a_qr)->a_field.qre_prev; \
+ (a_qr)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_foreach(var, a_qr, a_field) \
+ for ((var) = (a_qr); \
+ (var) != NULL; \
+ (var) = (((var)->a_field.qre_next != (a_qr)) \
+ ? (var)->a_field.qre_next : NULL))
+
+#define qr_reverse_foreach(var, a_qr, a_field) \
+ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
+ (var) != NULL; \
+ (var) = (((var) != (a_qr)) \
+ ? (var)->a_field.qre_prev : NULL))
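To make the qr_meld()/qr_split() equivalence above concrete: melding splices two rings into one, and applying the same pointer swaps again at the same two elements splits them back apart. A short sketch (node_t and the element names are illustrative; assumes qr.h is available):

#include <stdio.h>

#include "qr.h"

typedef struct node_s node_t;
struct node_s {
	char name;
	qr(node_t) link;
};

static void
print_ring(node_t *start)
{
	node_t *n;

	qr_foreach(n, start, link)
		printf("%c ", n->name);
	printf("\n");
}

int
main(void)
{
	node_t a = {'a'}, b = {'b'}, c = {'c'}, d = {'d'};

	qr_new(&a, link); qr_new(&b, link);
	qr_new(&c, link); qr_new(&d, link);
	qr_after_insert(&a, &b, link);	/* Ring 1: a, b. */
	qr_after_insert(&c, &d, link);	/* Ring 2: c, d. */

	qr_meld(&a, &c, link);		/* Single ring: a, b, c, d. */
	print_ring(&a);

	qr_split(&a, &c, link);		/* Back to the two original rings. */
	print_ring(&a);			/* a, b */
	print_ring(&c);			/* c, d */
	return (0);
}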
diff --git a/memory/jemalloc/src/include/jemalloc/internal/quarantine.h b/memory/jemalloc/src/include/jemalloc/internal/quarantine.h
new file mode 100644
index 000000000..ae607399f
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/quarantine.h
@@ -0,0 +1,60 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct quarantine_obj_s quarantine_obj_t;
+typedef struct quarantine_s quarantine_t;
+
+/* Default per thread quarantine size if valgrind is enabled. */
+#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct quarantine_obj_s {
+ void *ptr;
+ size_t usize;
+};
+
+struct quarantine_s {
+ size_t curbytes;
+ size_t curobjs;
+ size_t first;
+#define LG_MAXOBJS_INIT 10
+ size_t lg_maxobjs;
+ quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void quarantine_alloc_hook_work(tsd_t *tsd);
+void quarantine(tsd_t *tsd, void *ptr);
+void quarantine_cleanup(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void quarantine_alloc_hook(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
+JEMALLOC_ALWAYS_INLINE void
+quarantine_alloc_hook(void)
+{
+ tsd_t *tsd;
+
+ assert(config_fill && opt_quarantine);
+
+ tsd = tsd_fetch();
+ if (tsd_quarantine_get(tsd) == NULL)
+ quarantine_alloc_hook_work(tsd);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
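The quarantine described above is essentially a byte-budgeted FIFO: frees are parked in the ring buffer and only released once the byte budget or object count would be exceeded, delaying reuse so tools such as Valgrind can flag use-after-free. A simplified fixed-capacity sketch of that idea (not the jemalloc implementation — jemalloc grows lg_maxobjs on demand, and qring_t/quarantine_put() are names invented here):

#include <stdlib.h>

#define QOBJS 8		/* Ring capacity (2^lg_maxobjs in jemalloc). */
#define QBYTES 4096	/* Byte budget before old objects are released. */

typedef struct {
	void *ptr;
	size_t usize;
} qobj_t;

typedef struct {
	size_t curbytes, curobjs, first;
	qobj_t objs[QOBJS];
} qring_t;

/* Release (really free) the oldest quarantined object. */
static void
quarantine_drain_one(qring_t *q)
{
	qobj_t *obj = &q->objs[q->first];

	free(obj->ptr);
	q->curbytes -= obj->usize;
	q->curobjs--;
	q->first = (q->first + 1) % QOBJS;
}

/* Defer freeing ptr: park it until the budget forces eviction. */
static void
quarantine_put(qring_t *q, void *ptr, size_t usize)
{
	while (q->curobjs > 0 &&
	    (q->curobjs == QOBJS || q->curbytes + usize > QBYTES))
		quarantine_drain_one(q);
	q->objs[(q->first + q->curobjs) % QOBJS] = (qobj_t){ptr, usize};
	q->curobjs++;
	q->curbytes += usize;
}

int
main(void)
{
	qring_t q = {0};
	int i;

	for (i = 0; i < 32; i++)
		quarantine_put(&q, malloc(512), 512);
	while (q.curobjs > 0)
		quarantine_drain_one(&q);	/* Flush at teardown. */
	return (0);
}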
diff --git a/memory/jemalloc/src/include/jemalloc/internal/rb.h b/memory/jemalloc/src/include/jemalloc/internal/rb.h
new file mode 100644
index 000000000..3770342f8
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/rb.h
@@ -0,0 +1,1003 @@
+/*-
+ *******************************************************************************
+ *
+ * cpp macro implementation of left-leaning 2-3 red-black trees. Parent
+ * pointers are not used, and color bits are stored in the least significant
+ * bit of right-child pointers (if RB_COMPACT is defined), thus making node
+ * linkage as compact as is possible for red-black trees.
+ *
+ * Usage:
+ *
+ * #include <stdint.h>
+ * #include <stdbool.h>
+ * #define NDEBUG // (Optional, see assert(3).)
+ * #include <assert.h>
+ * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
+ * #include <rb.h>
+ * ...
+ *
+ *******************************************************************************
+ */
+
+#ifndef RB_H_
+#define RB_H_
+
+#ifdef RB_COMPACT
+/* Node structure. */
+#define rb_node(a_type) \
+struct { \
+ a_type *rbn_left; \
+ a_type *rbn_right_red; \
+}
+#else
+#define rb_node(a_type) \
+struct { \
+ a_type *rbn_left; \
+ a_type *rbn_right; \
+ bool rbn_red; \
+}
+#endif
+
+/* Root structure. */
+#define rb_tree(a_type) \
+struct { \
+ a_type *rbt_root; \
+}
+
+/* Left accessors. */
+#define rbtn_left_get(a_type, a_field, a_node) \
+ ((a_node)->a_field.rbn_left)
+#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
+ (a_node)->a_field.rbn_left = a_left; \
+} while (0)
+
+#ifdef RB_COMPACT
+/* Right accessors. */
+#define rbtn_right_get(a_type, a_field, a_node) \
+ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
+ & ((ssize_t)-2)))
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
+ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
+} while (0)
+
+/* Color accessors. */
+#define rbtn_red_get(a_type, a_field, a_node) \
+ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
+ & ((size_t)1)))
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
+ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
+ | ((ssize_t)a_red)); \
+} while (0)
+#define rbtn_red_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
+ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
+} while (0)
+#define rbtn_black_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
+ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
+} while (0)
+
+/* Node initializer. */
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+ /* Bookkeeping bit cannot be used by node pointer. */ \
+ assert(((uintptr_t)(a_node) & 0x1) == 0); \
+ rbtn_left_set(a_type, a_field, (a_node), NULL); \
+ rbtn_right_set(a_type, a_field, (a_node), NULL); \
+ rbtn_red_set(a_type, a_field, (a_node)); \
+} while (0)
+#else
+/* Right accessors. */
+#define rbtn_right_get(a_type, a_field, a_node) \
+ ((a_node)->a_field.rbn_right)
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+ (a_node)->a_field.rbn_right = a_right; \
+} while (0)
+
+/* Color accessors. */
+#define rbtn_red_get(a_type, a_field, a_node) \
+ ((a_node)->a_field.rbn_red)
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+ (a_node)->a_field.rbn_red = (a_red); \
+} while (0)
+#define rbtn_red_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_red = true; \
+} while (0)
+#define rbtn_black_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_red = false; \
+} while (0)
+
+/* Node initializer. */
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+ rbtn_left_set(a_type, a_field, (a_node), NULL); \
+ rbtn_right_set(a_type, a_field, (a_node), NULL); \
+ rbtn_red_set(a_type, a_field, (a_node)); \
+} while (0)
+#endif
+
+/* Tree initializer. */
+#define rb_new(a_type, a_field, a_rbt) do { \
+ (a_rbt)->rbt_root = NULL; \
+} while (0)
+
+/* Internal utility macros. */
+#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
+ (r_node) = (a_root); \
+ if ((r_node) != NULL) { \
+ for (; \
+ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
+ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
+ } \
+ } \
+} while (0)
+
+#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
+ (r_node) = (a_root); \
+ if ((r_node) != NULL) { \
+ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
+ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
+ } \
+ } \
+} while (0)
+
+#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
+ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
+ rbtn_right_set(a_type, a_field, (a_node), \
+ rbtn_left_get(a_type, a_field, (r_node))); \
+ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
+} while (0)
+
+#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
+ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
+ rbtn_left_set(a_type, a_field, (a_node), \
+ rbtn_right_get(a_type, a_field, (r_node))); \
+ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
+} while (0)
+
+/*
+ * The rb_proto() macro generates function prototypes that correspond to the
+ * functions generated by an equivalently parameterized call to rb_gen().
+ */
+
+#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+a_attr void \
+a_prefix##new(a_rbt_type *rbtree); \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree); \
+a_attr a_type * \
+a_prefix##first(a_rbt_type *rbtree); \
+a_attr a_type * \
+a_prefix##last(a_rbt_type *rbtree); \
+a_attr a_type * \
+a_prefix##next(a_rbt_type *rbtree, a_type *node); \
+a_attr a_type * \
+a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
+a_attr a_type * \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
+a_attr a_type * \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
+a_attr a_type * \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
+a_attr void \
+a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
+a_attr void \
+a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
+a_attr a_type * \
+a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
+ a_rbt_type *, a_type *, void *), void *arg); \
+a_attr a_type * \
+a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+ void *arg);
+
+/*
+ * The rb_gen() macro generates a type-specific red-black tree implementation,
+ * based on the above cpp macros.
+ *
+ * Arguments:
+ *
+ * a_attr : Function attribute for generated functions (ex: static).
+ * a_prefix : Prefix for generated functions (ex: ex_).
+ * a_rb_type : Type for red-black tree data structure (ex: ex_t).
+ * a_type : Type for red-black tree node data structure (ex: ex_node_t).
+ * a_field : Name of red-black tree node linkage (ex: ex_link).
+ * a_cmp : Node comparison function name, with the following prototype:
+ * int (a_cmp *)(a_type *a_node, a_type *a_other);
+ * ^^^^^^
+ * or a_key
+ * Interpretation of comparison function return values:
+ * -1 : a_node < a_other
+ * 0 : a_node == a_other
+ * 1 : a_node > a_other
+ * In all cases, the a_node or a_key macro argument is the first
+ * argument to the comparison function, which makes it possible
+ * to write comparison functions that treat the first argument
+ * specially.
+ *
+ * Assuming the following setup:
+ *
+ * typedef struct ex_node_s ex_node_t;
+ * struct ex_node_s {
+ * rb_node(ex_node_t) ex_link;
+ * };
+ * typedef rb_tree(ex_node_t) ex_t;
+ * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
+ *
+ * The following API is generated:
+ *
+ * static void
+ * ex_new(ex_t *tree);
+ * Description: Initialize a red-black tree structure.
+ * Args:
+ * tree: Pointer to an uninitialized red-black tree object.
+ *
+ * static bool
+ * ex_empty(ex_t *tree);
+ * Description: Determine whether tree is empty.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * Ret: True if tree is empty, false otherwise.
+ *
+ * static ex_node_t *
+ * ex_first(ex_t *tree);
+ * static ex_node_t *
+ * ex_last(ex_t *tree);
+ * Description: Get the first/last node in tree.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * Ret: First/last node in tree, or NULL if tree is empty.
+ *
+ * static ex_node_t *
+ * ex_next(ex_t *tree, ex_node_t *node);
+ * static ex_node_t *
+ * ex_prev(ex_t *tree, ex_node_t *node);
+ * Description: Get node's successor/predecessor.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * node: A node in tree.
+ * Ret: node's successor/predecessor in tree, or NULL if node is
+ * last/first.
+ *
+ * static ex_node_t *
+ * ex_search(ex_t *tree, const ex_node_t *key);
+ * Description: Search for node that matches key.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * key : Search key.
+ * Ret: Node in tree that matches key, or NULL if no match.
+ *
+ * static ex_node_t *
+ * ex_nsearch(ex_t *tree, const ex_node_t *key);
+ * static ex_node_t *
+ * ex_psearch(ex_t *tree, const ex_node_t *key);
+ * Description: Search for node that matches key. If no match is found,
+ * return what would be key's successor/predecessor, were
+ * key in tree.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * key : Search key.
+ * Ret: Node in tree that matches key, or if no match, hypothetical node's
+ * successor/predecessor (NULL if no successor/predecessor).
+ *
+ * static void
+ * ex_insert(ex_t *tree, ex_node_t *node);
+ * Description: Insert node into tree.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * node: Node to be inserted into tree.
+ *
+ * static void
+ * ex_remove(ex_t *tree, ex_node_t *node);
+ * Description: Remove node from tree.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * node: Node in tree to be removed.
+ *
+ * static ex_node_t *
+ * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
+ * ex_node_t *, void *), void *arg);
+ * static ex_node_t *
+ * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *,
+ * ex_node_t *, void *), void *arg);
+ * Description: Iterate forward/backward over tree, starting at node. If
+ * tree is modified, iteration must be immediately
+ * terminated by the callback function that causes the
+ * modification.
+ * Args:
+ * tree : Pointer to an initialized red-black tree object.
+ * start: Node at which to start iteration, or NULL to start at
+ * first/last node.
+ * cb : Callback function, which is called for each node during
+ * iteration. Under normal circumstances the callback function
+ * should return NULL, which causes iteration to continue. If a
+ * callback function returns non-NULL, iteration is immediately
+ * terminated and the non-NULL return value is returned by the
+ * iterator. This is useful for re-starting iteration after
+ * modifying tree.
+ * arg : Opaque pointer passed to cb().
+ * Ret: NULL if iteration completed, or the non-NULL callback return value
+ * that caused termination of the iteration.
+ *
+ * static void
+ * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
+ * Description: Iterate over the tree with post-order traversal, remove
+ * each node, and run the callback if non-null. This is
+ * used for destroying a tree without paying the cost to
+ * rebalance it. The tree must not be otherwise altered
+ * during traversal.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * cb : Callback function, which, if non-null, is called for each node
+ * during iteration. There is no way to stop iteration once it
+ * has begun.
+ * arg : Opaque pointer passed to cb().
+ */
+#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_rbt_type *rbtree) { \
+ rb_new(a_type, a_field, rbtree); \
+} \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree) { \
+ return (rbtree->rbt_root == NULL); \
+} \
+a_attr a_type * \
+a_prefix##first(a_rbt_type *rbtree) { \
+ a_type *ret; \
+ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##last(a_rbt_type *rbtree) { \
+ a_type *ret; \
+ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
+ a_type *ret; \
+ if (rbtn_right_get(a_type, a_field, node) != NULL) { \
+ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
+ a_field, node), ret); \
+ } else { \
+ a_type *tnode = rbtree->rbt_root; \
+ assert(tnode != NULL); \
+ ret = NULL; \
+ while (true) { \
+ int cmp = (a_cmp)(node, tnode); \
+ if (cmp < 0) { \
+ ret = tnode; \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ break; \
+ } \
+ assert(tnode != NULL); \
+ } \
+ } \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
+ a_type *ret; \
+ if (rbtn_left_get(a_type, a_field, node) != NULL) { \
+ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
+ a_field, node), ret); \
+ } else { \
+ a_type *tnode = rbtree->rbt_root; \
+ assert(tnode != NULL); \
+ ret = NULL; \
+ while (true) { \
+ int cmp = (a_cmp)(node, tnode); \
+ if (cmp < 0) { \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ ret = tnode; \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ break; \
+ } \
+ assert(tnode != NULL); \
+ } \
+ } \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
+ a_type *ret; \
+ int cmp; \
+ ret = rbtree->rbt_root; \
+ while (ret != NULL \
+ && (cmp = (a_cmp)(key, ret)) != 0) { \
+ if (cmp < 0) { \
+ ret = rbtn_left_get(a_type, a_field, ret); \
+ } else { \
+ ret = rbtn_right_get(a_type, a_field, ret); \
+ } \
+ } \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
+ a_type *ret; \
+ a_type *tnode = rbtree->rbt_root; \
+ ret = NULL; \
+ while (tnode != NULL) { \
+ int cmp = (a_cmp)(key, tnode); \
+ if (cmp < 0) { \
+ ret = tnode; \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ ret = tnode; \
+ break; \
+ } \
+ } \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
+ a_type *ret; \
+ a_type *tnode = rbtree->rbt_root; \
+ ret = NULL; \
+ while (tnode != NULL) { \
+ int cmp = (a_cmp)(key, tnode); \
+ if (cmp < 0) { \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ ret = tnode; \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ ret = tnode; \
+ break; \
+ } \
+ } \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
+ struct { \
+ a_type *node; \
+ int cmp; \
+ } path[sizeof(void *) << 4], *pathp; \
+ rbt_node_new(a_type, a_field, rbtree, node); \
+ /* Wind. */ \
+ path->node = rbtree->rbt_root; \
+ for (pathp = path; pathp->node != NULL; pathp++) { \
+ int cmp = pathp->cmp = a_cmp(node, pathp->node); \
+ assert(cmp != 0); \
+ if (cmp < 0) { \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } else { \
+ pathp[1].node = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ } \
+ } \
+ pathp->node = node; \
+ /* Unwind. */ \
+ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
+ a_type *cnode = pathp->node; \
+ if (pathp->cmp < 0) { \
+ a_type *left = pathp[1].node; \
+ rbtn_left_set(a_type, a_field, cnode, left); \
+ if (rbtn_red_get(a_type, a_field, left)) { \
+ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+ leftleft)) { \
+ /* Fix up 4-node. */ \
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, leftleft); \
+ rbtn_rotate_right(a_type, a_field, cnode, tnode); \
+ cnode = tnode; \
+ } \
+ } else { \
+ return; \
+ } \
+ } else { \
+ a_type *right = pathp[1].node; \
+ rbtn_right_set(a_type, a_field, cnode, right); \
+ if (rbtn_red_get(a_type, a_field, right)) { \
+ a_type *left = rbtn_left_get(a_type, a_field, cnode); \
+ if (left != NULL && rbtn_red_get(a_type, a_field, \
+ left)) { \
+ /* Split 4-node. */ \
+ rbtn_black_set(a_type, a_field, left); \
+ rbtn_black_set(a_type, a_field, right); \
+ rbtn_red_set(a_type, a_field, cnode); \
+ } else { \
+ /* Lean left. */ \
+ a_type *tnode; \
+ bool tred = rbtn_red_get(a_type, a_field, cnode); \
+ rbtn_rotate_left(a_type, a_field, cnode, tnode); \
+ rbtn_color_set(a_type, a_field, tnode, tred); \
+ rbtn_red_set(a_type, a_field, cnode); \
+ cnode = tnode; \
+ } \
+ } else { \
+ return; \
+ } \
+ } \
+ pathp->node = cnode; \
+ } \
+ /* Set root, and make it black. */ \
+ rbtree->rbt_root = path->node; \
+ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
+} \
+a_attr void \
+a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
+ struct { \
+ a_type *node; \
+ int cmp; \
+ } *pathp, *nodep, path[sizeof(void *) << 4]; \
+ /* Wind. */ \
+ nodep = NULL; /* Silence compiler warning. */ \
+ path->node = rbtree->rbt_root; \
+ for (pathp = path; pathp->node != NULL; pathp++) { \
+ int cmp = pathp->cmp = a_cmp(node, pathp->node); \
+ if (cmp < 0) { \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } else { \
+ pathp[1].node = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ if (cmp == 0) { \
+ /* Find node's successor, in preparation for swap. */ \
+ pathp->cmp = 1; \
+ nodep = pathp; \
+ for (pathp++; pathp->node != NULL; \
+ pathp++) { \
+ pathp->cmp = -1; \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } \
+ break; \
+ } \
+ } \
+ } \
+ assert(nodep->node == node); \
+ pathp--; \
+ if (pathp->node != node) { \
+ /* Swap node with its successor. */ \
+ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
+ rbtn_color_set(a_type, a_field, pathp->node, \
+ rbtn_red_get(a_type, a_field, node)); \
+ rbtn_left_set(a_type, a_field, pathp->node, \
+ rbtn_left_get(a_type, a_field, node)); \
+ /* If node's successor is its right child, the following code */\
+ /* will do the wrong thing for the right child pointer. */\
+ /* However, it doesn't matter, because the pointer will be */\
+ /* properly set when the successor is pruned. */\
+ rbtn_right_set(a_type, a_field, pathp->node, \
+ rbtn_right_get(a_type, a_field, node)); \
+ rbtn_color_set(a_type, a_field, node, tred); \
+ /* The pruned leaf node's child pointers are never accessed */\
+ /* again, so don't bother setting them to nil. */\
+ nodep->node = pathp->node; \
+ pathp->node = node; \
+ if (nodep == path) { \
+ rbtree->rbt_root = nodep->node; \
+ } else { \
+ if (nodep[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, nodep[-1].node, \
+ nodep->node); \
+ } else { \
+ rbtn_right_set(a_type, a_field, nodep[-1].node, \
+ nodep->node); \
+ } \
+ } \
+ } else { \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ if (left != NULL) { \
+ /* node has no successor, but it has a left child. */\
+ /* Splice node out, without losing the left child. */\
+ assert(!rbtn_red_get(a_type, a_field, node)); \
+ assert(rbtn_red_get(a_type, a_field, left)); \
+ rbtn_black_set(a_type, a_field, left); \
+ if (pathp == path) { \
+ rbtree->rbt_root = left; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ left); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ left); \
+ } \
+ } \
+ return; \
+ } else if (pathp == path) { \
+ /* The tree only contained one node. */ \
+ rbtree->rbt_root = NULL; \
+ return; \
+ } \
+ } \
+ if (rbtn_red_get(a_type, a_field, pathp->node)) { \
+ /* Prune red node, which requires no fixup. */ \
+ assert(pathp[-1].cmp < 0); \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
+ return; \
+ } \
+ /* The node to be pruned is black, so unwind until balance is */\
+ /* restored. */\
+ pathp->node = NULL; \
+ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
+ assert(pathp->cmp != 0); \
+ if (pathp->cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp->node, \
+ pathp[1].node); \
+ if (rbtn_red_get(a_type, a_field, pathp->node)) { \
+ a_type *right = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ a_type *rightleft = rbtn_left_get(a_type, a_field, \
+ right); \
+ a_type *tnode; \
+ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+ rightleft)) { \
+ /* In the following diagrams, ||, //, and \\ */\
+ /* indicate the path to the removed node. */\
+ /* */\
+ /* || */\
+ /* pathp(r) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ /* */\
+ rbtn_black_set(a_type, a_field, pathp->node); \
+ rbtn_rotate_right(a_type, a_field, right, tnode); \
+ rbtn_right_set(a_type, a_field, pathp->node, tnode);\
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ } else { \
+ /* || */\
+ /* pathp(r) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ /* */\
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ } \
+ /* Balance restored, but rotation modified subtree */\
+ /* root. */\
+ assert((uintptr_t)pathp > (uintptr_t)path); \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } \
+ return; \
+ } else { \
+ a_type *right = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ a_type *rightleft = rbtn_left_get(a_type, a_field, \
+ right); \
+ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+ rightleft)) { \
+ /* || */\
+ /* pathp(b) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, rightleft); \
+ rbtn_rotate_right(a_type, a_field, right, tnode); \
+ rbtn_right_set(a_type, a_field, pathp->node, tnode);\
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ /* Balance restored, but rotation modified */\
+ /* subtree root, which may actually be the tree */\
+ /* root. */\
+ if (pathp == path) { \
+ /* Set root. */ \
+ rbtree->rbt_root = tnode; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } \
+ } \
+ return; \
+ } else { \
+ /* || */\
+ /* pathp(b) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ a_type *tnode; \
+ rbtn_red_set(a_type, a_field, pathp->node); \
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ pathp->node = tnode; \
+ } \
+ } \
+ } else { \
+ a_type *left; \
+ rbtn_right_set(a_type, a_field, pathp->node, \
+ pathp[1].node); \
+ left = rbtn_left_get(a_type, a_field, pathp->node); \
+ if (rbtn_red_get(a_type, a_field, left)) { \
+ a_type *tnode; \
+ a_type *leftright = rbtn_right_get(a_type, a_field, \
+ left); \
+ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
+ leftright); \
+ if (leftrightleft != NULL && rbtn_red_get(a_type, \
+ a_field, leftrightleft)) { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (r) (b) */\
+ /* \ */\
+ /* (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *unode; \
+ rbtn_black_set(a_type, a_field, leftrightleft); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ unode); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ rbtn_right_set(a_type, a_field, unode, tnode); \
+ rbtn_rotate_left(a_type, a_field, unode, tnode); \
+ } else { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (r) (b) */\
+ /* \ */\
+ /* (b) */\
+ /* / */\
+ /* (b) */\
+ assert(leftright != NULL); \
+ rbtn_red_set(a_type, a_field, leftright); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ rbtn_black_set(a_type, a_field, tnode); \
+ } \
+ /* Balance restored, but rotation modified subtree */\
+ /* root, which may actually be the tree root. */\
+ if (pathp == path) { \
+ /* Set root. */ \
+ rbtree->rbt_root = tnode; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } \
+ } \
+ return; \
+ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
+ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+ leftleft)) { \
+ /* || */\
+ /* pathp(r) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, pathp->node); \
+ rbtn_red_set(a_type, a_field, left); \
+ rbtn_black_set(a_type, a_field, leftleft); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ /* Balance restored, but rotation modified */\
+ /* subtree root. */\
+ assert((uintptr_t)pathp > (uintptr_t)path); \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } \
+ return; \
+ } else { \
+ /* || */\
+ /* pathp(r) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ rbtn_red_set(a_type, a_field, left); \
+ rbtn_black_set(a_type, a_field, pathp->node); \
+ /* Balance restored. */ \
+ return; \
+ } \
+ } else { \
+ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+ leftleft)) { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, leftleft); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ /* Balance restored, but rotation modified */\
+ /* subtree root, which may actually be the tree */\
+ /* root. */\
+ if (pathp == path) { \
+ /* Set root. */ \
+ rbtree->rbt_root = tnode; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } \
+ } \
+ return; \
+ } else { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ rbtn_red_set(a_type, a_field, left); \
+ } \
+ } \
+ } \
+ } \
+ /* Set root. */ \
+ rbtree->rbt_root = path->node; \
+ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
+} \
+a_attr a_type * \
+a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
+ if (node == NULL) { \
+ return (NULL); \
+ } else { \
+ a_type *ret; \
+ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
+ a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
+ arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg)); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
+ int cmp = a_cmp(start, node); \
+ if (cmp < 0) { \
+ a_type *ret; \
+ if ((ret = a_prefix##iter_start(rbtree, start, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
+ (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg)); \
+ } else if (cmp > 0) { \
+ return (a_prefix##iter_start(rbtree, start, \
+ rbtn_right_get(a_type, a_field, node), cb, arg)); \
+ } else { \
+ a_type *ret; \
+ if ((ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg)); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
+ a_rbt_type *, a_type *, void *), void *arg) { \
+ a_type *ret; \
+ if (start != NULL) { \
+ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
+ cb, arg); \
+ } else { \
+ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
+ } \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
+ if (node == NULL) { \
+ return (NULL); \
+ } else { \
+ a_type *ret; \
+ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
+ (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
+ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
+ void *arg) { \
+ int cmp = a_cmp(start, node); \
+ if (cmp > 0) { \
+ a_type *ret; \
+ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
+ (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ } else if (cmp < 0) { \
+ return (a_prefix##reverse_iter_start(rbtree, start, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ } else { \
+ a_type *ret; \
+ if ((ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
+ a_type *ret; \
+ if (start != NULL) { \
+ ret = a_prefix##reverse_iter_start(rbtree, start, \
+ rbtree->rbt_root, cb, arg); \
+ } else { \
+ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
+ cb, arg); \
+ } \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
+ a_type *, void *), void *arg) { \
+ if (node == NULL) { \
+ return; \
+ } \
+ a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
+ node), cb, arg); \
+ rbtn_left_set(a_type, a_field, (node), NULL); \
+ a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
+ node), cb, arg); \
+ rbtn_right_set(a_type, a_field, (node), NULL); \
+ if (cb) { \
+ cb(node, arg); \
+ } \
+} \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+ void *arg) { \
+ a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
+ rbtree->rbt_root = NULL; \
+}
+
+#endif /* RB_H_ */
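
The iterator functions generated above follow a simple short-circuit contract: the callback returns NULL to keep the in-order walk going, and the first non-NULL return value stops the walk and is propagated back out of a_prefix##iter(). The following is a standalone sketch of that contract using a simplified node type and walker rather than the rb.h macros themselves; all names are illustrative.

    #include <stdio.h>

    typedef struct node_s node_t;
    struct node_s {
        int key;
        node_t *left;
        node_t *right;
    };

    /* Mirrors iter_recurse(): NULL from cb means "keep walking"; the first
     * non-NULL return stops the in-order walk and is handed to the caller. */
    static node_t *
    iter_recurse(node_t *node, node_t *(*cb)(node_t *, void *), void *arg)
    {
        node_t *ret;

        if (node == NULL)
            return (NULL);
        if ((ret = iter_recurse(node->left, cb, arg)) != NULL ||
            (ret = cb(node, arg)) != NULL)
            return (ret);
        return (iter_recurse(node->right, cb, arg));
    }

    static node_t *
    first_ge(node_t *node, void *arg)
    {
        return (node->key >= *(int *)arg ? node : NULL);
    }

    int
    main(void)
    {
        node_t a = {10, NULL, NULL};
        node_t c = {30, NULL, NULL};
        node_t b = {20, &a, &c};    /* in-order: 10, 20, 30 */
        int threshold = 15;
        node_t *hit = iter_recurse(&b, first_ge, &threshold);

        printf("first key >= 15: %d\n", hit != NULL ? hit->key : -1); /* 20 */
        return (0);
    }

The reverse iterators use the same contract with left and right swapped.
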
diff --git a/memory/jemalloc/src/include/jemalloc/internal/rtree.h b/memory/jemalloc/src/include/jemalloc/internal/rtree.h
new file mode 100644
index 000000000..8d0c584da
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/rtree.h
@@ -0,0 +1,366 @@
+/*
+ * This radix tree implementation is tailored to the singular purpose of
+ * associating metadata with chunks that are currently owned by jemalloc.
+ *
+ *******************************************************************************
+ */
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct rtree_node_elm_s rtree_node_elm_t;
+typedef struct rtree_level_s rtree_level_t;
+typedef struct rtree_s rtree_t;
+
+/*
+ * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
+ * machine address width.
+ */
+#define LG_RTREE_BITS_PER_LEVEL 4
+#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
+/* Maximum rtree height. */
+#define RTREE_HEIGHT_MAX \
+ ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
+
+/* Used for two-stage lock-free node initialization. */
+#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
+
+/*
+ * The node allocation callback function's argument is the number of contiguous
+ * rtree_node_elm_t structures to allocate, and the resulting memory must be
+ * zeroed.
+ */
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
+typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct rtree_node_elm_s {
+ union {
+ void *pun;
+ rtree_node_elm_t *child;
+ extent_node_t *val;
+ };
+};
+
+struct rtree_level_s {
+ /*
+ * A non-NULL subtree points to a subtree rooted along the hypothetical
+ * path to the leaf node corresponding to key 0. Depending on what keys
+ * have been used to store to the tree, an arbitrary combination of
+ * subtree pointers may remain NULL.
+ *
+ * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
+ * This results in a 3-level tree, and the leftmost leaf can be directly
+ * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
+ * 0x00000000) can be accessed via subtrees[1], and the remainder of the
+ * tree can be accessed via subtrees[0].
+ *
+ * levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
+ *
+ * levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
+ *
+ * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
+ *
+ * This has practical implications on x64, which currently uses only the
+ * lower 47 bits of virtual address space in userland, thus leaving
+ * subtrees[0] unused and avoiding a level of tree traversal.
+ */
+ union {
+ void *subtree_pun;
+ rtree_node_elm_t *subtree;
+ };
+ /* Number of key bits distinguished by this level. */
+ unsigned bits;
+ /*
+ * Cumulative number of key bits distinguished by traversing to
+ * corresponding tree level.
+ */
+ unsigned cumbits;
+};
+
+struct rtree_s {
+ rtree_node_alloc_t *alloc;
+ rtree_node_dalloc_t *dalloc;
+ unsigned height;
+ /*
+ * Precomputed table used to convert from the number of leading 0 key
+ * bits to which subtree level to start at.
+ */
+ unsigned start_level[RTREE_HEIGHT_MAX];
+ rtree_level_t levels[RTREE_HEIGHT_MAX];
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+ rtree_node_dalloc_t *dalloc);
+void rtree_delete(rtree_t *rtree);
+rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
+ unsigned level);
+rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
+ rtree_node_elm_t *elm, unsigned level);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
+uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
+
+bool rtree_node_valid(rtree_node_elm_t *node);
+rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
+ bool dependent);
+rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent);
+extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
+ bool dependent);
+void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+ const extent_node_t *val);
+rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
+ bool dependent);
+rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
+ bool dependent);
+
+extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
+bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
+JEMALLOC_ALWAYS_INLINE unsigned
+rtree_start_level(rtree_t *rtree, uintptr_t key)
+{
+ unsigned start_level;
+
+ if (unlikely(key == 0))
+ return (rtree->height - 1);
+
+ start_level = rtree->start_level[lg_floor(key) >>
+ LG_RTREE_BITS_PER_LEVEL];
+ assert(start_level < rtree->height);
+ return (start_level);
+}
+
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
+{
+
+ return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ rtree->levels[level].cumbits)) & ((ZU(1) <<
+ rtree->levels[level].bits) - 1));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_node_valid(rtree_node_elm_t *node)
+{
+
+ return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
+{
+ rtree_node_elm_t *child;
+
+ /* Double-checked read (first read may be stale). */
+ child = elm->child;
+ if (!dependent && !rtree_node_valid(child))
+ child = atomic_read_p(&elm->pun);
+ assert(!dependent || child != NULL);
+ return (child);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
+ bool dependent)
+{
+ rtree_node_elm_t *child;
+
+ child = rtree_child_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(child)))
+ child = rtree_child_read_hard(rtree, elm, level);
+ assert(!dependent || child != NULL);
+ return (child);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_node_t *
+rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
+{
+
+ if (dependent) {
+ /*
+ * Reading a val on behalf of a pointer to a valid allocation is
+ * guaranteed to be a clean read even without synchronization,
+ * because the rtree update became visible in memory before the
+ * pointer came into existence.
+ */
+ return (elm->val);
+ } else {
+ /*
+ * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+ * dependent on a previous rtree write, which means a stale read
+ * could result if synchronization were omitted here.
+ */
+ return (atomic_read_p(&elm->pun));
+ }
+}
+
+JEMALLOC_INLINE void
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
+{
+
+ atomic_write_p(&elm->pun, val);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
+{
+ rtree_node_elm_t *subtree;
+
+ /* Double-checked read (first read may be stale). */
+ subtree = rtree->levels[level].subtree;
+ if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
+ assert(!dependent || subtree != NULL);
+ return (subtree);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
+{
+ rtree_node_elm_t *subtree;
+
+ subtree = rtree_subtree_tryread(rtree, level, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ subtree = rtree_subtree_read_hard(rtree, level);
+ assert(!dependent || subtree != NULL);
+ return (subtree);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_node_t *
+rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
+{
+ uintptr_t subkey;
+ unsigned start_level;
+ rtree_node_elm_t *node;
+
+ start_level = rtree_start_level(rtree, key);
+
+ node = rtree_subtree_tryread(rtree, start_level, dependent);
+#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
+ switch (start_level + RTREE_GET_BIAS) {
+#define RTREE_GET_SUBTREE(level) \
+ case level: \
+ assert(level < (RTREE_HEIGHT_MAX-1)); \
+ if (!dependent && unlikely(!rtree_node_valid(node))) \
+ return (NULL); \
+ subkey = rtree_subkey(rtree, key, level - \
+ RTREE_GET_BIAS); \
+ node = rtree_child_tryread(&node[subkey], dependent); \
+ /* Fall through. */
+#define RTREE_GET_LEAF(level) \
+ case level: \
+ assert(level == (RTREE_HEIGHT_MAX-1)); \
+ if (!dependent && unlikely(!rtree_node_valid(node))) \
+ return (NULL); \
+ subkey = rtree_subkey(rtree, key, level - \
+ RTREE_GET_BIAS); \
+ /* \
+ * node is a leaf, so it contains values rather than \
+ * child pointers. \
+ */ \
+ return (rtree_val_read(rtree, &node[subkey], \
+ dependent));
+#if RTREE_HEIGHT_MAX > 1
+ RTREE_GET_SUBTREE(0)
+#endif
+#if RTREE_HEIGHT_MAX > 2
+ RTREE_GET_SUBTREE(1)
+#endif
+#if RTREE_HEIGHT_MAX > 3
+ RTREE_GET_SUBTREE(2)
+#endif
+#if RTREE_HEIGHT_MAX > 4
+ RTREE_GET_SUBTREE(3)
+#endif
+#if RTREE_HEIGHT_MAX > 5
+ RTREE_GET_SUBTREE(4)
+#endif
+#if RTREE_HEIGHT_MAX > 6
+ RTREE_GET_SUBTREE(5)
+#endif
+#if RTREE_HEIGHT_MAX > 7
+ RTREE_GET_SUBTREE(6)
+#endif
+#if RTREE_HEIGHT_MAX > 8
+ RTREE_GET_SUBTREE(7)
+#endif
+#if RTREE_HEIGHT_MAX > 9
+ RTREE_GET_SUBTREE(8)
+#endif
+#if RTREE_HEIGHT_MAX > 10
+ RTREE_GET_SUBTREE(9)
+#endif
+#if RTREE_HEIGHT_MAX > 11
+ RTREE_GET_SUBTREE(10)
+#endif
+#if RTREE_HEIGHT_MAX > 12
+ RTREE_GET_SUBTREE(11)
+#endif
+#if RTREE_HEIGHT_MAX > 13
+ RTREE_GET_SUBTREE(12)
+#endif
+#if RTREE_HEIGHT_MAX > 14
+ RTREE_GET_SUBTREE(13)
+#endif
+#if RTREE_HEIGHT_MAX > 15
+ RTREE_GET_SUBTREE(14)
+#endif
+#if RTREE_HEIGHT_MAX > 16
+# error Unsupported RTREE_HEIGHT_MAX
+#endif
+ RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
+#undef RTREE_GET_SUBTREE
+#undef RTREE_GET_LEAF
+ default: not_reached();
+ }
+#undef RTREE_GET_BIAS
+ not_reached();
+}
+
+JEMALLOC_INLINE bool
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
+{
+ uintptr_t subkey;
+ unsigned i, start_level;
+ rtree_node_elm_t *node, *child;
+
+ start_level = rtree_start_level(rtree, key);
+
+ node = rtree_subtree_read(rtree, start_level, false);
+ if (node == NULL)
+ return (true);
+ for (i = start_level; /**/; i++, node = child) {
+ subkey = rtree_subkey(rtree, key, i);
+ if (i == rtree->height - 1) {
+ /*
+ * node is a leaf, so it contains values rather than
+ * child pointers.
+ */
+ rtree_val_write(rtree, &node[subkey], val);
+ return (false);
+ }
+ assert(i + 1 < rtree->height);
+ child = rtree_child_read(rtree, &node[subkey], i, false);
+ if (child == NULL)
+ return (true);
+ }
+ not_reached();
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
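
rtree_subkey() above slices one level's worth of key bits by shifting that slice down to bit 0 and masking off levels[level].bits bits. The standalone sketch below assumes 64-bit keys (LG_SIZEOF_PTR == 3) and 16 bits per level; the actual bits/cumbits values are chosen per level by rtree_new(), so the numbers here are illustrative only.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as rtree_subkey(), specialized to 64-bit keys. */
    static uint64_t
    subkey(uint64_t key, unsigned bits, unsigned cumbits)
    {
        return ((key >> (64 - cumbits)) & (((uint64_t)1 << bits) - 1));
    }

    int
    main(void)
    {
        uint64_t key = UINT64_C(0x00007f12deadb000);

        printf("%#" PRIx64 "\n", subkey(key, 16, 32));  /* 0x7f12 */
        printf("%#" PRIx64 "\n", subkey(key, 16, 48));  /* 0xdead */
        printf("%#" PRIx64 "\n", subkey(key, 16, 64));  /* 0xb000 */
        return (0);
    }

rtree_get() chains these slices level by level, indexing into the current node's element array until it reaches a leaf and returns the stored extent_node_t pointer.
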
diff --git a/memory/jemalloc/src/include/jemalloc/internal/size_classes.sh b/memory/jemalloc/src/include/jemalloc/internal/size_classes.sh
new file mode 100755
index 000000000..f6fbce4ef
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/size_classes.sh
@@ -0,0 +1,318 @@
+#!/bin/sh
+#
+# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
+
+# The following limits are chosen such that they cover all supported platforms.
+
+# Pointer sizes.
+lg_zarr="2 3"
+
+# Quanta.
+lg_qarr=$1
+
+# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
+lg_tmin=$2
+
+# Maximum lookup size.
+lg_kmax=12
+
+# Page sizes.
+lg_parr=`echo $3 | tr ',' ' '`
+
+# Size class group size (number of size classes for each size doubling).
+lg_g=$4
+
+pow2() {
+ e=$1
+ pow2_result=1
+ while [ ${e} -gt 0 ] ; do
+ pow2_result=$((${pow2_result} + ${pow2_result}))
+ e=$((${e} - 1))
+ done
+}
+
+lg() {
+ x=$1
+ lg_result=0
+ while [ ${x} -gt 1 ] ; do
+ lg_result=$((${lg_result} + 1))
+ x=$((${x} / 2))
+ done
+}
+
+size_class() {
+ index=$1
+ lg_grp=$2
+ lg_delta=$3
+ ndelta=$4
+ lg_p=$5
+ lg_kmax=$6
+
+ if [ ${lg_delta} -ge ${lg_p} ] ; then
+ psz="yes"
+ else
+ pow2 ${lg_p}; p=${pow2_result}
+ pow2 ${lg_grp}; grp=${pow2_result}
+ pow2 ${lg_delta}; delta=${pow2_result}
+ sz=$((${grp} + ${delta} * ${ndelta}))
+ npgs=$((${sz} / ${p}))
+ if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
+ psz="yes"
+ else
+ psz="no"
+ fi
+ fi
+
+ lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
+ if [ ${pow2_result} -lt ${ndelta} ] ; then
+ rem="yes"
+ else
+ rem="no"
+ fi
+
+ lg_size=${lg_grp}
+ if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
+ lg_size=$((${lg_grp} + 1))
+ else
+ lg_size=${lg_grp}
+ rem="yes"
+ fi
+
+ if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
+ bin="yes"
+ else
+ bin="no"
+ fi
+ if [ ${lg_size} -lt ${lg_kmax} \
+ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
+ lg_delta_lookup=${lg_delta}
+ else
+ lg_delta_lookup="no"
+ fi
+ printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
+ # Defined upon return:
+ # - psz ("yes" or "no")
+ # - bin ("yes" or "no")
+ # - lg_delta_lookup (${lg_delta} or "no")
+}
+
+sep_line() {
+ echo " \\"
+}
+
+size_classes() {
+ lg_z=$1
+ lg_q=$2
+ lg_t=$3
+ lg_p=$4
+ lg_g=$5
+
+ pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
+ pow2 ${lg_g}; g=${pow2_result}
+
+ echo "#define SIZE_CLASSES \\"
+ echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
+
+ ntbins=0
+ nlbins=0
+ lg_tiny_maxclass='"NA"'
+ nbins=0
+ npsizes=0
+
+ # Tiny size classes.
+ ndelta=0
+ index=0
+ lg_grp=${lg_t}
+ lg_delta=${lg_grp}
+ while [ ${lg_grp} -lt ${lg_q} ] ; do
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ if [ ${lg_delta_lookup} != "no" ] ; then
+ nlbins=$((${index} + 1))
+ fi
+ if [ ${psz} = "yes" ] ; then
+ npsizes=$((${npsizes} + 1))
+ fi
+ if [ ${bin} != "no" ] ; then
+ nbins=$((${index} + 1))
+ fi
+ ntbins=$((${ntbins} + 1))
+ lg_tiny_maxclass=${lg_grp} # Final written value is correct.
+ index=$((${index} + 1))
+ lg_delta=${lg_grp}
+ lg_grp=$((${lg_grp} + 1))
+ done
+
+ # First non-tiny group.
+ if [ ${ntbins} -gt 0 ] ; then
+ sep_line
+ # The first size class has an unusual encoding, because the size has to be
+ # split between grp and delta*ndelta.
+ lg_grp=$((${lg_grp} - 1))
+ ndelta=1
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ index=$((${index} + 1))
+ lg_grp=$((${lg_grp} + 1))
+ lg_delta=$((${lg_delta} + 1))
+ if [ ${psz} = "yes" ] ; then
+ npsizes=$((${npsizes} + 1))
+ fi
+ fi
+ while [ ${ndelta} -lt ${g} ] ; do
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ index=$((${index} + 1))
+ ndelta=$((${ndelta} + 1))
+ if [ ${psz} = "yes" ] ; then
+ npsizes=$((${npsizes} + 1))
+ fi
+ done
+
+ # All remaining groups.
+ lg_grp=$((${lg_grp} + ${lg_g}))
+ while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
+ sep_line
+ ndelta=1
+ if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
+ ndelta_limit=$((${g} - 1))
+ else
+ ndelta_limit=${g}
+ fi
+ while [ ${ndelta} -le ${ndelta_limit} ] ; do
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ if [ ${lg_delta_lookup} != "no" ] ; then
+ nlbins=$((${index} + 1))
+ # Final written value is correct:
+ lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ fi
+ if [ ${psz} = "yes" ] ; then
+ npsizes=$((${npsizes} + 1))
+ fi
+ if [ ${bin} != "no" ] ; then
+ nbins=$((${index} + 1))
+ # Final written value is correct:
+ small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ if [ ${lg_g} -gt 0 ] ; then
+ lg_large_minclass=$((${lg_grp} + 1))
+ else
+ lg_large_minclass=$((${lg_grp} + 2))
+ fi
+ fi
+ # Final written value is correct:
+ huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ index=$((${index} + 1))
+ ndelta=$((${ndelta} + 1))
+ done
+ lg_grp=$((${lg_grp} + 1))
+ lg_delta=$((${lg_delta} + 1))
+ done
+ echo
+ nsizes=${index}
+
+ # Defined upon completion:
+ # - ntbins
+ # - nlbins
+ # - nbins
+ # - nsizes
+ # - npsizes
+ # - lg_tiny_maxclass
+ # - lookup_maxclass
+ # - small_maxclass
+ # - lg_large_minclass
+ # - huge_maxclass
+}
+
+cat <<EOF
+/* This file was automatically generated by size_classes.sh. */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
+ * be defined prior to inclusion, and it in turn defines:
+ *
+ * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
+ * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
+ * bin, lg_delta_lookup) tuples.
+ * index: Size class index.
+ * lg_grp: Lg group base size (no deltas added).
+ * lg_delta: Lg delta to previous size class.
+ * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
+ * psz: 'yes' if a multiple of the page size, 'no' otherwise.
+ * bin: 'yes' if a small bin size class, 'no' otherwise.
+ * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
+ * otherwise.
+ * NTBINS: Number of tiny bins.
+ * NLBINS: Number of bins supported by the lookup table.
+ * NBINS: Number of small size class bins.
+ * NSIZES: Number of size classes.
+ * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
+ * LG_TINY_MAXCLASS: Lg of maximum tiny size class.
+ * LOOKUP_MAXCLASS: Maximum size class included in lookup table.
+ * SMALL_MAXCLASS: Maximum small size class.
+ * LG_LARGE_MINCLASS: Lg of minimum large size class.
+ * HUGE_MAXCLASS: Maximum (huge) size class.
+ */
+
+#define LG_SIZE_CLASS_GROUP ${lg_g}
+
+EOF
+
+for lg_z in ${lg_zarr} ; do
+ for lg_q in ${lg_qarr} ; do
+ lg_t=${lg_tmin}
+ while [ ${lg_t} -le ${lg_q} ] ; do
+ # Iterate through page sizes and compute how many bins there are.
+ for lg_p in ${lg_parr} ; do
+ echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
+ size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
+ echo "#define SIZE_CLASSES_DEFINED"
+ echo "#define NTBINS ${ntbins}"
+ echo "#define NLBINS ${nlbins}"
+ echo "#define NBINS ${nbins}"
+ echo "#define NSIZES ${nsizes}"
+ echo "#define NPSIZES ${npsizes}"
+ echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
+ echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
+ echo "#define SMALL_MAXCLASS ${small_maxclass}"
+ echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+ echo "#define HUGE_MAXCLASS ${huge_maxclass}"
+ echo "#endif"
+ echo
+ done
+ lg_t=$((${lg_t} + 1))
+ done
+ done
+done
+
+cat <<EOF
+#ifndef SIZE_CLASSES_DEFINED
+# error "No size class definitions match configuration"
+#endif
+#undef SIZE_CLASSES_DEFINED
+/*
+ * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
+ * cannot support more than 256 small size classes. Further constrain NBINS to
+ * 255 since all small size classes, plus a "not small" size class, must be
+ * stored in 8 bits of arena_chunk_map_bits_t's bits field.
+ */
+#if (NBINS > 255)
+# error "Too many small size classes"
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+EOF
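
Every SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) tuple the script emits encodes a class of (1 << lg_grp) + (ndelta << lg_delta) bytes, as described in the generated header comment. Below is a standalone sketch of how a consumer typically expands the table into an array; the two entries are invented for illustration and are not the script's actual output.

    #include <stddef.h>
    #include <stdio.h>

    /* Hand-written stand-in for the generated table (illustrative only). */
    #define SIZE_CLASSES \
        /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
        SC(  0,      3,        3,      0,  no, yes,  3) \
        SC(  1,      3,        3,      1,  no, yes,  3)

    static const size_t sizes[] = {
    #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
        ((size_t)1 << (lg_grp)) + ((size_t)(ndelta) << (lg_delta)),
        SIZE_CLASSES
    #undef SC
    };

    int
    main(void)
    {
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("class %zu: %zu bytes\n", i, sizes[i]);  /* 8, 16 */
        return (0);
    }

Redefining SC() before each expansion is what lets a single generated table drive several derived arrays.
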
diff --git a/memory/jemalloc/src/include/jemalloc/internal/smoothstep.h b/memory/jemalloc/src/include/jemalloc/internal/smoothstep.h
new file mode 100644
index 000000000..c5333ccad
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/smoothstep.h
@@ -0,0 +1,246 @@
+/*
+ * This file was generated by the following command:
+ * sh smoothstep.sh smoother 200 24 3 15
+ */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ * 3 2
+ * smoothstep(x) = -2x + 3x
+ *
+ * 5 4 3
+ * smootherstep(x) = 6x - 15x + 10x
+ *
+ * 7 6 5 4
+ * smootheststep(x) = -20x + 70x - 84x + 35x
+ */
+
+#define SMOOTHSTEP_VARIANT "smoother"
+#define SMOOTHSTEP_NSTEPS 200
+#define SMOOTHSTEP_BFP 24
+#define SMOOTHSTEP \
+ /* STEP(step, h, x, y) */ \
+ STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
+ STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
+ STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
+ STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
+ STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
+ STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
+ STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
+ STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
+ STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
+ STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
+ STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
+ STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
+ STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
+ STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
+ STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
+ STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
+ STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
+ STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
+ STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
+ STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
+ STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
+ STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
+ STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
+ STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
+ STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
+ STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
+ STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
+ STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
+ STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
+ STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
+ STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
+ STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
+ STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
+ STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
+ STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
+ STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
+ STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
+ STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
+ STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
+ STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
+ STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
+ STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
+ STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
+ STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
+ STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
+ STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
+ STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
+ STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
+ STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
+ STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
+ STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
+ STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
+ STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
+ STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
+ STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
+ STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
+ STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
+ STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
+ STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
+ STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
+ STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
+ STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
+ STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
+ STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
+ STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
+ STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
+ STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
+ STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
+ STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
+ STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
+ STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
+ STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
+ STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
+ STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
+ STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
+ STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
+ STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
+ STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
+ STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
+ STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
+ STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
+ STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
+ STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
+ STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
+ STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
+ STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
+ STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
+ STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
+ STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
+ STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
+ STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
+ STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
+ STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
+ STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
+ STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
+ STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
+ STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
+ STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
+ STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
+ STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
+ STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
+ STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
+ STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
+ STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
+ STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
+ STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
+ STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
+ STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
+ STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
+ STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
+ STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
+ STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
+ STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
+ STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
+ STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
+ STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
+ STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
+ STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
+ STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
+ STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
+ STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
+ STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
+ STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
+ STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
+ STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
+ STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
+ STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
+ STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
+ STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
+ STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
+ STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
+ STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
+ STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
+ STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
+ STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
+ STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
+ STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
+ STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
+ STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
+ STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
+ STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
+ STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
+ STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
+ STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
+ STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
+ STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
+ STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
+ STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
+ STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
+ STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
+ STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
+ STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
+ STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
+ STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
+ STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
+ STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
+ STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
+ STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
+ STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
+ STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
+ STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
+ STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
+ STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
+ STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
+ STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
+ STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
+ STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
+ STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
+ STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
+ STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
+ STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
+ STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
+ STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
+ STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
+ STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
+ STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
+ STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
+ STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
+ STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
+ STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
+ STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
+ STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
+ STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
+ STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
+ STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
+ STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
+ STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
+ STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
+ STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
+ STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
+ STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
+ STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
+ STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
+ STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
+ STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
+ STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
+ STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
+ STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
+ STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
+ STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
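
Each STEP(step, h, x, y) entry above is just y = smootherstep(x) encoded as binary fixed point: y is scaled by 2^SMOOTHSTEP_BFP (2^24 here) and truncated to an integer. A standalone sketch reproducing the midpoint entry:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static double
    smootherstep(double x)
    {
        return (6 * pow(x, 5) - 15 * pow(x, 4) + 10 * pow(x, 3));
    }

    int
    main(void)
    {
        double x = 100.0 / 200.0;    /* step 100 of SMOOTHSTEP_NSTEPS == 200 */
        double y = smootherstep(x);  /* exactly 0.5 at the midpoint */
        uint64_t h = (uint64_t)(y * (double)((uint64_t)1 << 24)); /* BFP == 24 */

        printf("y = %.15f  h = 0x%llx\n", y, (unsigned long long)h);
        /* Matches STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500...). */
        return (0);
    }

The allocator consumes the table through the STEP() macro so the curve can be evaluated without floating point at run time.
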
diff --git a/memory/jemalloc/src/include/jemalloc/internal/smoothstep.sh b/memory/jemalloc/src/include/jemalloc/internal/smoothstep.sh
new file mode 100755
index 000000000..8124693f7
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/smoothstep.sh
@@ -0,0 +1,115 @@
+#!/bin/sh
+#
+# Generate a discrete lookup table for a sigmoid function in the smoothstep
+# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
+# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
+# the entries using a binary fixed point representation.
+#
+# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
+#
+# <variant> is in {smooth, smoother, smoothest}.
+# <nsteps> must be greater than zero.
+# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
+# <xprec> is x decimal precision.
+# <yprec> is y decimal precision.
+
+#set -x
+
+cmd="sh smoothstep.sh $*"
+variant=$1
+nsteps=$2
+bfp=$3
+xprec=$4
+yprec=$5
+
+case "${variant}" in
+ smooth)
+ ;;
+ smoother)
+ ;;
+ smoothest)
+ ;;
+ *)
+ echo "Unsupported variant"
+ exit 1
+ ;;
+esac
+
+smooth() {
+ step=$1
+ y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoother() {
+ step=$1
+ y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoothest() {
+ step=$1
+ y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+cat <<EOF
+/*
+ * This file was generated by the following command:
+ * $cmd
+ */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ * 3 2
+ * smoothstep(x) = -2x + 3x
+ *
+ * 5 4 3
+ * smootherstep(x) = 6x - 15x + 10x
+ *
+ * 7 6 5 4
+ * smootheststep(x) = -20x + 70x - 84x + 35x
+ */
+
+#define SMOOTHSTEP_VARIANT "${variant}"
+#define SMOOTHSTEP_NSTEPS ${nsteps}
+#define SMOOTHSTEP_BFP ${bfp}
+#define SMOOTHSTEP \\
+ /* STEP(step, h, x, y) */ \\
+EOF
+
+s=1
+while [ $s -le $nsteps ] ; do
+ $variant ${s}
+ x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
+
+ s=$((s+1))
+done
+echo
+
+cat <<EOF
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+EOF
diff --git a/memory/jemalloc/src/include/jemalloc/internal/spin.h b/memory/jemalloc/src/include/jemalloc/internal/spin.h
new file mode 100644
index 000000000..9ef5ceb92
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/spin.h
@@ -0,0 +1,51 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct spin_s spin_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct spin_s {
+ unsigned iteration;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void spin_init(spin_t *spin);
+void spin_adaptive(spin_t *spin);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
+JEMALLOC_INLINE void
+spin_init(spin_t *spin)
+{
+
+ spin->iteration = 0;
+}
+
+JEMALLOC_INLINE void
+spin_adaptive(spin_t *spin)
+{
+ volatile uint64_t i;
+
+ for (i = 0; i < (KQU(1) << spin->iteration); i++)
+ CPU_SPINWAIT;
+
+ if (spin->iteration < 63)
+ spin->iteration++;
+}
+
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
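
spin_adaptive() doubles the number of CPU_SPINWAIT iterations on every call (capped at 2^63), so a caller that retries in a loop backs off exponentially. The standalone sketch below shows the same pattern around a C11 atomic_flag; the flag-based lock and the x86 pause instruction are illustrative stand-ins, not part of this patch.

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_flag lock = ATOMIC_FLAG_INIT;

    static void
    cpu_relax(void)       /* stand-in for CPU_SPINWAIT */
    {
    #if defined(__x86_64__) || defined(__i386__)
        __asm__ __volatile__("pause");
    #endif
    }

    static void
    spin_lock(void)
    {
        unsigned iteration = 0;                 /* spin_init() */

        while (atomic_flag_test_and_set_explicit(&lock,
            memory_order_acquire)) {
            uint64_t i;

            /* Back off: 1, 2, 4, ... pause iterations per failed attempt. */
            for (i = 0; i < ((uint64_t)1 << iteration); i++)
                cpu_relax();
            if (iteration < 63)
                iteration++;                    /* spin_adaptive() */
        }
    }

    static void
    spin_unlock(void)
    {
        atomic_flag_clear_explicit(&lock, memory_order_release);
    }

    int
    main(void)
    {
        spin_lock();
        spin_unlock();
        return (0);
    }
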
diff --git a/memory/jemalloc/src/include/jemalloc/internal/stats.h b/memory/jemalloc/src/include/jemalloc/internal/stats.h
new file mode 100644
index 000000000..b62181783
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/stats.h
@@ -0,0 +1,201 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct tcache_bin_stats_s tcache_bin_stats_t;
+typedef struct malloc_bin_stats_s malloc_bin_stats_t;
+typedef struct malloc_large_stats_s malloc_large_stats_t;
+typedef struct malloc_huge_stats_s malloc_huge_stats_t;
+typedef struct arena_stats_s arena_stats_t;
+typedef struct chunk_stats_s chunk_stats_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct tcache_bin_stats_s {
+ /*
+ * Number of allocation requests that corresponded to the size of this
+ * bin.
+ */
+ uint64_t nrequests;
+};
+
+struct malloc_bin_stats_s {
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the bin. Note that tcache may allocate an object, then recycle it
+ * many times, resulting in many increments to nrequests, but only one
+ * each to nmalloc and ndalloc.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /*
+ * Number of allocation requests that correspond to the size of this
+ * bin. This includes requests served by tcache, though tcache only
+ * periodically merges into this counter.
+ */
+ uint64_t nrequests;
+
+ /*
+ * Current number of regions of this size class, including regions
+ * currently cached by tcache.
+ */
+ size_t curregs;
+
+ /* Number of tcache fills from this bin. */
+ uint64_t nfills;
+
+ /* Number of tcache flushes to this bin. */
+ uint64_t nflushes;
+
+ /* Total number of runs created for this bin's size class. */
+ uint64_t nruns;
+
+ /*
+ * Total number of runs reused by extracting them from the runs tree for
+ * this bin's size class.
+ */
+ uint64_t reruns;
+
+ /* Current number of runs in this bin. */
+ size_t curruns;
+};
+
+struct malloc_large_stats_s {
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the arena. Note that tcache may allocate an object, then recycle it
+ * many times, resulting in many increments to nrequests, but only one
+ * each to nmalloc and ndalloc.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /*
+ * Number of allocation requests that correspond to this size class.
+ * This includes requests served by tcache, though tcache only
+ * periodically merges into this counter.
+ */
+ uint64_t nrequests;
+
+ /*
+ * Current number of runs of this size class, including runs currently
+ * cached by tcache.
+ */
+ size_t curruns;
+};
+
+struct malloc_huge_stats_s {
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the arena.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /* Current number of (multi-)chunk allocations of this size class. */
+ size_t curhchunks;
+};
+
+struct arena_stats_s {
+ /* Number of bytes currently mapped. */
+ size_t mapped;
+
+ /*
+ * Number of bytes currently retained as a side effect of munmap() being
+ * disabled/bypassed. Retained bytes are technically mapped (though
+ * always decommitted or purged), but they are excluded from the mapped
+ * statistic (above).
+ */
+ size_t retained;
+
+ /*
+ * Total number of purge sweeps, total number of madvise calls made,
+ * and total pages purged in order to keep dirty unused memory under
+ * control.
+ */
+ uint64_t npurge;
+ uint64_t nmadvise;
+ uint64_t purged;
+
+ /*
+ * Number of bytes currently mapped purely for metadata purposes, and
+ * number of bytes currently allocated for internal metadata.
+ */
+ size_t metadata_mapped;
+ size_t metadata_allocated; /* Protected via atomic_*_z(). */
+
+ /* Per-size-category statistics. */
+ size_t allocated_large;
+ uint64_t nmalloc_large;
+ uint64_t ndalloc_large;
+ uint64_t nrequests_large;
+
+ size_t allocated_huge;
+ uint64_t nmalloc_huge;
+ uint64_t ndalloc_huge;
+
+ /* One element for each large size class. */
+ malloc_large_stats_t *lstats;
+
+ /* One element for each huge size class. */
+ malloc_huge_stats_t *hstats;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern bool opt_stats_print;
+
+extern size_t stats_cactive;
+
+void stats_print(void (*write)(void *, const char *), void *cbopaque,
+ const char *opts);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+size_t stats_cactive_get(void);
+void stats_cactive_add(size_t size);
+void stats_cactive_sub(size_t size);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
+JEMALLOC_INLINE size_t
+stats_cactive_get(void)
+{
+
+ return (atomic_read_z(&stats_cactive));
+}
+
+JEMALLOC_INLINE void
+stats_cactive_add(size_t size)
+{
+ UNUSED size_t cactive;
+
+ assert(size > 0);
+ assert((size & chunksize_mask) == 0);
+
+ cactive = atomic_add_z(&stats_cactive, size);
+ assert(cactive - size < cactive);
+}
+
+JEMALLOC_INLINE void
+stats_cactive_sub(size_t size)
+{
+ UNUSED size_t cactive;
+
+ assert(size > 0);
+ assert((size & chunksize_mask) == 0);
+
+ cactive = atomic_sub_z(&stats_cactive, size);
+ assert(cactive + size > cactive);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
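
stats_print() takes a write callback and an opaque pointer rather than a FILE *, so the report can be routed to any sink. A minimal sketch of a callback with the expected shape; the stderr sink and the commented invocation are illustrative only.

    #include <stdio.h>
    #include <string.h>

    /* Receives opaque caller state plus the next chunk of formatted output. */
    static void
    write_cb(void *cbopaque, const char *s)
    {
        fwrite(s, 1, strlen(s), (FILE *)cbopaque);
    }

    int
    main(void)
    {
        /* Inside jemalloc the call would look like:
         *     stats_print(write_cb, stderr, NULL);
         * with a NULL opts string requesting the default (full) report. */
        write_cb(stderr, "example output\n");
        return (0);
    }
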
diff --git a/memory/jemalloc/src/include/jemalloc/internal/tcache.h b/memory/jemalloc/src/include/jemalloc/internal/tcache.h
new file mode 100644
index 000000000..01ba062de
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/tcache.h
@@ -0,0 +1,469 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct tcache_bin_info_s tcache_bin_info_t;
+typedef struct tcache_bin_s tcache_bin_t;
+typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
+
+/*
+ * tcache pointers close to NULL are used to encode state information that is
+ * used for two purposes: preventing thread caching on a per thread basis and
+ * cleaning up during thread shutdown.
+ */
+#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
+#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
+#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
+#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
+
+/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define TCACHE_NSLOTS_SMALL_MIN 20
+
+/*
+ * Absolute maximum number of cache slots for each small bin in the thread
+ * cache. This is an additional constraint beyond the per-size-class limit
+ * of twice the number of regions per run.
+ *
+ * This constant must be an even number.
+ */
+#define TCACHE_NSLOTS_SMALL_MAX 200
+
+/* Number of cache slots for large size classes. */
+#define TCACHE_NSLOTS_LARGE 20
+
+/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
+#define LG_TCACHE_MAXCLASS_DEFAULT 15
+
+/*
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps. Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
+ */
+#define TCACHE_GC_SWEEP 8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define TCACHE_GC_INCR \
+ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+typedef enum {
+ tcache_enabled_false = 0, /* Enable cast to/from bool. */
+ tcache_enabled_true = 1,
+ tcache_enabled_default = 2
+} tcache_enabled_t;
+
+/*
+ * Read-only information associated with each element of tcache_t's tbins array
+ * is stored separately, mainly to reduce memory usage.
+ */
+struct tcache_bin_info_s {
+ unsigned ncached_max; /* Upper limit on ncached. */
+};
+
+struct tcache_bin_s {
+ tcache_bin_stats_t tstats;
+ int low_water; /* Min # cached since last GC. */
+ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
+ unsigned ncached; /* # of cached objects. */
+ /*
+ * To make use of adjacent cacheline prefetch, the items in the avail
+ * stack go to higher addresses for newer allocations. avail points
+ * just above the available space, which means that
+ * avail[-ncached, ... -1] are available items and the lowest item will
+ * be allocated first.
+ */
+ void **avail; /* Stack of available objects. */
+};
+
+struct tcache_s {
+ ql_elm(tcache_t) link; /* Used for aggregating stats. */
+ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
+ ticker_t gc_ticker; /* Drives incremental GC. */
+ szind_t next_gc_bin; /* Next bin to GC. */
+ tcache_bin_t tbins[1]; /* Dynamically sized. */
+ /*
+ * The pointer stacks associated with tbins follow as a contiguous
+ * array. During tcache initialization, the avail pointer in each
+ * element of tbins is initialized to point to the proper offset within
+ * this array.
+ */
+};
+
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+ union {
+ tcache_t *tcache;
+ tcaches_t *next;
+ };
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern bool opt_tcache;
+extern ssize_t opt_lg_tcache_max;
+
+extern tcache_bin_info_t *tcache_bin_info;
+
+/*
+ * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
+ * large-object bins.
+ */
+extern unsigned nhbins;
+
+/* Maximum cached size class. */
+extern size_t tcache_maxclass;
+
+/*
+ * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
+ * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
+ * completely disjoint from this data structure. tcaches starts off as a sparse
+ * array, so it has no physical memory footprint until individual pages are
+ * touched. This allows the entire array to be allocated the first time an
+ * explicit tcache is created without a disproportionate impact on memory usage.
+ */
+extern tcaches_t *tcaches;
+
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
+ arena_t *oldarena, arena_t *newarena);
+tcache_t *tcache_get_hard(tsd_t *tsd);
+tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
+void tcache_cleanup(tsd_t *tsd);
+void tcache_enabled_cleanup(tsd_t *tsd);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcaches_flush(tsd_t *tsd, unsigned ind);
+void tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool tcache_boot(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void tcache_event(tsd_t *tsd, tcache_t *tcache);
+void tcache_flush(void);
+bool tcache_enabled_get(void);
+tcache_t *tcache_get(tsd_t *tsd, bool create);
+void tcache_enabled_set(bool enabled);
+void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
+void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ size_t size, szind_t ind, bool zero, bool slow_path);
+void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ size_t size, szind_t ind, bool zero, bool slow_path);
+void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
+ szind_t binind, bool slow_path);
+void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
+ size_t size, bool slow_path);
+tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
+JEMALLOC_INLINE void
+tcache_flush(void)
+{
+ tsd_t *tsd;
+
+ cassert(config_tcache);
+
+ tsd = tsd_fetch();
+ tcache_cleanup(tsd);
+}
+
+JEMALLOC_INLINE bool
+tcache_enabled_get(void)
+{
+ tsd_t *tsd;
+ tcache_enabled_t tcache_enabled;
+
+ cassert(config_tcache);
+
+ tsd = tsd_fetch();
+ tcache_enabled = tsd_tcache_enabled_get(tsd);
+ if (tcache_enabled == tcache_enabled_default) {
+ tcache_enabled = (tcache_enabled_t)opt_tcache;
+ tsd_tcache_enabled_set(tsd, tcache_enabled);
+ }
+
+ return ((bool)tcache_enabled);
+}
+
+JEMALLOC_INLINE void
+tcache_enabled_set(bool enabled)
+{
+ tsd_t *tsd;
+ tcache_enabled_t tcache_enabled;
+
+ cassert(config_tcache);
+
+ tsd = tsd_fetch();
+
+ tcache_enabled = (tcache_enabled_t)enabled;
+ tsd_tcache_enabled_set(tsd, tcache_enabled);
+
+ if (!enabled)
+ tcache_cleanup(tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get(tsd_t *tsd, bool create)
+{
+ tcache_t *tcache;
+
+ if (!config_tcache)
+ return (NULL);
+
+ tcache = tsd_tcache_get(tsd);
+ if (!create)
+ return (tcache);
+ if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
+ tcache = tcache_get_hard(tsd);
+ tsd_tcache_set(tsd, tcache);
+ }
+
+ return (tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_event(tsd_t *tsd, tcache_t *tcache)
+{
+
+ if (TCACHE_GC_INCR == 0)
+ return;
+
+ if (unlikely(ticker_tick(&tcache->gc_ticker)))
+ tcache_event_hard(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
+{
+ void *ret;
+
+ if (unlikely(tbin->ncached == 0)) {
+ tbin->low_water = -1;
+ *tcache_success = false;
+ return (NULL);
+ }
+ /*
+ * tcache_success (instead of ret) should be checked upon the return of
+ * this function. We avoid checking (ret == NULL) because there is
+	 * never a NULL stored on the avail stack (a fact unknown to the
+	 * compiler), and eagerly checking ret would cause a pipeline stall
+	 * (waiting for the cacheline).
+ */
+ *tcache_success = true;
+ ret = *(tbin->avail - tbin->ncached);
+ tbin->ncached--;
+
+ if (unlikely((int)tbin->ncached < tbin->low_water))
+ tbin->low_water = tbin->ncached;
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ szind_t binind, bool zero, bool slow_path)
+{
+ void *ret;
+ tcache_bin_t *tbin;
+ bool tcache_success;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(binind < NBINS);
+ tbin = &tcache->tbins[binind];
+ ret = tcache_alloc_easy(tbin, &tcache_success);
+ assert(tcache_success == (ret != NULL));
+ if (unlikely(!tcache_success)) {
+ bool tcache_hard_success;
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
+ ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
+ tbin, binind, &tcache_hard_success);
+ if (tcache_hard_success == false)
+ return (NULL);
+ }
+
+ assert(ret);
+ /*
+ * Only compute usize if required. The checks in the following if
+ * statement are all static.
+ */
+ if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
+ usize = index2size(binind);
+ assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
+ }
+
+ if (likely(!zero)) {
+ if (slow_path && config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ } else {
+ if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ memset(ret, 0, usize);
+ }
+
+ if (config_stats)
+ tbin->tstats.nrequests++;
+ if (config_prof)
+ tcache->prof_accumbytes += usize;
+ tcache_event(tsd, tcache);
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ szind_t binind, bool zero, bool slow_path)
+{
+ void *ret;
+ tcache_bin_t *tbin;
+ bool tcache_success;
+
+ assert(binind < nhbins);
+ tbin = &tcache->tbins[binind];
+ ret = tcache_alloc_easy(tbin, &tcache_success);
+ assert(tcache_success == (ret != NULL));
+ if (unlikely(!tcache_success)) {
+ /*
+ * Only allocate one large object at a time, because it's quite
+ * expensive to create one and not use it.
+ */
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
+ ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
+ if (ret == NULL)
+ return (NULL);
+ } else {
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+		/* Only compute usize on demand. */
+ if (config_prof || (slow_path && config_fill) ||
+ unlikely(zero)) {
+ usize = index2size(binind);
+ assert(usize <= tcache_maxclass);
+ }
+
+ if (config_prof && usize == LARGE_MINCLASS) {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+ size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+ LG_PAGE);
+ arena_mapbits_large_binind_set(chunk, pageind,
+ BININD_INVALID);
+ }
+ if (likely(!zero)) {
+ if (slow_path && config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ memset(ret, JEMALLOC_ALLOC_JUNK,
+ usize);
+ } else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ } else
+ memset(ret, 0, usize);
+
+ if (config_stats)
+ tbin->tstats.nrequests++;
+ if (config_prof)
+ tcache->prof_accumbytes += usize;
+ }
+
+ tcache_event(tsd, tcache);
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
+ bool slow_path)
+{
+ tcache_bin_t *tbin;
+ tcache_bin_info_t *tbin_info;
+
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
+
+ if (slow_path && config_fill && unlikely(opt_junk_free))
+ arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+
+ tbin = &tcache->tbins[binind];
+ tbin_info = &tcache_bin_info[binind];
+ if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ (tbin_info->ncached_max >> 1));
+ }
+ assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->ncached++;
+ *(tbin->avail - tbin->ncached) = ptr;
+
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
+ bool slow_path)
+{
+ szind_t binind;
+ tcache_bin_t *tbin;
+ tcache_bin_info_t *tbin_info;
+
+ assert((size & PAGE_MASK) == 0);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
+
+ binind = size2index(size);
+
+ if (slow_path && config_fill && unlikely(opt_junk_free))
+ arena_dalloc_junk_large(ptr, size);
+
+ tbin = &tcache->tbins[binind];
+ tbin_info = &tcache_bin_info[binind];
+ if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+ tcache_bin_flush_large(tsd, tbin, binind,
+ (tbin_info->ncached_max >> 1), tcache);
+ }
+ assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->ncached++;
+ *(tbin->avail - tbin->ncached) = ptr;
+
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcaches_get(tsd_t *tsd, unsigned ind)
+{
+ tcaches_t *elm = &tcaches[ind];
+ if (unlikely(elm->tcache == NULL)) {
+ elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
+ NULL));
+ }
+ return (elm->tcache);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
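The avail-stack comment above is easiest to see with a small standalone model. The sketch below mirrors the index arithmetic of tcache_alloc_easy() and tcache_dalloc_small(): avail points one past the slot array, freed pointers are pushed at avail[-ncached], and the lowest (most recently pushed) item is popped first. The toy_* names and NCACHED_MAX are illustrative stand-ins, not jemalloc code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define	NCACHED_MAX	8

typedef struct {
	unsigned	ncached;
	void		*slots[NCACHED_MAX];
	void		**avail;	/* Points one past slots[]. */
} toy_bin_t;

static void
toy_bin_init(toy_bin_t *tbin)
{

	tbin->ncached = 0;
	tbin->avail = &tbin->slots[NCACHED_MAX];
}

/* Mirrors tcache_dalloc_*(): push at the new lowest occupied slot. */
static void
toy_push(toy_bin_t *tbin, void *ptr)
{

	assert(tbin->ncached < NCACHED_MAX);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;
}

/* Mirrors tcache_alloc_easy(): pop the lowest (most recently pushed) item. */
static void *
toy_pop(toy_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0)
		return (NULL);
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;
	return (ret);
}

int
main(void)
{
	toy_bin_t tbin;
	int a, b, first_is_b, second_is_a;

	toy_bin_init(&tbin);
	toy_push(&tbin, &a);
	toy_push(&tbin, &b);
	/* LIFO: the most recently pushed pointer comes back first. */
	first_is_b = (toy_pop(&tbin) == (void *)&b);
	second_is_a = (toy_pop(&tbin) == (void *)&a);
	printf("%d %d\n", first_is_b, second_is_a);
	return (0);
}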
diff --git a/memory/jemalloc/src/include/jemalloc/internal/ticker.h b/memory/jemalloc/src/include/jemalloc/internal/ticker.h
new file mode 100644
index 000000000..4696e56d2
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/ticker.h
@@ -0,0 +1,75 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct ticker_s ticker_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct ticker_s {
+ int32_t tick;
+ int32_t nticks;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void ticker_init(ticker_t *ticker, int32_t nticks);
+void ticker_copy(ticker_t *ticker, const ticker_t *other);
+int32_t ticker_read(const ticker_t *ticker);
+bool ticker_ticks(ticker_t *ticker, int32_t nticks);
+bool ticker_tick(ticker_t *ticker);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
+JEMALLOC_INLINE void
+ticker_init(ticker_t *ticker, int32_t nticks)
+{
+
+ ticker->tick = nticks;
+ ticker->nticks = nticks;
+}
+
+JEMALLOC_INLINE void
+ticker_copy(ticker_t *ticker, const ticker_t *other)
+{
+
+ *ticker = *other;
+}
+
+JEMALLOC_INLINE int32_t
+ticker_read(const ticker_t *ticker)
+{
+
+ return (ticker->tick);
+}
+
+JEMALLOC_INLINE bool
+ticker_ticks(ticker_t *ticker, int32_t nticks)
+{
+
+ if (unlikely(ticker->tick < nticks)) {
+ ticker->tick = ticker->nticks;
+ return (true);
+ }
+ ticker->tick -= nticks;
+	return (false);
+}
+
+JEMALLOC_INLINE bool
+ticker_tick(ticker_t *ticker)
+{
+
+ return (ticker_ticks(ticker, 1));
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
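For reference, the sketch below models how tcache_event() uses a ticker: each allocation or deallocation event calls the tick function, and roughly once every nticks events it returns true so the caller can run an incremental GC pass. The toy_* names and the nticks value of 100 are illustrative; the real code uses ticker_ticks()/ticker_tick() above together with TCACHE_GC_INCR.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int32_t	tick;
	int32_t	nticks;
} toy_ticker_t;

static void
toy_ticker_init(toy_ticker_t *ticker, int32_t nticks)
{

	ticker->tick = nticks;
	ticker->nticks = nticks;
}

/* Returns true roughly once every nticks calls, then rearms itself. */
static bool
toy_ticker_tick(toy_ticker_t *ticker)
{

	if (ticker->tick < 1) {
		ticker->tick = ticker->nticks;
		return (true);
	}
	ticker->tick--;
	return (false);
}

int
main(void)
{
	toy_ticker_t gc_ticker;
	int i, fired = 0;

	toy_ticker_init(&gc_ticker, 100);	/* Stand-in for TCACHE_GC_INCR. */
	for (i = 0; i < 1000; i++) {
		/* One call per allocation/deallocation event. */
		if (toy_ticker_tick(&gc_ticker))
			fired++;	/* Incremental GC would run here. */
	}
	printf("fired %d times\n", fired);
	return (0);
}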
diff --git a/memory/jemalloc/src/include/jemalloc/internal/tsd.h b/memory/jemalloc/src/include/jemalloc/internal/tsd.h
new file mode 100644
index 000000000..9055acafd
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/tsd.h
@@ -0,0 +1,787 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Maximum number of malloc_tsd users with cleanup functions. */
+#define MALLOC_TSD_CLEANUPS_MAX 2
+
+typedef bool (*malloc_tsd_cleanup_t)(void);
+
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+typedef struct tsd_init_block_s tsd_init_block_t;
+typedef struct tsd_init_head_s tsd_init_head_t;
+#endif
+
+typedef struct tsd_s tsd_t;
+typedef struct tsdn_s tsdn_t;
+
+#define TSDN_NULL ((tsdn_t *)0)
+
+typedef enum {
+ tsd_state_uninitialized,
+ tsd_state_nominal,
+ tsd_state_purgatory,
+ tsd_state_reincarnated
+} tsd_state_t;
+
+/*
+ * TLS/TSD-agnostic macro-based implementation of thread-specific data. There
+ * are five macros that support (at least) three use cases: file-private,
+ * library-private, and library-private inlined. Following is an example
+ * library-private tsd variable:
+ *
+ * In example.h:
+ * typedef struct {
+ * int x;
+ * int y;
+ * } example_t;
+ * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
+ * malloc_tsd_types(example_, example_t)
+ * malloc_tsd_protos(, example_, example_t)
+ * malloc_tsd_externs(example_, example_t)
+ * In example.c:
+ * malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
+ * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
+ * example_tsd_cleanup)
+ *
+ * The result is a set of generated functions, e.g.:
+ *
+ * bool example_tsd_boot(void) {...}
+ * bool example_tsd_booted_get(void) {...}
+ * example_t *example_tsd_get(bool init) {...}
+ * void example_tsd_set(example_t *val) {...}
+ *
+ * Note that all of the functions deal in terms of (a_type *) rather than
+ * (a_type) so that it is possible to support non-pointer types (unlike
+ * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
+ * cast to (void *). This means that the cleanup function needs to cast the
+ * function argument to (a_type *), then dereference the resulting pointer to
+ * access fields, e.g.
+ *
+ * void
+ * example_tsd_cleanup(void *arg)
+ * {
+ * example_t *example = (example_t *)arg;
+ *
+ * example->x = 42;
+ * [...]
+ * if ([want the cleanup function to be called again])
+ * example_tsd_set(example);
+ * }
+ *
+ * If example_tsd_set() is called within example_tsd_cleanup(), it will be
+ * called again. This is similar to how pthreads TSD destruction works, except
+ * that pthreads only calls the cleanup function again if the value was set to
+ * non-NULL.
+ */
+
+/* malloc_tsd_types(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define malloc_tsd_types(a_name, a_type)
+#elif (defined(JEMALLOC_TLS))
+#define malloc_tsd_types(a_name, a_type)
+#elif (defined(_WIN32))
+#define malloc_tsd_types(a_name, a_type) \
+typedef struct { \
+ bool initialized; \
+ a_type val; \
+} a_name##tsd_wrapper_t;
+#else
+#define malloc_tsd_types(a_name, a_type) \
+typedef struct { \
+ bool initialized; \
+ a_type val; \
+} a_name##tsd_wrapper_t;
+#endif
+
+/* malloc_tsd_protos(). */
+#define malloc_tsd_protos(a_attr, a_name, a_type) \
+a_attr bool \
+a_name##tsd_boot0(void); \
+a_attr void \
+a_name##tsd_boot1(void); \
+a_attr bool \
+a_name##tsd_boot(void); \
+a_attr bool \
+a_name##tsd_booted_get(void); \
+a_attr a_type * \
+a_name##tsd_get(bool init); \
+a_attr void \
+a_name##tsd_set(a_type *val);
+
+/* malloc_tsd_externs(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define malloc_tsd_externs(a_name, a_type) \
+extern __thread a_type a_name##tsd_tls; \
+extern __thread bool a_name##tsd_initialized; \
+extern bool a_name##tsd_booted;
+#elif (defined(JEMALLOC_TLS))
+#define malloc_tsd_externs(a_name, a_type) \
+extern __thread a_type a_name##tsd_tls; \
+extern pthread_key_t a_name##tsd_tsd; \
+extern bool a_name##tsd_booted;
+#elif (defined(_WIN32))
+#define malloc_tsd_externs(a_name, a_type) \
+extern DWORD a_name##tsd_tsd; \
+extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
+extern bool a_name##tsd_booted;
+#else
+#define malloc_tsd_externs(a_name, a_type) \
+extern pthread_key_t a_name##tsd_tsd; \
+extern tsd_init_head_t a_name##tsd_init_head; \
+extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
+extern bool a_name##tsd_booted;
+#endif
+
+/* malloc_tsd_data(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+a_attr __thread a_type JEMALLOC_TLS_MODEL \
+ a_name##tsd_tls = a_initializer; \
+a_attr __thread bool JEMALLOC_TLS_MODEL \
+ a_name##tsd_initialized = false; \
+a_attr bool a_name##tsd_booted = false;
+#elif (defined(JEMALLOC_TLS))
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+a_attr __thread a_type JEMALLOC_TLS_MODEL \
+ a_name##tsd_tls = a_initializer; \
+a_attr pthread_key_t a_name##tsd_tsd; \
+a_attr bool a_name##tsd_booted = false;
+#elif (defined(_WIN32))
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+a_attr DWORD a_name##tsd_tsd; \
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
+ false, \
+ a_initializer \
+}; \
+a_attr bool a_name##tsd_booted = false;
+#else
+#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
+a_attr pthread_key_t a_name##tsd_tsd; \
+a_attr tsd_init_head_t a_name##tsd_init_head = { \
+ ql_head_initializer(blocks), \
+ MALLOC_MUTEX_INITIALIZER \
+}; \
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
+ false, \
+ a_initializer \
+}; \
+a_attr bool a_name##tsd_booted = false;
+#endif
+
+/* malloc_tsd_funcs(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+ a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr bool \
+a_name##tsd_cleanup_wrapper(void) \
+{ \
+ \
+ if (a_name##tsd_initialized) { \
+ a_name##tsd_initialized = false; \
+ a_cleanup(&a_name##tsd_tls); \
+ } \
+ return (a_name##tsd_initialized); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ malloc_tsd_cleanup_register( \
+ &a_name##tsd_cleanup_wrapper); \
+ } \
+ a_name##tsd_booted = true; \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ \
+ /* Do nothing. */ \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ return (a_name##tsd_boot0()); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+ return (false); \
+} \
+/* Get/set. */ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+ \
+ assert(a_name##tsd_booted); \
+ return (&a_name##tsd_tls); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+ \
+ assert(a_name##tsd_booted); \
+ a_name##tsd_tls = (*val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) \
+ a_name##tsd_initialized = true; \
+}
+#elif (defined(JEMALLOC_TLS))
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+ a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
+ 0) \
+ return (true); \
+ } \
+ a_name##tsd_booted = true; \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ \
+ /* Do nothing. */ \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ return (a_name##tsd_boot0()); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+ return (false); \
+} \
+/* Get/set. */ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+ \
+ assert(a_name##tsd_booted); \
+ return (&a_name##tsd_tls); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+ \
+ assert(a_name##tsd_booted); \
+ a_name##tsd_tls = (*val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ if (pthread_setspecific(a_name##tsd_tsd, \
+ (void *)(&a_name##tsd_tls))) { \
+ malloc_write("<jemalloc>: Error" \
+ " setting TSD for "#a_name"\n"); \
+ if (opt_abort) \
+ abort(); \
+ } \
+ } \
+}
+#elif (defined(_WIN32))
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+ a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr bool \
+a_name##tsd_cleanup_wrapper(void) \
+{ \
+ DWORD error = GetLastError(); \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+ TlsGetValue(a_name##tsd_tsd); \
+ SetLastError(error); \
+ \
+ if (wrapper == NULL) \
+ return (false); \
+ if (a_cleanup != malloc_tsd_no_cleanup && \
+ wrapper->initialized) { \
+ wrapper->initialized = false; \
+ a_cleanup(&wrapper->val); \
+ if (wrapper->initialized) { \
+ /* Trigger another cleanup round. */ \
+ return (true); \
+ } \
+ } \
+ malloc_tsd_dalloc(wrapper); \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
+{ \
+ \
+ if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error setting" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+} \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(bool init) \
+{ \
+ DWORD error = GetLastError(); \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+ TlsGetValue(a_name##tsd_tsd); \
+ SetLastError(error); \
+ \
+ if (init && unlikely(wrapper == NULL)) { \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } else { \
+ wrapper->initialized = false; \
+ wrapper->val = a_initializer; \
+ } \
+ a_name##tsd_wrapper_set(wrapper); \
+ } \
+ return (wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+ a_name##tsd_tsd = TlsAlloc(); \
+ if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
+ return (true); \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ malloc_tsd_cleanup_register( \
+ &a_name##tsd_cleanup_wrapper); \
+ } \
+ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+ a_name##tsd_booted = true; \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+ memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+ sizeof(a_name##tsd_wrapper_t)); \
+ a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ if (a_name##tsd_boot0()) \
+ return (true); \
+ a_name##tsd_boot1(); \
+ return (false); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+ return (true); \
+} \
+/* Get/set. */ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(init); \
+ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
+ return (NULL); \
+ return (&wrapper->val); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(true); \
+ wrapper->val = *(val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) \
+ wrapper->initialized = true; \
+}
+#else
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+ a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr void \
+a_name##tsd_cleanup_wrapper(void *arg) \
+{ \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
+ \
+ if (a_cleanup != malloc_tsd_no_cleanup && \
+ wrapper->initialized) { \
+ wrapper->initialized = false; \
+ a_cleanup(&wrapper->val); \
+ if (wrapper->initialized) { \
+ /* Trigger another cleanup round. */ \
+ if (pthread_setspecific(a_name##tsd_tsd, \
+ (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error" \
+ " setting TSD for "#a_name"\n"); \
+ if (opt_abort) \
+ abort(); \
+ } \
+ return; \
+ } \
+ } \
+ malloc_tsd_dalloc(wrapper); \
+} \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
+{ \
+ \
+ if (pthread_setspecific(a_name##tsd_tsd, \
+ (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error setting" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+} \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(bool init) \
+{ \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+ pthread_getspecific(a_name##tsd_tsd); \
+ \
+ if (init && unlikely(wrapper == NULL)) { \
+ tsd_init_block_t block; \
+ wrapper = tsd_init_check_recursion( \
+ &a_name##tsd_init_head, &block); \
+ if (wrapper) \
+ return (wrapper); \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+ block.data = wrapper; \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } else { \
+ wrapper->initialized = false; \
+ wrapper->val = a_initializer; \
+ } \
+ a_name##tsd_wrapper_set(wrapper); \
+ tsd_init_finish(&a_name##tsd_init_head, &block); \
+ } \
+ return (wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+ if (pthread_key_create(&a_name##tsd_tsd, \
+ a_name##tsd_cleanup_wrapper) != 0) \
+ return (true); \
+ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+ a_name##tsd_booted = true; \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+ memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+ sizeof(a_name##tsd_wrapper_t)); \
+ a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ if (a_name##tsd_boot0()) \
+ return (true); \
+ a_name##tsd_boot1(); \
+ return (false); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+ return (true); \
+} \
+/* Get/set. */ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(init); \
+ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
+ return (NULL); \
+ return (&wrapper->val); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(true); \
+ wrapper->val = *(val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) \
+ wrapper->initialized = true; \
+}
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+struct tsd_init_block_s {
+ ql_elm(tsd_init_block_t) link;
+ pthread_t thread;
+ void *data;
+};
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+#endif
+
+#define MALLOC_TSD \
+/* O(name, type) */ \
+ O(tcache, tcache_t *) \
+ O(thread_allocated, uint64_t) \
+ O(thread_deallocated, uint64_t) \
+ O(prof_tdata, prof_tdata_t *) \
+ O(iarena, arena_t *) \
+ O(arena, arena_t *) \
+ O(arenas_tdata, arena_tdata_t *) \
+ O(narenas_tdata, unsigned) \
+ O(arenas_tdata_bypass, bool) \
+ O(tcache_enabled, tcache_enabled_t) \
+ O(quarantine, quarantine_t *) \
+ O(witnesses, witness_list_t) \
+ O(witness_fork, bool) \
+
+#define TSD_INITIALIZER { \
+ tsd_state_uninitialized, \
+ NULL, \
+ 0, \
+ 0, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ 0, \
+ false, \
+ tcache_enabled_default, \
+ NULL, \
+ ql_head_initializer(witnesses), \
+ false \
+}
+
+struct tsd_s {
+ tsd_state_t state;
+#define O(n, t) \
+ t n;
+MALLOC_TSD
+#undef O
+};
+
+/*
+ * Wrapper around tsd_t that makes it possible to avoid implicit conversion
+ * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
+ * explicitly converted to tsd_t, which is non-nullable.
+ */
+struct tsdn_s {
+ tsd_t tsd;
+};
+
+static const tsd_t tsd_initializer = TSD_INITIALIZER;
+
+malloc_tsd_types(, tsd_t)
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *malloc_tsd_malloc(size_t size);
+void malloc_tsd_dalloc(void *wrapper);
+void malloc_tsd_no_cleanup(void *arg);
+void malloc_tsd_cleanup_register(bool (*f)(void));
+tsd_t *malloc_tsd_boot0(void);
+void malloc_tsd_boot1(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *tsd_init_check_recursion(tsd_init_head_t *head,
+ tsd_init_block_t *block);
+void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+#endif
+void tsd_cleanup(void *arg);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
+
+tsd_t *tsd_fetch_impl(bool init);
+tsd_t *tsd_fetch(void);
+tsdn_t *tsd_tsdn(tsd_t *tsd);
+bool tsd_nominal(tsd_t *tsd);
+#define O(n, t) \
+t *tsd_##n##p_get(tsd_t *tsd); \
+t tsd_##n##_get(tsd_t *tsd); \
+void tsd_##n##_set(tsd_t *tsd, t n);
+MALLOC_TSD
+#undef O
+tsdn_t *tsdn_fetch(void);
+bool tsdn_null(const tsdn_t *tsdn);
+tsd_t *tsdn_tsd(tsdn_t *tsdn);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
+malloc_tsd_externs(, tsd_t)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch_impl(bool init)
+{
+ tsd_t *tsd = tsd_get(init);
+
+ if (!init && tsd_get_allocates() && tsd == NULL)
+ return (NULL);
+ assert(tsd != NULL);
+
+ if (unlikely(tsd->state != tsd_state_nominal)) {
+ if (tsd->state == tsd_state_uninitialized) {
+ tsd->state = tsd_state_nominal;
+ /* Trigger cleanup handler registration. */
+ tsd_set(tsd);
+ } else if (tsd->state == tsd_state_purgatory) {
+ tsd->state = tsd_state_reincarnated;
+ tsd_set(tsd);
+ } else
+ assert(tsd->state == tsd_state_reincarnated);
+ }
+
+ return (tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void)
+{
+
+ return (tsd_fetch_impl(true));
+}
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd)
+{
+
+ return ((tsdn_t *)tsd);
+}
+
+JEMALLOC_INLINE bool
+tsd_nominal(tsd_t *tsd)
+{
+
+ return (tsd->state == tsd_state_nominal);
+}
+
+#define O(n, t) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get(tsd_t *tsd) \
+{ \
+ \
+ return (&tsd->n); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE t \
+tsd_##n##_get(tsd_t *tsd) \
+{ \
+ \
+ return (*tsd_##n##p_get(tsd)); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE void \
+tsd_##n##_set(tsd_t *tsd, t n) \
+{ \
+ \
+ assert(tsd->state == tsd_state_nominal); \
+ tsd->n = n; \
+}
+MALLOC_TSD
+#undef O
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void)
+{
+
+ if (!tsd_booted_get())
+ return (NULL);
+
+ return (tsd_tsdn(tsd_fetch_impl(false)));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn)
+{
+
+ return (tsdn == NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsdn_tsd(tsdn_t *tsdn)
+{
+
+ assert(!tsdn_null(tsdn));
+
+ return (&tsdn->tsd);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
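The pthread-based variant of malloc_tsd_funcs() above generates a lazily allocated per-thread wrapper. The standalone sketch below shows the same pattern with plain pthreads: a wrapper holding the value, installed with pthread_setspecific() and reclaimed by the key destructor. example_t and the helper names are illustrative, and the recursion guard (tsd_init_check_recursion) and boot wrapper are omitted.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	int	x;
	int	y;
} example_t;

typedef struct {
	int		initialized;
	example_t	val;
} example_tsd_wrapper_t;

static pthread_key_t	example_tsd_key;

/* Key destructor: runs at thread exit for threads that set a value. */
static void
example_tsd_cleanup(void *arg)
{
	example_tsd_wrapper_t *wrapper = (example_tsd_wrapper_t *)arg;

	free(wrapper);
}

/* Lazily allocate the per-thread wrapper, as the generated tsd_get() does. */
static example_t *
example_tsd_get(void)
{
	example_tsd_wrapper_t *wrapper = pthread_getspecific(example_tsd_key);

	if (wrapper == NULL) {
		wrapper = calloc(1, sizeof(*wrapper));
		if (wrapper == NULL)
			abort();
		if (pthread_setspecific(example_tsd_key, wrapper) != 0)
			abort();
	}
	return (&wrapper->val);
}

int
main(void)
{

	if (pthread_key_create(&example_tsd_key, example_tsd_cleanup) != 0)
		return (1);
	example_tsd_get()->x = 42;
	printf("%d\n", example_tsd_get()->x);
	return (0);
}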
diff --git a/memory/jemalloc/src/include/jemalloc/internal/util.h b/memory/jemalloc/src/include/jemalloc/internal/util.h
new file mode 100644
index 000000000..aee00d6d9
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/util.h
@@ -0,0 +1,338 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef _WIN32
+# ifdef _WIN64
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX "ll"
+# else
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX ""
+# endif
+# define FMTd32 "d"
+# define FMTu32 "u"
+# define FMTx32 "x"
+# define FMTd64 FMT64_PREFIX "d"
+# define FMTu64 FMT64_PREFIX "u"
+# define FMTx64 FMT64_PREFIX "x"
+# define FMTdPTR FMTPTR_PREFIX "d"
+# define FMTuPTR FMTPTR_PREFIX "u"
+# define FMTxPTR FMTPTR_PREFIX "x"
+#else
+# include <inttypes.h>
+# define FMTd32 PRId32
+# define FMTu32 PRIu32
+# define FMTx32 PRIx32
+# define FMTd64 PRId64
+# define FMTu64 PRIu64
+# define FMTx64 PRIx64
+# define FMTdPTR PRIdPTR
+# define FMTuPTR PRIuPTR
+# define FMTxPTR PRIxPTR
+#endif
+
+/* Size of stack-allocated buffer passed to buferror(). */
+#define BUFERROR_BUF 64
+
+/*
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
+ * large enough for all possible uses within jemalloc.
+ */
+#define MALLOC_PRINTF_BUFSIZE 4096
+
+/* Junk fill patterns. */
+#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+
+/*
+ * Wrap a cpp argument that contains commas such that it isn't broken up into
+ * multiple arguments.
+ */
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+
+/*
+ * Silence compiler warnings due to uninitialized values. This is used
+ * wherever the compiler fails to recognize that the variable is never used
+ * uninitialized.
+ */
+#ifdef JEMALLOC_CC_SILENCE
+# define JEMALLOC_CC_SILENCE_INIT(v) = v
+#else
+# define JEMALLOC_CC_SILENCE_INIT(v)
+#endif
+
+#ifdef __GNUC__
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#else
+# define likely(x) !!(x)
+# define unlikely(x) !!(x)
+#endif
+
+#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
+# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
+#endif
+
+#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
+
+#include "jemalloc/internal/assert.h"
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define cassert(c) do { \
+ if (unlikely(!(c))) \
+ not_reached(); \
+} while (0)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+int buferror(int err, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *restrict nptr,
+ char **restrict endptr, int base);
+void malloc_write(const char *s);
+
+/*
+ * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
+ * point math.
+ */
+size_t malloc_vsnprintf(char *str, size_t size, const char *format,
+ va_list ap);
+size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
+ JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap);
+void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
+ const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+unsigned ffs_llu(unsigned long long bitmap);
+unsigned ffs_lu(unsigned long bitmap);
+unsigned ffs_u(unsigned bitmap);
+unsigned ffs_zu(size_t bitmap);
+unsigned ffs_u64(uint64_t bitmap);
+unsigned ffs_u32(uint32_t bitmap);
+uint64_t pow2_ceil_u64(uint64_t x);
+uint32_t pow2_ceil_u32(uint32_t x);
+size_t pow2_ceil_zu(size_t x);
+unsigned lg_floor(size_t x);
+void set_errno(int errnum);
+int get_errno(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
+
+/* Sanity check. */
+#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
+ || !defined(JEMALLOC_INTERNAL_FFS)
+# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
+#endif
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_llu(unsigned long long bitmap)
+{
+
+ return (JEMALLOC_INTERNAL_FFSLL(bitmap));
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_lu(unsigned long bitmap)
+{
+
+ return (JEMALLOC_INTERNAL_FFSL(bitmap));
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_u(unsigned bitmap)
+{
+
+ return (JEMALLOC_INTERNAL_FFS(bitmap));
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_zu(size_t bitmap)
+{
+
+#if LG_SIZEOF_PTR == LG_SIZEOF_INT
+ return (ffs_u(bitmap));
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
+ return (ffs_lu(bitmap));
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
+ return (ffs_llu(bitmap));
+#else
+#error No implementation for size_t ffs()
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_u64(uint64_t bitmap)
+{
+
+#if LG_SIZEOF_LONG == 3
+ return (ffs_lu(bitmap));
+#elif LG_SIZEOF_LONG_LONG == 3
+ return (ffs_llu(bitmap));
+#else
+#error No implementation for 64-bit ffs()
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+ffs_u32(uint32_t bitmap)
+{
+
+#if LG_SIZEOF_INT == 2
+ return (ffs_u(bitmap));
+#else
+#error No implementation for 32-bit ffs()
+#endif
+ return (ffs_u(bitmap));
+}
+
+JEMALLOC_INLINE uint64_t
+pow2_ceil_u64(uint64_t x)
+{
+
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x |= x >> 32;
+ x++;
+ return (x);
+}
+
+JEMALLOC_INLINE uint32_t
+pow2_ceil_u32(uint32_t x)
+{
+
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x++;
+ return (x);
+}
+
+/* Compute the smallest power of 2 that is >= x. */
+JEMALLOC_INLINE size_t
+pow2_ceil_zu(size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return (pow2_ceil_u64(x));
+#else
+ return (pow2_ceil_u32(x));
+#endif
+}
+
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE unsigned
+lg_floor(size_t x)
+{
+ size_t ret;
+
+ assert(x != 0);
+
+ asm ("bsr %1, %0"
+ : "=r"(ret) // Outputs.
+ : "r"(x) // Inputs.
+ );
+ assert(ret < UINT_MAX);
+ return ((unsigned)ret);
+}
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE unsigned
+lg_floor(size_t x)
+{
+ unsigned long ret;
+
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == 3)
+ _BitScanReverse64(&ret, x);
+#elif (LG_SIZEOF_PTR == 2)
+ _BitScanReverse(&ret, x);
+#else
+# error "Unsupported type size for lg_floor()"
+#endif
+ assert(ret < UINT_MAX);
+ return ((unsigned)ret);
+}
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+JEMALLOC_INLINE unsigned
+lg_floor(size_t x)
+{
+
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
+ return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
+#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+ return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
+#else
+# error "Unsupported type size for lg_floor()"
+#endif
+}
+#else
+JEMALLOC_INLINE unsigned
+lg_floor(size_t x)
+{
+
+ assert(x != 0);
+
+ x |= (x >> 1);
+ x |= (x >> 2);
+ x |= (x >> 4);
+ x |= (x >> 8);
+ x |= (x >> 16);
+#if (LG_SIZEOF_PTR == 3)
+ x |= (x >> 32);
+#endif
+ if (x == SIZE_T_MAX)
+ return ((8 << LG_SIZEOF_PTR) - 1);
+ x++;
+ return (ffs_zu(x) - 2);
+}
+#endif
+
+/* Set error code. */
+JEMALLOC_INLINE void
+set_errno(int errnum)
+{
+
+#ifdef _WIN32
+ SetLastError(errnum);
+#else
+ errno = errnum;
+#endif
+}
+
+/* Get last error code. */
+JEMALLOC_INLINE int
+get_errno(void)
+{
+
+#ifdef _WIN32
+ return (GetLastError());
+#else
+ return (errno);
+#endif
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
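The inline helpers above rely on two classic bit tricks: pow2_ceil_* smears the highest set bit downward and then rounds up, and the portable lg_floor() fallback reduces to finding the position of the highest set bit. The sketch below exercises both on 32-bit values; the toy_* names are illustrative, and the loop-based lg_floor is a simplification of the fallback above.

#include <stdint.h>
#include <stdio.h>

/* Smear the highest set bit downward, then round up to a power of two. */
static uint32_t
toy_pow2_ceil_u32(uint32_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x++;
	return (x);
}

/* Simplified portable lg_floor(): position of the highest set bit. */
static unsigned
toy_lg_floor_u32(uint32_t x)
{
	unsigned lg = 0;

	while (x >>= 1)
		lg++;
	return (lg);
}

int
main(void)
{

	printf("pow2_ceil(37) = %u\n", toy_pow2_ceil_u32(37));	/* 64 */
	printf("lg_floor(4096) = %u\n", toy_lg_floor_u32(4096));	/* 12 */
	return (0);
}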
diff --git a/memory/jemalloc/src/include/jemalloc/internal/valgrind.h b/memory/jemalloc/src/include/jemalloc/internal/valgrind.h
new file mode 100644
index 000000000..1a8680828
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/valgrind.h
@@ -0,0 +1,114 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+
+/*
+ * The size that is reported to Valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size. As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_make_mem_noaccess(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_make_mem_undefined(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_make_mem_defined(ptr, usize); \
+} while (0)
+/*
+ * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
+ * calls must be embedded in macros rather than in functions so that when
+ * Valgrind reports errors, there are no extra stack frames in the backtraces.
+ */
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
+ if (unlikely(in_valgrind && cond)) { \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
+ zero); \
+ } \
+} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
+ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
+ zero) do { \
+ if (unlikely(in_valgrind)) { \
+ size_t rzsize = p2rz(tsdn, ptr); \
+ \
+ if (!maybe_moved || ptr == old_ptr) { \
+ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
+ usize, rzsize); \
+ if (zero && old_usize < usize) { \
+ valgrind_make_mem_defined( \
+ (void *)((uintptr_t)ptr + \
+ old_usize), usize - old_usize); \
+ } \
+ } else { \
+ if (!old_ptr_maybe_null || old_ptr != NULL) { \
+ valgrind_freelike_block(old_ptr, \
+ old_rzsize); \
+ } \
+ if (!ptr_maybe_null || ptr != NULL) { \
+ size_t copy_size = (old_usize < usize) \
+ ? old_usize : usize; \
+ size_t tail_size = usize - copy_size; \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
+ rzsize, false); \
+ if (copy_size > 0) { \
+ valgrind_make_mem_defined(ptr, \
+ copy_size); \
+ } \
+ if (zero && tail_size > 0) { \
+ valgrind_make_mem_defined( \
+ (void *)((uintptr_t)ptr + \
+ copy_size), tail_size); \
+ } \
+ } \
+ } \
+ } \
+} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_freelike_block(ptr, rzsize); \
+} while (0)
+#else
+#define RUNNING_ON_VALGRIND ((unsigned)0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
+ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
+ zero) do {} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#ifdef JEMALLOC_VALGRIND
+void valgrind_make_mem_noaccess(void *ptr, size_t usize);
+void valgrind_make_mem_undefined(void *ptr, size_t usize);
+void valgrind_make_mem_defined(void *ptr, size_t usize);
+void valgrind_freelike_block(void *ptr, size_t usize);
+#endif
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
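The JEMALLOC_VALGRIND_* macros above wrap Valgrind's client requests for allocator-managed memory. The sketch below shows the underlying requests on a block carved out of a static pool, assuming the Valgrind development headers are installed; when not running under Valgrind the requests compile to no-ops.

#include <stddef.h>
#include <valgrind/valgrind.h>

static char pool[4096];

int
main(void)
{
	size_t usize = 64;
	size_t rzsize = 0;
	void *p = pool;

	/* Tell Valgrind the first 64 bytes of the pool act like a heap block. */
	VALGRIND_MALLOCLIKE_BLOCK(p, usize, rzsize, 0);
	/* ... the block would be used here ... */
	VALGRIND_FREELIKE_BLOCK(p, rzsize);
	return (0);
}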
diff --git a/memory/jemalloc/src/include/jemalloc/internal/witness.h b/memory/jemalloc/src/include/jemalloc/internal/witness.h
new file mode 100644
index 000000000..cdf15d797
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/internal/witness.h
@@ -0,0 +1,266 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct witness_s witness_t;
+typedef unsigned witness_rank_t;
+typedef ql_head(witness_t) witness_list_t;
+typedef int witness_comp_t (const witness_t *, const witness_t *);
+
+/*
+ * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
+ * the witness machinery.
+ */
+#define WITNESS_RANK_OMIT 0U
+
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_ARENAS 2U
+
+#define WITNESS_RANK_PROF_DUMP 3U
+#define WITNESS_RANK_PROF_BT2GCTX 4U
+#define WITNESS_RANK_PROF_TDATAS 5U
+#define WITNESS_RANK_PROF_TDATA 6U
+#define WITNESS_RANK_PROF_GCTX 7U
+
+#define WITNESS_RANK_ARENA 8U
+#define WITNESS_RANK_ARENA_CHUNKS 9U
+#define WITNESS_RANK_ARENA_NODE_CACHE	10U
+
+#define WITNESS_RANK_BASE 11U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct witness_s {
+ /* Name, used for printing lock order reversal messages. */
+ const char *name;
+
+ /*
+ * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
+ * must be acquired in order of increasing rank.
+ */
+ witness_rank_t rank;
+
+ /*
+	 * If two witnesses are of equal rank and they have the same comp
+	 * function pointer, it is called as a last attempt to differentiate
+	 * between witnesses of equal rank.
+ */
+ witness_comp_t *comp;
+
+ /* Linkage for thread's currently owned locks. */
+ ql_elm(witness_t) link;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp);
+#ifdef JEMALLOC_JET
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *witness_lock_error;
+#else
+void witness_lock_error(const witness_list_t *witnesses,
+ const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *witness_owner_error;
+#else
+void witness_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *witness_not_owner_error;
+#else
+void witness_not_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_lockless_error_t)(const witness_list_t *);
+extern witness_lockless_error_t *witness_lockless_error;
+#else
+void witness_lockless_error(const witness_list_t *witnesses);
+#endif
+
+void witnesses_cleanup(tsd_t *tsd);
+void witness_fork_cleanup(tsd_t *tsd);
+void witness_prefork(tsd_t *tsd);
+void witness_postfork_parent(tsd_t *tsd);
+void witness_postfork_child(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+bool witness_owner(tsd_t *tsd, const witness_t *witness);
+void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_lockless(tsdn_t *tsdn);
+void witness_lock(tsdn_t *tsdn, witness_t *witness);
+void witness_unlock(tsdn_t *tsdn, witness_t *witness);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
+JEMALLOC_INLINE bool
+witness_owner(tsd_t *tsd, const witness_t *witness)
+{
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_foreach(w, witnesses, link) {
+ if (w == witness)
+ return (true);
+ }
+
+ return (false);
+}
+
+JEMALLOC_INLINE void
+witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
+{
+ tsd_t *tsd;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ if (witness_owner(tsd, witness))
+ return;
+ witness_owner_error(witness);
+}
+
+JEMALLOC_INLINE void
+witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_foreach(w, witnesses, link) {
+ if (w == witness)
+ witness_not_owner_error(witness);
+ }
+}
+
+JEMALLOC_INLINE void
+witness_assert_lockless(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ w = ql_last(witnesses, link);
+ if (w != NULL)
+ witness_lockless_error(witnesses);
+}
+
+JEMALLOC_INLINE void
+witness_lock(tsdn_t *tsdn, witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witness_assert_not_owner(tsdn, witness);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ w = ql_last(witnesses, link);
+ if (w == NULL) {
+ /* No other locks; do nothing. */
+ } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
+ /* Forking, and relaxed ranking satisfied. */
+ } else if (w->rank > witness->rank) {
+ /* Not forking, rank order reversal. */
+ witness_lock_error(witnesses, witness);
+ } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
+ witness->comp || w->comp(w, witness) > 0)) {
+ /*
+ * Missing/incompatible comparison function, or comparison
+ * function indicates rank order reversal.
+ */
+ witness_lock_error(witnesses, witness);
+ }
+
+ ql_elm_new(witness, link);
+ ql_tail_insert(witnesses, witness, link);
+}
+
+JEMALLOC_INLINE void
+witness_unlock(tsdn_t *tsdn, witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ /*
+	 * Check for ownership before removal, rather than relying on
+	 * witness_assert_owner() to abort, so that unit tests can exercise
+	 * this function's failure mode without causing undefined behavior.
+ */
+ if (witness_owner(tsd, witness)) {
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_remove(witnesses, witness, link);
+ } else
+ witness_assert_owner(tsdn, witness);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
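witness_lock() above enforces that a thread acquires locks in order of increasing rank. The sketch below models just that ordering check with a stack of held witnesses; it is a simplification of the real code, which also consults the comp function for equal ranks, relaxes the check during fork, tracks the list per thread, and allows unlocking in any order. All toy_* names are illustrative.

#include <assert.h>
#include <stdio.h>

#define	MAX_HELD	16

typedef struct {
	const char	*name;
	unsigned	rank;
} toy_witness_t;

/* Per-thread in the real code; a single static stack suffices here. */
static const toy_witness_t	*held[MAX_HELD];
static unsigned			nheld;

static void
toy_witness_lock(const toy_witness_t *w)
{

	/* Equal or lower rank than the last held lock is treated as a reversal. */
	if (nheld > 0 && held[nheld - 1]->rank >= w->rank) {
		fprintf(stderr, "lock order reversal: %s after %s\n",
		    w->name, held[nheld - 1]->name);
		assert(0);
	}
	assert(nheld < MAX_HELD);
	held[nheld++] = w;
}

static void
toy_witness_unlock(const toy_witness_t *w)
{

	/* Simplified: the real code removes from anywhere in the list. */
	assert(nheld > 0 && held[nheld - 1] == w);
	nheld--;
}

int
main(void)
{
	toy_witness_t arenas = {"arenas", 2};
	toy_witness_t arena = {"arena", 8};

	toy_witness_lock(&arenas);	/* Lower rank acquired first: OK. */
	toy_witness_lock(&arena);
	toy_witness_unlock(&arena);
	toy_witness_unlock(&arenas);
	printf("rank order respected\n");
	return (0);
}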
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc.sh b/memory/jemalloc/src/include/jemalloc/jemalloc.sh
new file mode 100755
index 000000000..c085814f2
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+objroot=$1
+
+cat <<EOF
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+
+for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
+ jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
+ cat "${objroot}include/jemalloc/${hdr}" \
+ | grep -v 'Generated from .* by configure\.' \
+ | sed -e 's/^#define /#define /g' \
+ | sed -e 's/ $//g'
+ echo
+done
+
+cat <<EOF
+#ifdef __cplusplus
+}
+#endif
+#endif /* JEMALLOC_H_ */
+EOF
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_defs.h.in b/memory/jemalloc/src/include/jemalloc/jemalloc_defs.h.in
new file mode 100644
index 000000000..6d89435c2
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc_defs.h.in
@@ -0,0 +1,45 @@
+/* Defined if __attribute__((...)) syntax is supported. */
+#undef JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#undef JEMALLOC_OVERRIDE_MEMALIGN
+#undef JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#undef JEMALLOC_USABLE_SIZE_CONST
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#undef JEMALLOC_USE_CXX_THROW
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#undef LG_SIZEOF_PTR
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_macros.h.in b/memory/jemalloc/src/include/jemalloc/jemalloc_macros.h.in
new file mode 100644
index 000000000..129240ed9
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc_macros.h.in
@@ -0,0 +1,103 @@
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "@jemalloc_version@"
+#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
+#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
+#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
+#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
+#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
+
+# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# else
+# define MALLOCX_ALIGN(a) \
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+# define JEMALLOC_CXX_THROW throw()
+#else
+# define JEMALLOC_CXX_THROW
+#endif
+
+#if _MSC_VER
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# ifndef JEMALLOC_EXPORT
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# endif
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE __declspec(noinline)
+# ifdef __cplusplus
+# define JEMALLOC_NOTHROW __declspec(nothrow)
+# else
+# define JEMALLOC_NOTHROW
+# endif
+# define JEMALLOC_SECTION(s) __declspec(allocate(s))
+# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+# if _MSC_VER >= 1900 && !defined(__EDG__)
+# define JEMALLOC_ALLOCATOR __declspec(allocator)
+# else
+# define JEMALLOC_ALLOCATOR
+# endif
+#elif defined(JEMALLOC_HAVE_ATTR)
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#else
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s)
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#endif
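
The MALLOCX_* macros defined in jemalloc_macros.h.in above pack alignment, zero-fill, tcache and arena selection into the single flags argument of the *allocx entry points declared later in jemalloc_protos.h.in. A minimal usage sketch, assuming a build without --with-jemalloc-prefix so the public names are unprefixed:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* 64-byte-aligned, zero-filled allocation from an automatically chosen arena. */
        void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL)
            return 1;
        /* Sized deallocation; the alignment flag must match the allocation. */
        sdallocx(p, 1024, MALLOCX_ALIGN(64));
        return 0;
    }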
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_mangle.sh b/memory/jemalloc/src/include/jemalloc/jemalloc_mangle.sh
new file mode 100755
index 000000000..df328b78d
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc_mangle.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+public_symbols_txt=$1
+symbol_prefix=$2
+
+cat <<EOF
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "# define ${n} ${symbol_prefix}${n}"
+done
+
+cat <<EOF
+#endif
+
+/*
+ * The ${symbol_prefix}* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "# undef ${symbol_prefix}${n}"
+done
+
+cat <<EOF
+#endif
+EOF
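
For illustration only: with a hypothetical public_symbols.txt entry of the form malloc:je_malloc and a symbol prefix of je_, the two loops in jemalloc_mangle.sh above would emit roughly the following header fragment (comments omitted):

    #ifdef JEMALLOC_MANGLE
    # ifndef JEMALLOC_NO_DEMANGLE
    #  define JEMALLOC_NO_DEMANGLE
    # endif
    # define malloc je_malloc
    #endif

    #ifndef JEMALLOC_NO_DEMANGLE
    # undef je_malloc
    #endif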
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in b/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in
new file mode 100644
index 000000000..a78414b19
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in
@@ -0,0 +1,66 @@
+/*
+ * The @je_@ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
+ */
+extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
+extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@posix_memalign(void **memptr,
+ size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@free(void *ptr)
+ JEMALLOC_CXX_THROW;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@sdallocx(void *ptr, size_t size,
+ int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctl(const char *name,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *@je_@cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
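
A short sketch of the extended and control interfaces declared above, again assuming the unprefixed public API and a build with statistics enabled (typically the default):

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = mallocx(4096, 0);

        /* Refresh the statistics snapshot, then read total allocated bytes. */
        uint64_t epoch = 1;
        size_t esz = sizeof(epoch);
        mallctl("epoch", &epoch, &esz, &epoch, esz);

        size_t allocated, sz = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
            printf("stats.allocated = %zu\n", allocated);

        dallocx(p, 0);
        return 0;
    }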
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_rename.sh b/memory/jemalloc/src/include/jemalloc/jemalloc_rename.sh
new file mode 100755
index 000000000..f94389120
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc_rename.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+public_symbols_txt=$1
+
+cat <<EOF
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
+ echo "# define je_${n} ${m}"
+done
+
+cat <<EOF
+#endif
+EOF
diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_typedefs.h.in b/memory/jemalloc/src/include/jemalloc/jemalloc_typedefs.h.in
new file mode 100644
index 000000000..fa7b350ad
--- /dev/null
+++ b/memory/jemalloc/src/include/jemalloc/jemalloc_typedefs.h.in
@@ -0,0 +1,57 @@
+/*
+ * void *
+ * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ * bool *commit, unsigned arena_ind);
+ */
+typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+
+/*
+ * bool
+ * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
+
+typedef struct {
+ chunk_alloc_t *alloc;
+ chunk_dalloc_t *dalloc;
+ chunk_commit_t *commit;
+ chunk_decommit_t *decommit;
+ chunk_purge_t *purge;
+ chunk_split_t *split;
+ chunk_merge_t *merge;
+} chunk_hooks_t;
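
The chunk hook typedefs above describe what an application passes through the arena.<i>.chunk_hooks mallctl to take over chunk management for an arena. A minimal sketch, assuming the unprefixed public API; log_chunk_alloc is an illustrative wrapper that only logs and then delegates to the previously installed hooks:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    static chunk_hooks_t old_hooks;

    /* Illustrative hook: log each chunk allocation, then delegate to the old hook. */
    static void *
    log_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
        bool *commit, unsigned arena_ind)
    {
        fprintf(stderr, "chunk_alloc: %zu bytes for arena %u\n", size, arena_ind);
        return old_hooks.alloc(new_addr, size, alignment, zero, commit, arena_ind);
    }

    int main(void) {
        chunk_hooks_t new_hooks;
        size_t sz = sizeof(old_hooks);

        /* Read the current hooks for arena 0, then reinstall with alloc wrapped. */
        if (mallctl("arena.0.chunk_hooks", &old_hooks, &sz, NULL, 0) != 0)
            return 1;
        new_hooks = old_hooks;
        new_hooks.alloc = log_chunk_alloc;
        mallctl("arena.0.chunk_hooks", NULL, NULL, &new_hooks, sizeof(new_hooks));

        free(malloc(1 << 22));  /* a large allocation to exercise chunk allocation */
        return 0;
    }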
diff --git a/memory/jemalloc/src/include/msvc_compat/C99/stdbool.h b/memory/jemalloc/src/include/msvc_compat/C99/stdbool.h
new file mode 100644
index 000000000..d92160ebc
--- /dev/null
+++ b/memory/jemalloc/src/include/msvc_compat/C99/stdbool.h
@@ -0,0 +1,20 @@
+#ifndef stdbool_h
+#define stdbool_h
+
+#include <wtypes.h>
+
+/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
+/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
+/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
+ * a built-in type. */
+#ifndef __clang__
+typedef BOOL _Bool;
+#endif
+
+#define bool _Bool
+#define true 1
+#define false 0
+
+#define __bool_true_false_are_defined 1
+
+#endif /* stdbool_h */
diff --git a/memory/jemalloc/src/include/msvc_compat/C99/stdint.h b/memory/jemalloc/src/include/msvc_compat/C99/stdint.h
new file mode 100644
index 000000000..d02608a59
--- /dev/null
+++ b/memory/jemalloc/src/include/msvc_compat/C99/stdint.h
@@ -0,0 +1,247 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM, we should wrap the <wchar.h> include with 'extern "C++" {}',
+// or the compiler gives many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+# include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 don't
+// realize that, e.g., char has the same size as __int8,
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef signed __int8 int8_t;
+ typedef signed __int16 int16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef signed __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef signed __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 signed int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]
diff --git a/memory/jemalloc/src/include/msvc_compat/strings.h b/memory/jemalloc/src/include/msvc_compat/strings.h
new file mode 100644
index 000000000..a3ee25063
--- /dev/null
+++ b/memory/jemalloc/src/include/msvc_compat/strings.h
@@ -0,0 +1,59 @@
+#ifndef strings_h
+#define strings_h
+
+/* MSVC doesn't define ffs/ffsl. This dummy strings.h header provides
+ * implementations of both. */
+#ifdef _MSC_VER
+# include <intrin.h>
+# pragma intrinsic(_BitScanForward)
+static __forceinline int ffsl(long x)
+{
+ unsigned long i;
+
+ if (_BitScanForward(&i, x))
+ return (i + 1);
+ return (0);
+}
+
+static __forceinline int ffs(int x)
+{
+
+ return (ffsl(x));
+}
+
+# ifdef _M_X64
+# pragma intrinsic(_BitScanForward64)
+# endif
+
+static __forceinline int ffsll(unsigned __int64 x)
+{
+ unsigned long i;
+#ifdef _M_X64
+ if (_BitScanForward64(&i, x))
+ return (i + 1);
+ return (0);
+#else
+// Fallback for 32-bit builds where the 64-bit intrinsic is not available,
+// assuming a little-endian word layout.
+ union {
+ unsigned __int64 ll;
+ unsigned long l[2];
+ } s;
+
+ s.ll = x;
+
+ if (_BitScanForward(&i, s.l[0]))
+ return (i + 1);
+ else if(_BitScanForward(&i, s.l[1]))
+ return (i + 33);
+ return (0);
+#endif
+}
+
+#else
+# define ffsll(x) __builtin_ffsll(x)
+# define ffsl(x) __builtin_ffsl(x)
+# define ffs(x) __builtin_ffs(x)
+#endif
+
+#endif /* strings_h */
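
A quick, purely illustrative sanity check of the bit-scan fallback arithmetic above (the i + 33 case covers a lowest set bit in the high 32-bit word):

    #include <assert.h>
    #include "strings.h"  /* the msvc_compat shim above */

    int main(void) {
        assert(ffs(0) == 0);                  /* no bit set */
        assert(ffs(0x8) == 4);                /* lowest set bit is bit 3 */
        assert(ffsll(0x100000000ULL) == 33);  /* lowest set bit is bit 32 */
        return 0;
    }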
diff --git a/memory/jemalloc/src/include/msvc_compat/windows_extra.h b/memory/jemalloc/src/include/msvc_compat/windows_extra.h
new file mode 100644
index 000000000..3008faa37
--- /dev/null
+++ b/memory/jemalloc/src/include/msvc_compat/windows_extra.h
@@ -0,0 +1,6 @@
+#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
+#define MSVC_COMPAT_WINDOWS_EXTRA_H
+
+#include <errno.h>
+
+#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/memory/jemalloc/src/jemalloc.pc.in b/memory/jemalloc/src/jemalloc.pc.in
new file mode 100644
index 000000000..a318e8dd3
--- /dev/null
+++ b/memory/jemalloc/src/jemalloc.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+install_suffix=@install_suffix@
+
+Name: jemalloc
+Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
+URL: http://jemalloc.net/
+Version: @jemalloc_version@
+Cflags: -I${includedir}
+Libs: -L${libdir} -ljemalloc${install_suffix}
diff --git a/memory/jemalloc/src/msvc/ReadMe.txt b/memory/jemalloc/src/msvc/ReadMe.txt
new file mode 100644
index 000000000..77d567da0
--- /dev/null
+++ b/memory/jemalloc/src/msvc/ReadMe.txt
@@ -0,0 +1,24 @@
+
+How to build jemalloc for Windows
+=================================
+
+1. Install Cygwin with at least the following packages:
+ * autoconf
+ * autogen
+ * gawk
+ * grep
+ * sed
+
+2. Install Visual Studio 2015 with Visual C++
+
+3. Add Cygwin\bin to the PATH environment variable
+
+4. Open "VS2015 x86 Native Tools Command Prompt"
+ (note: x86/x64 doesn't matter at this point)
+
+5. Generate header files:
+ sh -c "CC=cl ./autogen.sh"
+
+6. Now the project can be opened and built in Visual Studio:
+ msvc\jemalloc_vc2015.sln
+
diff --git a/memory/jemalloc/src/msvc/jemalloc_vc2015.sln b/memory/jemalloc/src/msvc/jemalloc_vc2015.sln
new file mode 100644
index 000000000..aedd5e5ea
--- /dev/null
+++ b/memory/jemalloc/src/msvc/jemalloc_vc2015.sln
@@ -0,0 +1,63 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.24720.0
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
+ ProjectSection(SolutionItems) = preProject
+ ReadMe.txt = ReadMe.txt
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Debug-static|x64 = Debug-static|x64
+ Debug-static|x86 = Debug-static|x86
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ Release-static|x64 = Release-static|x64
+ Release-static|x86 = Release-static|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
+ {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
+ {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
new file mode 100644
index 000000000..8342ab3ab
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
@@ -0,0 +1,402 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug-static|Win32">
+ <Configuration>Debug-static</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug-static|x64">
+ <Configuration>Debug-static</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release-static|Win32">
+ <Configuration>Release-static</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release-static|x64">
+ <Configuration>Release-static</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
+ <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
+ <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
+ <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
+ <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\..\src\arena.c" />
+ <ClCompile Include="..\..\..\..\src\atomic.c" />
+ <ClCompile Include="..\..\..\..\src\base.c" />
+ <ClCompile Include="..\..\..\..\src\bitmap.c" />
+ <ClCompile Include="..\..\..\..\src\chunk.c" />
+ <ClCompile Include="..\..\..\..\src\chunk_dss.c" />
+ <ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
+ <ClCompile Include="..\..\..\..\src\ckh.c" />
+ <ClCompile Include="..\..\..\..\src\ctl.c" />
+ <ClCompile Include="..\..\..\..\src\extent.c" />
+ <ClCompile Include="..\..\..\..\src\hash.c" />
+ <ClCompile Include="..\..\..\..\src\huge.c" />
+ <ClCompile Include="..\..\..\..\src\jemalloc.c" />
+ <ClCompile Include="..\..\..\..\src\mb.c" />
+ <ClCompile Include="..\..\..\..\src\mutex.c" />
+ <ClCompile Include="..\..\..\..\src\nstime.c" />
+ <ClCompile Include="..\..\..\..\src\pages.c" />
+ <ClCompile Include="..\..\..\..\src\prng.c" />
+ <ClCompile Include="..\..\..\..\src\prof.c" />
+ <ClCompile Include="..\..\..\..\src\quarantine.c" />
+ <ClCompile Include="..\..\..\..\src\rtree.c" />
+ <ClCompile Include="..\..\..\..\src\spin.c" />
+ <ClCompile Include="..\..\..\..\src\stats.c" />
+ <ClCompile Include="..\..\..\..\src\tcache.c" />
+ <ClCompile Include="..\..\..\..\src\ticker.c" />
+ <ClCompile Include="..\..\..\..\src\tsd.c" />
+ <ClCompile Include="..\..\..\..\src\util.c" />
+ <ClCompile Include="..\..\..\..\src\witness.c" />
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>jemalloc</RootNamespace>
+ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
+ <ConfigurationType>StaticLibrary</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
+ <ConfigurationType>StaticLibrary</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
+ <ConfigurationType>StaticLibrary</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
+ <ConfigurationType>StaticLibrary</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="Shared">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <TargetName>$(ProjectName)d</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <TargetName>$(ProjectName)d</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <DebugInformationFormat>OldStyle</DebugInformationFormat>
+ <MinimalRebuild>false</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
+ <DebugInformationFormat>OldStyle</DebugInformationFormat>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
new file mode 100644
index 000000000..37f0f02ae
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
@@ -0,0 +1,272 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ <Filter Include="Header Files\internal">
+ <UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Header Files\msvc_compat">
+ <UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Header Files\msvc_compat\C99">
+ <UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h">
+ <Filter>Header Files\internal</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
+ <Filter>Header Files\msvc_compat</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
+ <Filter>Header Files\msvc_compat</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
+ <Filter>Header Files\msvc_compat\C99</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
+ <Filter>Header Files\msvc_compat\C99</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\..\..\src\arena.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\atomic.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\base.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\bitmap.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\chunk.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\chunk_dss.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\chunk_mmap.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\ckh.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\ctl.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\extent.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\hash.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\huge.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\jemalloc.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\mb.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\mutex.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\nstime.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\pages.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\prng.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\prof.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\quarantine.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\rtree.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\spin.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\stats.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\tcache.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\ticker.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\tsd.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\util.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\..\..\src\witness.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+</Project>
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.cpp b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.cpp
new file mode 100644
index 000000000..c8cb7d66a
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.cpp
@@ -0,0 +1,89 @@
+// jemalloc C++ threaded test
+// Author: Rustam Abdullaev
+// Public Domain
+
+#include <atomic>
+#include <functional>
+#include <future>
+#include <random>
+#include <thread>
+#include <vector>
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+using std::vector;
+using std::thread;
+using std::uniform_int_distribution;
+using std::minstd_rand;
+
+int test_threads()
+{
+ je_malloc_conf = "narenas:3";
+ int narenas = 0;
+ size_t sz = sizeof(narenas);
+ je_mallctl("opt.narenas", &narenas, &sz, NULL, 0);
+ if (narenas != 3) {
+ printf("Error: unexpected number of arenas: %d\n", narenas);
+ return 1;
+ }
+ static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
+ static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
+ vector<thread> workers;
+ static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
+ je_malloc_stats_print(NULL, NULL, NULL);
+ size_t allocated1;
+ size_t sz1 = sizeof(allocated1);
+ je_mallctl("stats.active", &allocated1, &sz1, NULL, 0);
+ printf("\nPress Enter to start threads...\n");
+ getchar();
+ printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
+ for (int i = 0; i < numThreads; i++) {
+ workers.emplace_back([tid=i]() {
+ uniform_int_distribution<int> sizeDist(0, numSizes - 1);
+ minstd_rand rnd(tid * 17);
+ uint8_t* ptrs[numAllocsMax];
+ int ptrsz[numAllocsMax];
+ for (int i = 0; i < numIter1; ++i) {
+ thread t([&]() {
+ for (int i = 0; i < numIter2; ++i) {
+ const int numAllocs = numAllocsMax - sizeDist(rnd);
+ for (int j = 0; j < numAllocs; j += 64) {
+ const int x = sizeDist(rnd);
+ const int sz = sizes[x];
+ ptrsz[j] = sz;
+ ptrs[j] = (uint8_t*)je_malloc(sz);
+ if (!ptrs[j]) {
+ printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
+ exit(1);
+ }
+ for (int k = 0; k < sz; k++)
+ ptrs[j][k] = tid + k;
+ }
+ for (int j = 0; j < numAllocs; j += 64) {
+ for (int k = 0, sz = ptrsz[j]; k < sz; k++)
+ if (ptrs[j][k] != (uint8_t)(tid + k)) {
+ printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
+ exit(1);
+ }
+ je_free(ptrs[j]);
+ }
+ }
+ });
+ t.join();
+ }
+ });
+ }
+ for (thread& t : workers) {
+ t.join();
+ }
+ je_malloc_stats_print(NULL, NULL, NULL);
+ size_t allocated2;
+ je_mallctl("stats.active", &allocated2, &sz1, NULL, 0);
+ size_t leaked = allocated2 - allocated1;
+ printf("\nDone. Leaked: %zd bytes\n", leaked);
+ bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
+ printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
+ printf("\nPress Enter to continue...\n");
+ getchar();
+ return failed ? 1 : 0;
+}
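+
+// Recap of the test above: each of the narenas+1 worker threads repeatedly
+// allocates a batch of buffers with sizes drawn from sizes[], fills buffer j
+// with the byte pattern (tid + k), re-reads every byte to catch cross-thread
+// corruption, and frees the batch.  "stats.active" is then sampled again and
+// compared with the value captured before the threads ran; up to 64 KiB of
+// growth is tolerated because the C++ runtime may allocate lazily (e.g.
+// iostream locales or facets), as noted above.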
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.h b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.h
new file mode 100644
index 000000000..64d0cdb33
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.h
@@ -0,0 +1,3 @@
+#pragma once
+
+int test_threads();
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj
new file mode 100644
index 000000000..f5e9898f2
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj
@@ -0,0 +1,327 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug-static|Win32">
+ <Configuration>Debug-static</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug-static|x64">
+ <Configuration>Debug-static</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release-static|Win32">
+ <Configuration>Release-static</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release-static|x64">
+ <Configuration>Release-static</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>test_threads</RootNamespace>
+ <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v140</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="Shared">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <LinkIncremental>true</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <LinkIncremental>true</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <LinkIncremental>true</LinkIncremental>
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+ <LinkIncremental>true</LinkIncremental>
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+ <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
+ <IntDir>$(Platform)\$(Configuration)\</IntDir>
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="test_threads.cpp" />
+ <ClCompile Include="test_threads_main.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
+ <Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
+ </ProjectReference>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="test_threads.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
new file mode 100644
index 000000000..4c2334073
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="test_threads.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="test_threads_main.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="test_threads.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads_main.cpp b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads_main.cpp
new file mode 100644
index 000000000..ffd96e6ab
--- /dev/null
+++ b/memory/jemalloc/src/msvc/projects/vc2015/test_threads/test_threads_main.cpp
@@ -0,0 +1,12 @@
+#include "test_threads.h"
+#include <future>
+#include <functional>
+#include <chrono>
+
+using namespace std::chrono_literals;
+
+int main(int argc, char** argv)
+{
+ int rc = test_threads();
+ return rc;
+}
diff --git a/memory/jemalloc/src/src/arena.c b/memory/jemalloc/src/src/arena.c
new file mode 100644
index 000000000..e196b1337
--- /dev/null
+++ b/memory/jemalloc/src/src/arena.c
@@ -0,0 +1,3781 @@
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+purge_mode_t opt_purge = PURGE_DEFAULT;
+const char *purge_mode_names[] = {
+ "ratio",
+ "decay",
+ "N/A"
+};
+ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
+static ssize_t lg_dirty_mult_default;
+ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
+static ssize_t decay_time_default;
+
+arena_bin_info_t arena_bin_info[NBINS];
+
+size_t map_bias;
+size_t map_misc_offset;
+size_t arena_maxrun; /* Max run size for arenas. */
+size_t large_maxclass; /* Max large size class. */
+unsigned nlclasses; /* Number of large size classes. */
+unsigned nhclasses; /* Number of huge size classes. */
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk);
+static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
+ size_t ndirty_limit);
+static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
+ bool dirty, bool cleaned, bool decommitted);
+static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
+
+/******************************************************************************/
+
+JEMALLOC_INLINE_C size_t
+arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (arena_mapbits_size_decode(mapbits));
+}
+
+JEMALLOC_INLINE_C int
+arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+ const arena_chunk_map_misc_t *b)
+{
+ uintptr_t a_miscelm = (uintptr_t)a;
+ uintptr_t b_miscelm = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
+}
+
+/* Generate pairing heap functions. */
+ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
+ ph_link, arena_run_addr_comp)
+
+#ifdef JEMALLOC_JET
+#undef run_quantize_floor
+#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
+#endif
+static size_t
+run_quantize_floor(size_t size)
+{
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert(size <= HUGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
+
+ pind = psz2ind(size - large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return (size);
+ }
+ ret = pind2sz(pind - 1) + large_pad;
+ assert(ret <= size);
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef run_quantize_floor
+#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
+run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef run_quantize_ceil
+#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
+#endif
+static size_t
+run_quantize_ceil(size_t size)
+{
+ size_t ret;
+
+ assert(size > 0);
+ assert(size <= HUGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ ret = run_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large run,
+ * because under-sized runs may be mixed in. This only happens
+ * when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+ }
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef run_quantize_ceil
+#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
+run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
+#endif
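+
+/*
+ * Worked example for the two quantization functions above (illustrative;
+ * assumes 4 KiB pages and large_pad == 0, i.e. cache-oblivious large
+ * allocation disabled).  Run sizes quantize to the page size classes
+ * ..., 24576, 28672, 32768, 40960, 49152, ...:
+ *
+ *   run_quantize_floor(36864) == 32768   (9 pages rounds down to 8 pages)
+ *   run_quantize_ceil(36864)  == 40960   (9 pages rounds up to 10 pages)
+ *
+ * A size that is itself a size class (e.g. 32768) is returned unchanged by
+ * both functions.
+ */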
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
+ arena_miscelm_get_const(chunk, pageind))));
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert((npages << LG_PAGE) < chunksize);
+ assert(pind2sz(pind) <= chunksize);
+ arena_run_heap_insert(&arena->runs_avail[pind],
+ arena_miscelm_get_mutable(chunk, pageind));
+}
+
+static void
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
+ arena_miscelm_get_const(chunk, pageind))));
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert((npages << LG_PAGE) < chunksize);
+ assert(pind2sz(pind) <= chunksize);
+ arena_run_heap_remove(&arena->runs_avail[pind],
+ arena_miscelm_get_mutable(chunk, pageind));
+}
+
+static void
+arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
+
+ qr_new(&miscelm->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
+ arena->ndirty += npages;
+}
+
+static void
+arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
+
+ qr_remove(&miscelm->rd, rd_link);
+ assert(arena->ndirty >= npages);
+ arena->ndirty -= npages;
+}
+
+static size_t
+arena_chunk_dirty_npages(const extent_node_t *node)
+{
+
+ return (extent_node_size_get(node) >> LG_PAGE);
+}
+
+void
+arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
+{
+
+ if (cache) {
+ extent_node_dirty_linkage_init(node);
+ extent_node_dirty_insert(node, &arena->runs_dirty,
+ &arena->chunks_cache);
+ arena->ndirty += arena_chunk_dirty_npages(node);
+ }
+}
+
+void
+arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+ if (dirty) {
+ extent_node_dirty_remove(node);
+ assert(arena->ndirty >= arena_chunk_dirty_npages(node));
+ arena->ndirty -= arena_chunk_dirty_npages(node);
+ }
+}
+
+JEMALLOC_INLINE_C void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
+{
+ void *ret;
+ size_t regind;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
+
+ assert(run->nfree > 0);
+ assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
+
+ regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
+ ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
+ (uintptr_t)(bin_info->reg_interval * regind));
+ run->nfree--;
+ return (ret);
+}
+
+JEMALLOC_INLINE_C void
+arena_run_reg_dalloc(arena_run_t *run, void *ptr)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size_t regind = arena_run_regind(run, bin_info, ptr);
+
+ assert(run->nfree < bin_info->nregs);
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr -
+ ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
+ (uintptr_t)bin_info->reg0_offset)) %
+ (uintptr_t)bin_info->reg_interval == 0);
+ assert((uintptr_t)ptr >=
+ (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
+ (uintptr_t)bin_info->reg0_offset);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
+
+ bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
+ run->nfree++;
+}
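+
+/*
+ * Illustration of the region addressing used above (hypothetical bin
+ * parameters, not taken from arena_bin_info): a region index regind obtained
+ * from the run's bitmap maps to the address
+ *
+ *   rpages + reg0_offset + reg_interval * regind
+ *
+ * so with reg0_offset == 0 and reg_interval == 160, region 3 lives at
+ * rpages + 480.  arena_run_reg_dalloc() recovers regind from the pointer via
+ * arena_run_regind() and asserts that the pointer is region-aligned and that
+ * the region is currently marked allocated in the bitmap.
+ */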
+
+JEMALLOC_INLINE_C void
+arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
+{
+
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (npages << LG_PAGE));
+ memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
+ (npages << LG_PAGE));
+}
+
+JEMALLOC_INLINE_C void
+arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
+ << LG_PAGE)), PAGE);
+}
+
+JEMALLOC_INLINE_C void
+arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+ size_t i;
+ UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+
+ arena_run_page_mark_zeroed(chunk, run_ind);
+ for (i = 0; i < PAGE / sizeof(size_t); i++)
+ assert(p[i] == 0);
+}
+
+static void
+arena_nactive_add(arena_t *arena, size_t add_pages)
+{
+
+ if (config_stats) {
+ size_t cactive_add = CHUNK_CEILING((arena->nactive +
+ add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
+ if (cactive_add != 0)
+ stats_cactive_add(cactive_add);
+ }
+ arena->nactive += add_pages;
+}
+
+static void
+arena_nactive_sub(arena_t *arena, size_t sub_pages)
+{
+
+ if (config_stats) {
+ size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+ CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
+ if (cactive_sub != 0)
+ stats_cactive_sub(cactive_sub);
+ }
+ arena->nactive -= sub_pages;
+}
+
+static void
+arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
+ size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
+{
+ size_t total_pages, rem_pages;
+
+ assert(flag_dirty == 0 || flag_decommitted == 0);
+
+ total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
+ LG_PAGE;
+ assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
+ flag_dirty);
+ assert(need_pages <= total_pages);
+ rem_pages = total_pages - need_pages;
+
+ arena_avail_remove(arena, chunk, run_ind, total_pages);
+ if (flag_dirty != 0)
+ arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
+ arena_nactive_add(arena, need_pages);
+
+ /* Keep track of trailing unused pages for later use. */
+ if (rem_pages > 0) {
+ size_t flags = flag_dirty | flag_decommitted;
+ size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
+ 0;
+
+ arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
+ flag_unzeroed_mask));
+ arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
+ flag_unzeroed_mask));
+ if (flag_dirty != 0) {
+ arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
+ rem_pages);
+ }
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
+ }
+}
+
+static bool
+arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
+ bool remove, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages;
+ size_t flag_unzeroed_mask;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
+
+ if (remove) {
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ flag_decommitted, need_pages);
+ }
+
+ if (zero) {
+ if (flag_decommitted != 0) {
+ /* The run is untouched, and therefore zeroed. */
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
+ (need_pages << LG_PAGE));
+ } else if (flag_dirty != 0) {
+ /* The run is dirty, so all pages must be zeroed. */
+ arena_run_zero(chunk, run_ind, need_pages);
+ } else {
+ /*
+ * The run is clean, so some pages may be zeroed (i.e.
+ * never before touched).
+ */
+ size_t i;
+ for (i = 0; i < need_pages; i++) {
+ if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
+ != 0)
+ arena_run_zero(chunk, run_ind+i, 1);
+ else if (config_debug) {
+ arena_run_page_validate_zeroed(chunk,
+ run_ind+i);
+ } else {
+ arena_run_page_mark_zeroed(chunk,
+ run_ind+i);
+ }
+ }
+ }
+ } else {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ }
+
+ /*
+ * Set the last element first, in case the run only contains one page
+ * (i.e. both statements set the same element).
+ */
+ flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages-1)));
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
+ return (false);
+}
+
+static bool
+arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ return (arena_run_split_large_helper(arena, run, size, true, zero));
+}
+
+static bool
+arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ return (arena_run_split_large_helper(arena, run, size, false, zero));
+}
+
+static bool
+arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
+ szind_t binind)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
+
+ assert(binind != BININD_INVALID);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
+
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ flag_decommitted, need_pages);
+
+ for (i = 0; i < need_pages; i++) {
+ size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
+ run_ind+i);
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind,
+ flag_unzeroed);
+ if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind+i);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ return (false);
+}
+
+static arena_chunk_t *
+arena_chunk_init_spare(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+
+ assert(arena->spare != NULL);
+
+ chunk = arena->spare;
+ arena->spare = NULL;
+
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxrun);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxrun);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
+
+ return (chunk);
+}
+
+static bool
+arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ bool zero)
+{
+
+ /*
+ * The extent node notion of "committed" doesn't directly apply to
+ * arena chunks. Arbitrarily mark them as committed. The commit state
+ * of runs is tracked individually, and upon chunk deallocation the
+ * entire chunk is in a consistent commit state.
+ */
+ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+ extent_node_achunk_set(&chunk->node, true);
+ return (chunk_register(tsdn, chunk, &chunk->node));
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
+{
+ arena_chunk_t *chunk;
+
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
+ NULL, chunksize, chunksize, zero, commit);
+ if (chunk != NULL && !*commit) {
+ /* Commit header. */
+ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind)) {
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+ (void *)chunk, chunksize, *zero, *commit);
+ chunk = NULL;
+ }
+ }
+ if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ if (!*commit) {
+ /* Undo commit of header. */
+ chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
+ chunksize, *zero, *commit);
+ chunk = NULL;
+ }
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
+ bool *commit)
+{
+ arena_chunk_t *chunk;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
+ chunksize, zero, commit, true);
+ if (chunk != NULL) {
+ if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
+ chunksize, true);
+ return (NULL);
+ }
+ }
+ if (chunk == NULL) {
+ chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
+ &chunk_hooks, zero, commit);
+ }
+
+ if (config_stats && chunk != NULL) {
+ arena->stats.mapped += chunksize;
+ arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+ }
+
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ bool zero, commit;
+ size_t flag_unzeroed, flag_decommitted, i;
+
+ assert(arena->spare == NULL);
+
+ zero = false;
+ commit = false;
+ chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
+ if (chunk == NULL)
+ return (NULL);
+
+ /*
+ * Initialize the map to contain one maximal free untouched run. Mark
+ * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
+ * or decommitted chunk.
+ */
+ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
+ flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
+ flag_unzeroed | flag_decommitted);
+ /*
+ * There is no need to initialize the internal page map entries unless
+ * the chunk is not zeroed.
+ */
+ if (!zero) {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ (void *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ for (i = map_bias+1; i < chunk_npages-1; i++)
+ arena_mapbits_internal_set(chunk, i, flag_unzeroed);
+ } else {
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ if (config_debug) {
+ for (i = map_bias+1; i < chunk_npages-1; i++) {
+ assert(arena_mapbits_unzeroed_get(chunk, i) ==
+ flag_unzeroed);
+ }
+ }
+ }
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
+ flag_unzeroed);
+
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
+{
+ arena_chunk_t *chunk;
+
+ if (arena->spare != NULL)
+ chunk = arena_chunk_init_spare(arena);
+ else {
+ chunk = arena_chunk_init_hard(tsdn, arena);
+ if (chunk == NULL)
+ return (NULL);
+ }
+
+ ql_elm_new(&chunk->node, ql_link);
+ ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
+
+ return (chunk);
+}
+
+static void
+arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+{
+ bool committed;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk_deregister(chunk, &chunk->node);
+
+ committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted even if
+ * header decommit fails, since treating a partially committed
+ * chunk as committed has a high potential for causing later
+ * access of decommitted memory.
+ */
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
+ arena->ind);
+ }
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
+ committed);
+
+ if (config_stats) {
+ arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
+}
+
+static void
+arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+{
+
+ assert(arena->spare != spare);
+
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ arena_chunk_discard(tsdn, arena, spare);
+}
+
+static void
+arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+{
+ arena_chunk_t *spare;
+
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxrun);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxrun);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
+ assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
+ arena_mapbits_decommitted_get(chunk, chunk_npages-1));
+
+ /* Remove run from runs_avail, so that the arena does not use it. */
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
+
+ ql_remove(&arena->achunks, &chunk->node, ql_link);
+ spare = arena->spare;
+ arena->spare = chunk;
+ if (spare != NULL)
+ arena_spare_discard(tsdn, arena, spare);
+}
+
+static void
+arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.nmalloc_huge++;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].nmalloc++;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.nmalloc_huge--;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].nmalloc--;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].ndalloc++;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.hstats[index].ndalloc--;
+}
+
+static void
+arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge--;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].ndalloc--;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
+{
+
+ arena_huge_dalloc_stats_update(arena, oldsize);
+ arena_huge_malloc_stats_update(arena, usize);
+}
+
+static void
+arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
+ size_t usize)
+{
+
+ arena_huge_dalloc_stats_update_undo(arena, oldsize);
+ arena_huge_malloc_stats_update_undo(arena, usize);
+}
+
+extent_node_t *
+arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
+{
+ extent_node_t *node;
+
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ node = ql_last(&arena->node_cache, ql_link);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ return (base_alloc(tsdn, sizeof(extent_node_t)));
+ }
+ ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ return (node);
+}
+
+void
+arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
+{
+
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->node_cache, node, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+}
+
+static void *
+arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
+ size_t csize)
+{
+ void *ret;
+ bool commit = true;
+
+ ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+ alignment, zero, &commit);
+ if (ret == NULL) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_malloc_stats_update_undo(arena, usize);
+ arena->stats.mapped -= usize;
+ }
+ arena_nactive_sub(arena, usize >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+
+ return (ret);
+}
+
+void *
+arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero)
+{
+ void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize = CHUNK_CEILING(usize);
+ bool commit = true;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_malloc_stats_update(arena, usize);
+ arena->stats.mapped += usize;
+ }
+ arena_nactive_add(arena, usize >> LG_PAGE);
+
+ ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
+ alignment, zero, &commit, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ if (ret == NULL) {
+ ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+ usize, alignment, zero, csize);
+ }
+
+ return (ret);
+}
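+
+/*
+ * Note on the huge allocation path above: stats and nactive are bumped
+ * optimistically while arena->lock is held, before a chunk has actually been
+ * obtained.  If the cached-chunk lookup misses, arena_chunk_alloc_huge_hard()
+ * invokes the chunk hooks without the arena lock and, on failure, re-acquires
+ * the lock only to roll the optimistic updates back.  This keeps potentially
+ * slow hook calls (e.g. mmap) outside the arena lock at the cost of an undo
+ * path.
+ */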
+
+void
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+{
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize;
+
+ csize = CHUNK_CEILING(usize);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_dalloc_stats_update(arena, usize);
+ arena->stats.mapped -= usize;
+ }
+ arena_nactive_sub(arena, usize >> LG_PAGE);
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
+{
+
+ assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
+ assert(oldsize != usize);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats)
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (oldsize < usize)
+ arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
+ else
+ arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
+{
+ size_t udiff = oldsize - usize;
+ size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (cdiff != 0)
+ arena->stats.mapped -= cdiff;
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
+
+ if (cdiff != 0) {
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ void *nchunk = (void *)((uintptr_t)chunk +
+ CHUNK_CEILING(usize));
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ true);
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static bool
+arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
+ bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+{
+ bool err;
+ bool commit = true;
+
+ err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ chunksize, zero, &commit) == NULL);
+ if (err) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update_undo(arena, oldsize,
+ usize);
+ arena->stats.mapped -= cdiff;
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ *zero, true);
+ err = true;
+ }
+ return (err);
+}
+
+bool
+arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero)
+{
+ bool err;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
+ size_t udiff = usize - oldsize;
+ size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ bool commit = true;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ arena->stats.mapped += cdiff;
+ }
+ arena_nactive_add(arena, udiff >> LG_PAGE);
+
+ err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ chunksize, zero, &commit, true) == NULL);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ if (err) {
+ err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
+ &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
+ cdiff);
+ } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ *zero, true);
+ err = true;
+ }
+
+ return (err);
+}
+
+/*
+ * Do first-best-fit run selection, i.e. select the lowest run that best fits.
+ * Run sizes are indexed, so not all candidate runs are necessarily exactly the
+ * same size.
+ */
+static arena_run_t *
+arena_run_first_best_fit(arena_t *arena, size_t size)
+{
+ pszind_t pind, i;
+
+ pind = psz2ind(run_quantize_ceil(size));
+
+ for (i = pind; pind2sz(i) <= chunksize; i++) {
+ arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
+ &arena->runs_avail[i]);
+ if (miscelm != NULL)
+ return (&miscelm->run);
+ }
+
+ return (NULL);
+}
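+
+/*
+ * Example of the search above: a request whose quantized (ceil) size maps to
+ * page-size index pind scans runs_avail[pind], runs_avail[pind+1], ... until
+ * a non-empty heap is found, and returns that heap's lowest-addressed run.
+ * The run returned may be larger than requested; callers trim it with
+ * arena_run_split_large() or arena_run_split_small().
+ */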
+
+static arena_run_t *
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+{
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
+ if (run != NULL) {
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
+ }
+ return (run);
+}
+
+static arena_run_t *
+arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_large_helper(arena, size, zero);
+ if (run != NULL)
+ return (run);
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(tsdn, arena);
+ if (chunk != NULL) {
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ return (arena_run_alloc_large_helper(arena, size, zero));
+}
+
+static arena_run_t *
+arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
+{
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
+ if (run != NULL) {
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
+ }
+ return (run);
+}
+
+static arena_run_t *
+arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
+ assert(binind != BININD_INVALID);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_small_helper(arena, size, binind);
+ if (run != NULL)
+ return (run);
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(tsdn, arena);
+ if (chunk != NULL) {
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ return (arena_run_alloc_small_helper(arena, size, binind));
+}
+
+static bool
+arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
+{
+
+ return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
+ << 3));
+}
+
+ssize_t
+arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
+{
+ ssize_t lg_dirty_mult;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ lg_dirty_mult = arena->lg_dirty_mult;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (lg_dirty_mult);
+}
+
+bool
+arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
+{
+
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena->lg_dirty_mult = lg_dirty_mult;
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (false);
+}
+
+static void
+arena_decay_deadline_init(arena_t *arena)
+{
+
+ assert(opt_purge == purge_mode_decay);
+
+ /*
+ * Generate a new deadline that is uniformly random within the next
+ * epoch after the current one.
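+	 *
+	 * For example, assuming the default 10 second decay time and
+	 * SMOOTHSTEP_NSTEPS == 200, decay.interval is 50 ms and the deadline
+	 * falls uniformly within [epoch + 50 ms, epoch + 100 ms).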
+ */
+ nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
+ nstime_add(&arena->decay.deadline, &arena->decay.interval);
+ if (arena->decay.time > 0) {
+ nstime_t jitter;
+
+ nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
+ nstime_ns(&arena->decay.interval)));
+ nstime_add(&arena->decay.deadline, &jitter);
+ }
+}
+
+static bool
+arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
+{
+
+ assert(opt_purge == purge_mode_decay);
+
+ return (nstime_compare(&arena->decay.deadline, time) <= 0);
+}
+
+static size_t
+arena_decay_backlog_npages_limit(const arena_t *arena)
+{
+ static const uint64_t h_steps[] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+ };
+ uint64_t sum;
+ size_t npages_limit_backlog;
+ unsigned i;
+
+ assert(opt_purge == purge_mode_decay);
+
+ /*
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
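+	 *
+	 * The h_steps values are smoothstep ordinates scaled by
+	 * 2^SMOOTHSTEP_BFP, growing from ~0 (oldest epoch, index 0) to
+	 * 2^SMOOTHSTEP_BFP (newest epoch).  A batch of D pages recorded in the
+	 * newest slot therefore contributes ~D pages to the limit, and that
+	 * contribution decays along the smoothstep curve to 0 as the batch
+	 * ages through all SMOOTHSTEP_NSTEPS epochs.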
+ */
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+ sum += arena->decay.backlog[i] * h_steps[i];
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
+
+ return (npages_limit_backlog);
+}
+
+static void
+arena_decay_backlog_update_last(arena_t *arena)
+{
+ size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
+ arena->ndirty - arena->decay.ndirty : 0;
+ arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+}
+
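+/*
+ * Shift the backlog down by nadvance_u64 epochs, zero the slots for epochs in
+ * which no update ran, and record the current epoch's (non-negative) dirty
+ * page delta in the newest slot.  For example (illustrative), if three
+ * intervals elapsed since the last update, entries shift down three slots,
+ * the two intervening slots are zeroed, and the newest slot receives
+ * ndirty - decay.ndirty.
+ */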
+static void
+arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
+{
+
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
+ }
+ }
+
+ arena_decay_backlog_update_last(arena);
+}
+
+static void
+arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
+{
+ uint64_t nadvance_u64;
+ nstime_t delta;
+
+ assert(opt_purge == purge_mode_decay);
+ assert(arena_decay_deadline_reached(arena, time));
+
+ nstime_copy(&delta, time);
+ nstime_subtract(&delta, &arena->decay.epoch);
+ nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
+ assert(nadvance_u64 > 0);
+
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &arena->decay.interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&arena->decay.epoch, &delta);
+
+ /* Set a new deadline. */
+ arena_decay_deadline_init(arena);
+
+ /* Update the backlog. */
+ arena_decay_backlog_update(arena, nadvance_u64);
+}
+
+static void
+arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
+{
+ size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
+
+ if (arena->ndirty > ndirty_limit)
+ arena_purge_to_limit(tsdn, arena, ndirty_limit);
+ arena->decay.ndirty = arena->ndirty;
+}
+
+static void
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
+{
+
+ arena_decay_epoch_advance_helper(arena, time);
+ arena_decay_epoch_advance_purge(tsdn, arena);
+}
+
+static void
+arena_decay_init(arena_t *arena, ssize_t decay_time)
+{
+
+ arena->decay.time = decay_time;
+ if (decay_time > 0) {
+ nstime_init2(&arena->decay.interval, decay_time, 0);
+ nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
+ }
+
+ nstime_init(&arena->decay.epoch, 0);
+ nstime_update(&arena->decay.epoch);
+ arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
+ arena_decay_deadline_init(arena);
+ arena->decay.ndirty = arena->ndirty;
+ memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+static bool
+arena_decay_time_valid(ssize_t decay_time)
+{
+
+ if (decay_time < -1)
+ return (false);
+ if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
+ return (true);
+ return (false);
+}
+
+ssize_t
+arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
+{
+ ssize_t decay_time;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ decay_time = arena->decay.time;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (decay_time);
+}
+
+bool
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
+{
+
+ if (!arena_decay_time_valid(decay_time))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ /*
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_time changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
+ */
+ arena_decay_init(arena, decay_time);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (false);
+}
+
+static void
+arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
+{
+
+ assert(opt_purge == purge_mode_ratio);
+
+ /* Don't purge if the option is disabled. */
+ if (arena->lg_dirty_mult < 0)
+ return;
+
+ /*
+	 * Iterate: recursive purging is prevented (via arena->purging), so a
+	 * single pass here could otherwise leave too many dirty pages behind.
+ */
+ while (true) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ if (threshold < chunk_npages)
+ threshold = chunk_npages;
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
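+		 *
+		 * For example, assuming the default lg_dirty_mult of 3 (an 8:1
+		 * active:dirty ratio), an arena with 80000 active pages
+		 * tolerates up to 10000 dirty pages before purging, subject to
+		 * the chunk_npages floor above.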
+ */
+ if (arena->ndirty <= threshold)
+ return;
+ arena_purge_to_limit(tsdn, arena, threshold);
+ }
+}
+
+static void
+arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
+{
+ nstime_t time;
+
+ assert(opt_purge == purge_mode_decay);
+
+ /* Purge all or nothing if the option is disabled. */
+ if (arena->decay.time <= 0) {
+ if (arena->decay.time == 0)
+ arena_purge_to_limit(tsdn, arena, 0);
+ return;
+ }
+
+ nstime_init(&time, 0);
+ nstime_update(&time);
+ if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
+ &time) > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&arena->decay.epoch, &time);
+ arena_decay_deadline_init(arena);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
+ }
+
+ /*
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+ * epoch, so as a result purging only happens during epoch advances.
+ */
+ if (arena_decay_deadline_reached(arena, &time))
+ arena_decay_epoch_advance(tsdn, arena, &time);
+}
+
+void
+arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
+{
+
+ /* Don't recursively purge. */
+ if (arena->purging)
+ return;
+
+ if (opt_purge == purge_mode_ratio)
+ arena_maybe_purge_ratio(tsdn, arena);
+ else
+ arena_maybe_purge_decay(tsdn, arena);
+}
+
+static size_t
+arena_dirty_count(arena_t *arena)
+{
+ size_t ndirty = 0;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
+
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ assert(arena_mapbits_allocated_get(chunk, pageind) ==
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ ndirty += npages;
+ }
+
+ return (ndirty);
+}
+
+static size_t
+arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+ size_t nstashed = 0;
+
+ /* Stash runs/chunks according to ndirty_limit. */
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
+ size_t npages;
+ rdelm_next = qr_next(rdelm, rd_link);
+
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next;
+ bool zero, commit;
+ UNUSED void *chunk;
+
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ if (opt_purge == purge_mode_decay && arena->ndirty -
+ (nstashed + npages) < ndirty_limit)
+ break;
+
+ chunkselm_next = qr_next(chunkselm, cc_link);
+ /*
+ * Allocate. chunkselm remains valid due to the
+ * dalloc_node=false argument to chunk_alloc_cache().
+ */
+ zero = false;
+ commit = false;
+ chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
+ extent_node_addr_get(chunkselm),
+ extent_node_size_get(chunkselm), chunksize, &zero,
+ &commit, false);
+ assert(chunk == extent_node_addr_get(chunkselm));
+ assert(zero == extent_node_zeroed_get(chunkselm));
+ extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
+ purge_chunks_sentinel);
+ assert(npages == (extent_node_size_get(chunkselm) >>
+ LG_PAGE));
+ chunkselm = chunkselm_next;
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ arena_run_t *run = &miscelm->run;
+ size_t run_size =
+ arena_mapbits_unallocated_size_get(chunk, pageind);
+
+ npages = run_size >> LG_PAGE;
+ if (opt_purge == purge_mode_decay && arena->ndirty -
+ (nstashed + npages) < ndirty_limit)
+ break;
+
+ assert(pageind + npages <= chunk_npages);
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk, pageind+npages-1));
+
+ /*
+ * If purging the spare chunk's run, make it available
+ * prior to allocation.
+ */
+ if (chunk == arena->spare)
+ arena_chunk_alloc(tsdn, arena);
+
+ /* Temporarily allocate the free dirty run. */
+ arena_run_split_large(arena, run, run_size, false);
+ /* Stash. */
+ if (false)
+ qr_new(rdelm, rd_link); /* Redundant. */
+ else {
+ assert(qr_next(rdelm, rd_link) == rdelm);
+ assert(qr_prev(rdelm, rd_link) == rdelm);
+ }
+ qr_meld(purge_runs_sentinel, rdelm, rd_link);
+ }
+
+ nstashed += npages;
+ if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
+ ndirty_limit)
+ break;
+ }
+
+ return (nstashed);
+}
+
+static size_t
+arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ size_t npurged, nmadvise;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
+
+ if (config_stats)
+ nmadvise = 0;
+ npurged = 0;
+
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ /*
+ * Don't actually purge the chunk here because 1)
+ * chunkselm is embedded in the chunk and must remain
+ * valid, and 2) we deallocate the chunk in
+ * arena_unstash_purged(), where it is destroyed,
+ * decommitted, or purged, depending on chunk
+ * deallocation policy.
+ */
+ size_t size = extent_node_size_get(chunkselm);
+ npages = size >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ size_t pageind, run_size, flag_unzeroed, flags, i;
+ bool decommitted;
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ run_size = arena_mapbits_large_size_get(chunk, pageind);
+ npages = run_size >> LG_PAGE;
+
+ assert(pageind + npages <= chunk_npages);
+ assert(!arena_mapbits_decommitted_get(chunk, pageind));
+ assert(!arena_mapbits_decommitted_get(chunk,
+ pageind+npages-1));
+ decommitted = !chunk_hooks->decommit(chunk, chunksize,
+ pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
+ if (decommitted) {
+ flag_unzeroed = 0;
+ flags = CHUNK_MAP_DECOMMITTED;
+ } else {
+ flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
+ chunk_hooks, chunk, chunksize, pageind <<
+ LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
+ flags = flag_unzeroed;
+ }
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0,
+ flags);
+ arena_mapbits_large_set(chunk, pageind, run_size,
+ flags);
+
+ /*
+ * Set the unzeroed flag for internal pages, now that
+ * chunk_purge_wrapper() has returned whether the pages
+ * were zeroed as a side effect of purging. This chunk
+ * map modification is safe even though the arena mutex
+ * isn't currently owned by this thread, because the run
+ * is marked as allocated, thus protecting it from being
+ * modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 1; i < npages-1; i++) {
+ arena_mapbits_internal_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
+ }
+
+ npurged += npages;
+ if (config_stats)
+ nmadvise++;
+ }
+ malloc_mutex_lock(tsdn, &arena->lock);
+
+ if (config_stats) {
+ arena->stats.nmadvise += nmadvise;
+ arena->stats.purged += npurged;
+ }
+
+ return (npurged);
+}
+
+static void
+arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+
+ /* Deallocate chunks/runs. */
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
+ rdelm_next = qr_next(rdelm, rd_link);
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next = qr_next(chunkselm,
+ cc_link);
+ void *addr = extent_node_addr_get(chunkselm);
+ size_t size = extent_node_size_get(chunkselm);
+ bool zeroed = extent_node_zeroed_get(chunkselm);
+ bool committed = extent_node_committed_get(chunkselm);
+ extent_node_dirty_remove(chunkselm);
+ arena_node_dalloc(tsdn, arena, chunkselm);
+ chunkselm = chunkselm_next;
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
+ size, zeroed, committed);
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ pageind) != 0);
+ arena_run_t *run = &miscelm->run;
+ qr_remove(rdelm, rd_link);
+ arena_run_dalloc(tsdn, arena, run, false, true,
+ decommitted);
+ }
+ }
+}
+
+/*
+ * NB: ndirty_limit is interpreted differently depending on opt_purge:
+ * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
+ * desired state:
+ * (arena->ndirty <= ndirty_limit)
+ * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
+ * violating the invariant:
+ * (arena->ndirty >= ndirty_limit)
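+ *
+ * For example, arena_purge(..., all == true) reaches this function with
+ * ndirty_limit == 0; under either mode that purges every dirty run and chunk
+ * in the arena.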
+ */
+static void
+arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
+{
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ size_t npurge, npurged;
+ arena_runs_dirty_link_t purge_runs_sentinel;
+ extent_node_t purge_chunks_sentinel;
+
+ arena->purging = true;
+
+ /*
+ * Calls to arena_dirty_count() are disabled even for debug builds
+ * because overhead grows nonlinearly as memory usage increases.
+ */
+ if (false && config_debug) {
+ size_t ndirty = arena_dirty_count(arena);
+ assert(ndirty == arena->ndirty);
+ }
+ assert(opt_purge != purge_mode_ratio || (arena->nactive >>
+ arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+
+ qr_new(&purge_runs_sentinel, rd_link);
+ extent_node_dirty_linkage_init(&purge_chunks_sentinel);
+
+ npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
+ if (npurge == 0)
+ goto label_return;
+ npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
+ assert(npurged == npurge);
+ arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
+
+ if (config_stats)
+ arena->stats.npurge++;
+
+label_return:
+ arena->purging = false;
+}
+
+void
+arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (all)
+ arena_purge_to_limit(tsdn, arena, 0);
+ else
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static void
+arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
+{
+ size_t pageind, npages;
+
+ cassert(config_prof);
+ assert(opt_prof);
+
+ /*
+ * Iterate over the allocated runs and remove profiled allocations from
+ * the sample set.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
+ if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ void *ptr = (void *)((uintptr_t)chunk + (pageind
+ << LG_PAGE));
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr,
+ config_prof);
+
+ prof_free(tsd, ptr, usize);
+ npages = arena_mapbits_large_size_get(chunk,
+ pageind) >> LG_PAGE;
+ } else {
+ /* Skip small run. */
+ size_t binind = arena_mapbits_binind_get(chunk,
+ pageind);
+ arena_bin_info_t *bin_info =
+ &arena_bin_info[binind];
+ npages = bin_info->run_size >> LG_PAGE;
+ }
+ } else {
+ /* Skip unallocated run. */
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ assert(pageind + npages <= chunk_npages);
+ }
+}
+
+void
+arena_reset(tsd_t *tsd, arena_t *arena)
+{
+ unsigned i;
+ extent_node_t *node;
+
+ /*
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
+ */
+
+ /* Remove large allocations from prof sample set. */
+ if (config_prof && opt_prof) {
+ ql_foreach(node, &arena->achunks, ql_link) {
+ arena_achunk_prof_reset(tsd, arena,
+ extent_node_addr_get(node));
+ }
+ }
+
+ /* Reset curruns for large size classes. */
+ if (config_stats) {
+ for (i = 0; i < nlclasses; i++)
+ arena->stats.lstats[i].curruns = 0;
+ }
+
+ /* Huge allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
+ ql_last(&arena->huge, ql_link)) {
+ void *ptr = extent_node_addr_get(node);
+ size_t usize;
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+ if (config_stats || (config_prof && opt_prof))
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ /* Remove huge allocation from prof sample set. */
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ huge_dalloc(tsd_tsdn(tsd), ptr);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ /* Cancel out unwanted effects on stats. */
+ if (config_stats)
+ arena_huge_reset_stats_cancel(arena, usize);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+
+ /* Bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->runcur = NULL;
+ arena_run_heap_new(&bin->runs);
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curruns = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+
+ /*
+ * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
+ * chains directly correspond.
+ */
+ qr_new(&arena->runs_dirty, rd_link);
+ for (node = qr_next(&arena->chunks_cache, cc_link);
+ node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
+ qr_new(&node->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &node->rd, rd_link);
+ }
+
+ /* Arena chunks. */
+ for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
+ ql_last(&arena->achunks, ql_link)) {
+ ql_remove(&arena->achunks, node, ql_link);
+ arena_chunk_discard(tsd_tsdn(tsd), arena,
+ extent_node_addr_get(node));
+ }
+
+ /* Spare. */
+ if (arena->spare != NULL) {
+ arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
+ arena->spare = NULL;
+ }
+
+ assert(!arena->purging);
+ arena->nactive = 0;
+
+ for (i = 0; i < NPSIZES; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+}
+
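+/*
+ * Merge the run being deallocated with adjacent unallocated runs whose dirty
+ * and decommitted flags match, removing those neighbors from runs_avail (and
+ * the dirty list) so that the caller can reinsert the single coalesced run.
+ * For example (illustrative page indices), freeing pages [10..13] between
+ * matching free runs at [6..9] and [14..15] yields one 10-page run spanning
+ * [6..15].
+ */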
+static void
+arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
+ size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
+ size_t flag_decommitted)
+{
+ size_t size = *p_size;
+ size_t run_ind = *p_run_ind;
+ size_t run_pages = *p_run_pages;
+
+ /* Try to coalesce forward. */
+ if (run_ind + run_pages < chunk_npages &&
+ arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
+ flag_decommitted) {
+ size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
+ run_ind+run_pages);
+ size_t nrun_pages = nrun_size >> LG_PAGE;
+
+ /*
+ * Remove successor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ assert(arena_mapbits_unallocated_size_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == nrun_size);
+ assert(arena_mapbits_dirty_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_dirty);
+ assert(arena_mapbits_decommitted_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
+
+ /*
+ * If the successor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
+ nrun_pages);
+ }
+
+ size += nrun_size;
+ run_pages += nrun_pages;
+
+ arena_mapbits_unallocated_size_set(chunk, run_ind, size);
+ arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
+ size);
+ }
+
+ /* Try to coalesce backward. */
+ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
+ run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
+ flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
+ flag_decommitted) {
+ size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
+ run_ind-1);
+ size_t prun_pages = prun_size >> LG_PAGE;
+
+ run_ind -= prun_pages;
+
+ /*
+ * Remove predecessor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
+ prun_size);
+ assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind, prun_pages);
+
+ /*
+ * If the predecessor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind,
+ prun_pages);
+ }
+
+ size += prun_size;
+ run_pages += prun_pages;
+
+ arena_mapbits_unallocated_size_set(chunk, run_ind, size);
+ arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
+ size);
+ }
+
+ *p_size = size;
+ *p_run_ind = run_ind;
+ *p_run_pages = run_pages;
+}
+
+static size_t
+arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t run_ind)
+{
+ size_t size;
+
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+
+ if (arena_mapbits_large_get(chunk, run_ind) != 0) {
+ size = arena_mapbits_large_size_get(chunk, run_ind);
+ assert(size == PAGE || arena_mapbits_large_size_get(chunk,
+ run_ind+(size>>LG_PAGE)-1) == 0);
+ } else {
+ arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
+ size = bin_info->run_size;
+ }
+
+ return (size);
+}
+
+static void
+arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned, bool decommitted)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ size = arena_run_size_get(arena, chunk, run, run_ind);
+ run_pages = (size >> LG_PAGE);
+ arena_nactive_sub(arena, run_pages);
+
+ /*
+ * The run is dirty if the caller claims to have dirtied it, as well as
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
+ */
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
+ != 0)
+ dirty = true;
+ flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+ flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
+
+ /* Mark pages as unallocated in the chunk map. */
+ if (dirty || decommitted) {
+ size_t flags = flag_dirty | flag_decommitted;
+ arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ flags);
+ } else {
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind));
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
+ }
+
+ arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
+ flag_dirty, flag_decommitted);
+
+ /* Insert into runs_avail, now that coalescing is complete. */
+ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
+ arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
+ arena_avail_insert(arena, chunk, run_ind, run_pages);
+
+ if (dirty)
+ arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
+
+ /* Deallocate chunk if it is now completely unused. */
+ if (size == arena_maxrun) {
+ assert(run_ind == map_bias);
+ assert(run_pages == (arena_maxrun >> LG_PAGE));
+ arena_chunk_dalloc(tsdn, arena, chunk);
+ }
+
+ /*
+ * It is okay to do dirty page processing here even if the chunk was
+ * deallocated above, since in that case it is the spare. Waiting
+ * until after possible chunk deallocation to do dirty processing
+ * allows for an old spare to be fully deallocated, thus decreasing the
+ * chances of spuriously crossing the dirty page purging threshold.
+ */
+ if (dirty)
+ arena_maybe_purge(tsdn, arena);
+}
+
+static void
+arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ size_t head_npages = (oldsize - newsize) >> LG_PAGE;
+ size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * leading run as separately allocated. Set the last element of each
+ * run first, in case of single-page runs.
+ */
+ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = newsize >> LG_PAGE;
+ assert(arena_mapbits_large_size_get(chunk,
+ pageind+head_npages+tail_npages-1) == 0);
+ assert(arena_mapbits_dirty_get(chunk,
+ pageind+head_npages+tail_npages-1) == flag_dirty);
+ }
+ arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
+
+ arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
+ 0));
+}
+
+static void
+arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ size_t head_npages = newsize >> LG_PAGE;
+ size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_chunk_map_misc_t *tail_miscelm;
+ arena_run_t *tail_run;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * trailing run as separately allocated. Set the last element of each
+ * run first, in case of single-page runs.
+ */
+ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
+ assert(arena_mapbits_large_size_get(chunk,
+ pageind+head_npages+tail_npages-1) == 0);
+ assert(arena_mapbits_dirty_get(chunk,
+ pageind+head_npages+tail_npages-1) == flag_dirty);
+ }
+ arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
+
+ tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
+ tail_run = &tail_miscelm->run;
+ arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
+ != 0));
+}
+
+static void
+arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+
+ arena_run_heap_insert(&bin->runs, miscelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+ arena_chunk_map_misc_t *miscelm;
+
+ miscelm = arena_run_heap_remove_first(&bin->runs);
+ if (miscelm == NULL)
+ return (NULL);
+ if (config_stats)
+ bin->stats.reruns++;
+
+ return (&miscelm->run);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+{
+ arena_run_t *run;
+ szind_t binind;
+ arena_bin_info_t *bin_info;
+
+ /* Look for a usable run. */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
+ /* No existing runs have any space available. */
+
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+
+ /* Allocate a new run. */
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ /******************************/
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
+ if (run != NULL) {
+ /* Initialize run internals. */
+ run->binind = binind;
+ run->nfree = bin_info->nregs;
+ bitmap_init(run->bitmap, &bin_info->bitmap_info);
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ /********************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (run != NULL) {
+ if (config_stats) {
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ }
+ return (run);
+ }
+
+ /*
+ * arena_run_alloc_small() failed, but another thread may have made
+ * sufficient memory available while this one dropped bin->lock above,
+ * so search one more time.
+ */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
+
+ return (NULL);
+}
+
+/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+static void *
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+{
+ szind_t binind;
+ arena_bin_info_t *bin_info;
+ arena_run_t *run;
+
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+ bin->runcur = NULL;
+ run = arena_bin_nonfull_run_get(tsdn, arena, bin);
+ if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+ /*
+ * Another thread updated runcur while this one ran without the
+ * bin lock in arena_bin_nonfull_run_get().
+ */
+ void *ret;
+ assert(bin->runcur->nfree > 0);
+ ret = arena_run_reg_alloc(bin->runcur, bin_info);
+ if (run != NULL) {
+ arena_chunk_t *chunk;
+
+ /*
+ * arena_run_alloc_small() may have allocated run, or
+			 * it may have pulled run from the bin's run heap.
+ * Therefore it is unsafe to make any assumptions about
+ * how run has previously been used, and
+ * arena_bin_lower_run() must be called, as if a region
+ * were just deallocated from the run.
+ */
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ if (run->nfree == bin_info->nregs) {
+ arena_dalloc_bin_run(tsdn, arena, chunk, run,
+ bin);
+ } else
+ arena_bin_lower_run(arena, chunk, run, bin);
+ }
+ return (ret);
+ }
+
+ if (run == NULL)
+ return (NULL);
+
+ bin->runcur = run;
+
+ assert(bin->runcur->nfree > 0);
+
+ return (arena_run_reg_alloc(bin->runcur, bin_info));
+}
+
+void
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
+ szind_t binind, uint64_t prof_accumbytes)
+{
+ unsigned i, nfill;
+ arena_bin_t *bin;
+
+ assert(tbin->ncached == 0);
+
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+ prof_idump(tsdn);
+ bin = &arena->bins[binind];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+ tbin->lg_fill_div); i < nfill; i++) {
+ arena_run_t *run;
+ void *ptr;
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+ else
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+ if (ptr == NULL) {
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved just before tbin->avail before bailing out.
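+			 * For example, with nfill == 8 and i == 3, the three
+			 * pointers stored at *(tbin->avail - 8 .. - 6) are
+			 * moved to *(tbin->avail - 3 .. - 1) so that
+			 * ncached == 3 indexes them correctly.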
+ */
+ if (i > 0) {
+ memmove(tbin->avail - i, tbin->avail - nfill,
+ i * sizeof(void *));
+ }
+ break;
+ }
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+ true);
+ }
+ /* Insert such that low regions get used first. */
+ *(tbin->avail - nfill + i) = ptr;
+ }
+ if (config_stats) {
+ bin->stats.nmalloc += i;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.curregs += i;
+ bin->stats.nfills++;
+ tbin->tstats.nrequests = 0;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ tbin->ncached = i;
+ arena_decay_tick(tsdn, arena);
+}
+
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+
+ size_t redzone_size = bin_info->redzone_size;
+
+ if (zero) {
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
+ } else {
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+ }
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
+#endif
+static void
+arena_redzone_corruption(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
+ "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
+ after ? "after" : "before", ptr, usize, byte);
+}
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
+arena_redzone_corruption_t *arena_redzone_corruption =
+ JEMALLOC_N(n_arena_redzone_corruption);
+#endif
+
+static void
+arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
+{
+ bool error = false;
+
+ if (opt_junk_alloc) {
+ size_t size = bin_info->reg_size;
+ size_t redzone_size = bin_info->redzone_size;
+ size_t i;
+
+ for (i = 1; i <= redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
+ error = true;
+ arena_redzone_corruption(ptr, size, false, i,
+ *byte);
+ if (reset)
+ *byte = JEMALLOC_ALLOC_JUNK;
+ }
+ }
+ for (i = 0; i < redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
+ error = true;
+ arena_redzone_corruption(ptr, size, true, i,
+ *byte);
+ if (reset)
+ *byte = JEMALLOC_ALLOC_JUNK;
+ }
+ }
+ }
+
+ if (opt_abort && error)
+ abort();
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
+#endif
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t redzone_size = bin_info->redzone_size;
+
+ arena_redzones_validate(ptr, bin_info, false);
+ memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
+ bin_info->reg_interval);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+arena_dalloc_junk_small_t *arena_dalloc_junk_small =
+ JEMALLOC_N(n_arena_dalloc_junk_small);
+#endif
+
+void
+arena_quarantine_junk_small(void *ptr, size_t usize)
+{
+ szind_t binind;
+ arena_bin_info_t *bin_info;
+ cassert(config_fill);
+ assert(opt_junk_free);
+ assert(opt_quarantine);
+ assert(usize <= SMALL_MAXCLASS);
+
+ binind = size2index(usize);
+ bin_info = &arena_bin_info[binind];
+ arena_redzones_validate(ptr, bin_info, true);
+}
+
+static void *
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+{
+ void *ret;
+ arena_bin_t *bin;
+ size_t usize;
+ arena_run_t *run;
+
+ assert(binind < NBINS);
+ bin = &arena->bins[binind];
+ usize = index2size(binind);
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+ else
+ ret = arena_bin_malloc_hard(tsdn, arena, bin);
+
+ if (ret == NULL) {
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return (NULL);
+ }
+
+ if (config_stats) {
+ bin->stats.nmalloc++;
+ bin->stats.nrequests++;
+ bin->stats.curregs++;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
+ prof_idump(tsdn);
+
+ if (!zero) {
+ if (config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+ } else {
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+ memset(ret, 0, usize);
+ }
+
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+void *
+arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+{
+ void *ret;
+ size_t usize;
+ uintptr_t random_offset;
+ arena_run_t *run;
+ arena_chunk_map_misc_t *miscelm;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+
+ /* Large allocation. */
+ usize = index2size(binind);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_cache_oblivious) {
+ uint64_t r;
+
+ /*
+ * Compute a uniformly distributed offset within the first page
+ * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
+ * for 4 KiB pages and 64-byte cachelines.
+ */
+ r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
+ LG_CACHELINE, false);
+ random_offset = ((uintptr_t)r) << LG_CACHELINE;
+ } else
+ random_offset = 0;
+ run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
+ if (run == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (NULL);
+ }
+ miscelm = arena_run_to_miscelm(run);
+ ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
+ random_offset);
+ if (config_stats) {
+ szind_t index = binind - NBINS;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ if (config_prof)
+ idump = arena_prof_accum_locked(arena, usize);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ if (config_prof && idump)
+ prof_idump(tsdn);
+
+ if (!zero) {
+ if (config_fill) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ }
+
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+void *
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero)
+{
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
+ if (likely(size <= SMALL_MAXCLASS))
+ return (arena_malloc_small(tsdn, arena, ind, zero));
+ if (likely(size <= large_maxclass))
+ return (arena_malloc_large(tsdn, arena, ind, zero));
+ return (huge_malloc(tsdn, arena, index2size(ind), zero));
+}
+
+/* Only handles large allocations that require more than page alignment. */
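+/*
+ * The run is over-allocated by (alignment - PAGE) bytes plus large_pad, and
+ * the misaligned head and unused tail are then trimmed.  For example
+ * (assuming 4 KiB pages and large_pad == 0), usize == 16 KiB with
+ * alignment == 16 KiB requests a 28 KiB run; if that run begins 4 KiB past a
+ * 16 KiB boundary, 12 KiB of head is trimmed and no tail remains.
+ */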
+static void *
+arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
+{
+ void *ret;
+ size_t alloc_size, leadsize, trailsize;
+ arena_run_t *run;
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+ assert(usize == PAGE_CEILING(usize));
+
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
+ alignment = PAGE_CEILING(alignment);
+ alloc_size = usize + large_pad + alignment - PAGE;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
+ if (run == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (NULL);
+ }
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
+
+ leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
+ (uintptr_t)rpages;
+ assert(alloc_size >= leadsize + usize);
+ trailsize = alloc_size - leadsize - usize - large_pad;
+ if (leadsize != 0) {
+ arena_chunk_map_misc_t *head_miscelm = miscelm;
+ arena_run_t *head_run = run;
+
+ miscelm = arena_miscelm_get_mutable(chunk,
+ arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
+ LG_PAGE));
+ run = &miscelm->run;
+
+ arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
+ alloc_size - leadsize);
+ }
+ if (trailsize != 0) {
+ arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
+ trailsize, usize + large_pad, false);
+ }
+ if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
+ size_t run_ind =
+ arena_miscelm_to_pageind(arena_run_to_miscelm(run));
+ bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ run_ind) != 0);
+
+ assert(decommitted); /* Cause of OOM. */
+ arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (NULL);
+ }
+ ret = arena_miscelm_to_rpages(miscelm);
+
+ if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ if (config_fill && !zero) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+void *
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache)
+{
+ void *ret;
+
+ if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+ && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special run placement. */
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
+ tcache, true);
+ } else if (usize <= large_maxclass && alignment <= PAGE) {
+ /*
+ * Large; alignment doesn't require special run placement.
+ * However, the cached pointer may be at a random offset from
+ * the base of the run, so do some bit manipulation to retrieve
+ * the base.
+ */
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
+ tcache, true);
+ if (config_cache_oblivious)
+ ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
+ } else {
+ if (likely(usize <= large_maxclass)) {
+ ret = arena_palloc_large(tsdn, arena, usize, alignment,
+ zero);
+ } else if (likely(alignment <= chunksize))
+ ret = huge_malloc(tsdn, arena, usize, zero);
+ else {
+ ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+ }
+ }
+ return (ret);
+}
+
+void
+arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
+{
+ arena_chunk_t *chunk;
+ size_t pageind;
+ szind_t binind;
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
+ assert(size <= SMALL_MAXCLASS);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ binind = size2index(size);
+ assert(binind < NBINS);
+ arena_mapbits_large_binind_set(chunk, pageind, binind);
+
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == size);
+}
+
+static void
+arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+
+ /* Dissociate run from bin. */
+ if (run == bin->runcur)
+ bin->runcur = NULL;
+ else {
+ szind_t binind = arena_bin_index(extent_node_arena_get(
+ &chunk->node), bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ /*
+ * The following block's conditional is necessary because if the
+ * run only contains one region, then it never gets inserted
+		 * into the non-full runs heap.
+ */
+ if (bin_info->nregs != 1) {
+ arena_chunk_map_misc_t *miscelm =
+ arena_run_to_miscelm(run);
+
+ arena_run_heap_remove(&bin->runs, miscelm);
+ }
+ }
+}
+
+static void
+arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin)
+{
+
+ assert(run != bin->runcur);
+
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ /******************************/
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ /****************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats)
+ bin->stats.curruns--;
+}
+
+static void
+arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+
+ /*
+ * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+ * non-full run. It is okay to NULL runcur out rather than proactively
+ * keeping it pointing at the lowest non-full run.
+ */
+ if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ /* Switch runcur. */
+ if (bin->runcur->nfree > 0)
+ arena_bin_runs_insert(bin, bin->runcur);
+ bin->runcur = run;
+ if (config_stats)
+ bin->stats.reruns++;
+ } else
+ arena_bin_runs_insert(bin, run);
+}
+
+static void
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
+{
+ size_t pageind, rpages_ind;
+ arena_run_t *run;
+ arena_bin_t *bin;
+ arena_bin_info_t *bin_info;
+ szind_t binind;
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ binind = run->binind;
+ bin = &arena->bins[binind];
+ bin_info = &arena_bin_info[binind];
+
+ if (!junked && config_fill && unlikely(opt_junk_free))
+ arena_dalloc_junk_small(ptr, bin_info);
+
+ arena_run_reg_dalloc(run, ptr);
+ if (run->nfree == bin_info->nregs) {
+ arena_dissociate_bin_run(chunk, run, bin);
+ arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
+ } else if (run->nfree == 1 && run != bin->runcur)
+ arena_bin_lower_run(arena, chunk, run, bin);
+
+ if (config_stats) {
+ bin->stats.ndalloc++;
+ bin->stats.curregs--;
+ }
+}
+
+void
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
+{
+
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
+}
+
+void
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_bits_t *bitselm)
+{
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t rpages_ind;
+
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ bin = &arena->bins[run->binind];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
+
+void
+arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind)
+{
+ arena_chunk_map_bits_t *bitselm;
+
+ if (config_debug) {
+ /* arena_ptr_small_binind_get() does extra sanity checking. */
+ assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+ pageind)) != BININD_INVALID);
+ }
+ bitselm = arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
+ arena_decay_tick(tsdn, arena);
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
+#endif
+void
+arena_dalloc_junk_large(void *ptr, size_t usize)
+{
+
+ if (config_fill && unlikely(opt_junk_free))
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
+arena_dalloc_junk_large_t *arena_dalloc_junk_large =
+ JEMALLOC_N(n_arena_dalloc_junk_large);
+#endif
+
+static void
+arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, bool junked)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+ arena_run_t *run = &miscelm->run;
+
+ if (config_fill || config_stats) {
+ size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad;
+
+ if (!junked)
+ arena_dalloc_junk_large(ptr, usize);
+ if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= usize;
+ arena->stats.lstats[index].ndalloc++;
+ arena->stats.lstats[index].curruns--;
+ }
+ }
+
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
+}
+
+void
+arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr)
+{
+
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
+}
+
+void
+arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_decay_tick(tsdn, arena);
+}
+
+static void
+arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t size)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+ arena_run_t *run = &miscelm->run;
+
+ assert(size < oldsize);
+
+ /*
+ * Shrink the run, and make trailing pages available for other
+ * allocations.
+ */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
+ large_pad, true);
+ if (config_stats) {
+ szind_t oldindex = size2index(oldsize) - NBINS;
+ szind_t index = size2index(size) - NBINS;
+
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static bool
+arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t npages = (oldsize + large_pad) >> LG_PAGE;
+ size_t followsize;
+
+ assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad);
+
+ /* Try to extend the run. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+ pageind+npages) != 0)
+ goto label_fail;
+ followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+ if (oldsize + followsize >= usize_min) {
+ /*
+ * The next run is available and sufficiently large. Split the
+ * following run, then merge the first part with the existing
+ * allocation.
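+		 *
+		 * The loop below steps usize down one size class at a time
+		 * until oldsize + followsize can accommodate it.  For example
+		 * (illustrative sizes, assuming 4 KiB pages), with oldsize ==
+		 * 16 KiB, followsize == 12 KiB and usize_max == 32 KiB, usize
+		 * settles at 28 KiB and splitsize == 12 KiB.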
+ */
+ arena_run_t *run;
+ size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
+
+ usize = usize_max;
+ while (oldsize + followsize < usize)
+ usize = index2size(size2index(usize)-1);
+ assert(usize >= usize_min);
+ assert(usize >= oldsize);
+ splitsize = usize - oldsize;
+ if (splitsize == 0)
+ goto label_fail;
+
+ run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
+ if (arena_run_split_large(arena, run, splitsize, zero))
+ goto label_fail;
+
+ if (config_cache_oblivious && zero) {
+ /*
+ * Zero the trailing bytes of the original allocation's
+ * last page, since they are in an indeterminate state.
+ * There will always be trailing bytes, because ptr's
+ * offset from the beginning of the run is a multiple of
+ * CACHELINE in [0 .. PAGE).
+ */
+ void *zbase = (void *)((uintptr_t)ptr + oldsize);
+ void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
+ PAGE));
+ size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
+ assert(nzero > 0);
+ memset(zbase, 0, nzero);
+ }
+
+ size = oldsize + splitsize;
+ npages = (size + large_pad) >> LG_PAGE;
+
+ /*
+ * Mark the extended run as dirty if either portion of the run
+ * was dirty before allocation. This is rather pedantic,
+ * because there's not actually any sequence of events that
+ * could cause the resulting run to be passed to
+ * arena_run_dalloc() with the dirty argument set to false
+ * (which is when dirty flag consistency would really matter).
+ */
+ flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
+ arena_mapbits_dirty_get(chunk, pageind+npages-1);
+ flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, pageind, size + large_pad,
+ flag_dirty | (flag_unzeroed_mask &
+ arena_mapbits_unzeroed_get(chunk, pageind)));
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+npages-1)));
+
+ if (config_stats) {
+ szind_t oldindex = size2index(oldsize) - NBINS;
+ szind_t index = size2index(size) - NBINS;
+
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (false);
+ }
+label_fail:
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (true);
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
+#endif
+static void
+arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
+{
+
+ if (config_fill && unlikely(opt_junk_free)) {
+ memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
+ old_usize - usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
+arena_ralloc_junk_large_t *arena_ralloc_junk_large =
+ JEMALLOC_N(n_arena_ralloc_junk_large);
+#endif
+
+/*
+ * Try to resize a large allocation in place, in order to avoid copying.
+ * Growing always fails if the run that follows the allocation is already in
+ * use.
+ */
+static bool
+arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_t *arena;
+
+ if (oldsize == usize_max) {
+ /* Current size class is compatible and maximal. */
+ return (false);
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = extent_node_arena_get(&chunk->node);
+
+ if (oldsize < usize_max) {
+ bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
+ oldsize, usize_min, usize_max, zero);
+ if (config_fill && !ret && !zero) {
+ if (unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK,
+ isalloc(tsdn, ptr, config_prof) - oldsize);
+ } else if (unlikely(opt_zero)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ isalloc(tsdn, ptr, config_prof) - oldsize);
+ }
+ }
+ return (ret);
+ }
+
+ assert(oldsize > usize_max);
+	/* Fill before shrinking in order to avoid a race. */
+ arena_ralloc_junk_large(ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
+ return (false);
+}
+
+bool
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero)
+{
+ size_t usize_min, usize_max;
+
+	/* Callers passing non-zero extra must already have clamped it. */
+ assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+
+ if (unlikely(size > HUGE_MAXCLASS))
+ return (true);
+
+ usize_min = s2u(size);
+ usize_max = s2u(size + extra);
+ if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+ arena_chunk_t *chunk;
+
+ /*
+ * Avoid moving the allocation if the size class can be left the
+ * same.
+ */
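+		/*
+		 * For illustration (assuming the default size classes, where
+		 * 48 bytes is a small class): shrinking a 48-byte small
+		 * allocation to a requested 40 bytes still maps to the
+		 * 48-byte class, so the request is satisfied in place and
+		 * nothing is copied.
+		 */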
+ if (oldsize <= SMALL_MAXCLASS) {
+ assert(arena_bin_info[size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SMALL_MAXCLASS ||
+ size2index(usize_max) != size2index(oldsize)) &&
+ (size > oldsize || usize_max < oldsize))
+ return (true);
+ } else {
+ if (usize_max <= SMALL_MAXCLASS)
+ return (true);
+ if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero))
+ return (true);
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
+ return (false);
+ } else {
+ return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero));
+ }
+}
+
+static void *
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
+{
+
+ if (alignment == 0)
+ return (arena_malloc(tsdn, arena, usize, size2index(usize),
+ zero, tcache, true));
+ usize = sa2u(usize, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ return (NULL);
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+}
+
+void *
+arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero, tcache_t *tcache)
+{
+ void *ret;
+ size_t usize;
+
+ usize = s2u(size);
+ if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
+ return (NULL);
+
+ if (likely(usize <= large_maxclass)) {
+ size_t copysize;
+
+ /* Try to avoid moving the allocation. */
+ if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
+ zero))
+ return (ptr);
+
+ /*
+ * size and oldsize are different enough that we need to move
+ * the object. In that case, fall back to allocating new space
+ * and copying.
+ */
+ ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
+ alignment, zero, tcache);
+ if (ret == NULL)
+ return (NULL);
+
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
+
+ copysize = (usize < oldsize) ? usize : oldsize;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
+ } else {
+ ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+ zero, tcache);
+ }
+ return (ret);
+}
+
+dss_prec_t
+arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
+{
+ dss_prec_t ret;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ret = arena->dss_prec;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (ret);
+}
+
+bool
+arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
+{
+
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena->dss_prec = dss_prec;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (false);
+}
+
+ssize_t
+arena_lg_dirty_mult_default_get(void)
+{
+
+ return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
+}
+
+bool
+arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
+{
+
+ if (opt_purge != purge_mode_ratio)
+ return (true);
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+ atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
+ return (false);
+}
+
+ssize_t
+arena_decay_time_default_get(void)
+{
+
+ return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
+}
+
+bool
+arena_decay_time_default_set(ssize_t decay_time)
+{
+
+ if (opt_purge != purge_mode_decay)
+ return (true);
+ if (!arena_decay_time_valid(decay_time))
+ return (true);
+ atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
+ return (false);
+}
+
+static void
+arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty)
+{
+
+ *nthreads += arena_nthreads_get(arena, false);
+ *dss = dss_prec_names[arena->dss_prec];
+ *lg_dirty_mult = arena->lg_dirty_mult;
+ *decay_time = arena->decay.time;
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
+}
+
+void
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
+ decay_time, nactive, ndirty);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+void
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats)
+{
+ unsigned i;
+
+ cassert(config_stats);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
+ decay_time, nactive, ndirty);
+
+ astats->mapped += arena->stats.mapped;
+ astats->retained += arena->stats.retained;
+ astats->npurge += arena->stats.npurge;
+ astats->nmadvise += arena->stats.nmadvise;
+ astats->purged += arena->stats.purged;
+ astats->metadata_mapped += arena->stats.metadata_mapped;
+ astats->metadata_allocated += arena_metadata_allocated_get(arena);
+ astats->allocated_large += arena->stats.allocated_large;
+ astats->nmalloc_large += arena->stats.nmalloc_large;
+ astats->ndalloc_large += arena->stats.ndalloc_large;
+ astats->nrequests_large += arena->stats.nrequests_large;
+ astats->allocated_huge += arena->stats.allocated_huge;
+ astats->nmalloc_huge += arena->stats.nmalloc_huge;
+ astats->ndalloc_huge += arena->stats.ndalloc_huge;
+
+ for (i = 0; i < nlclasses; i++) {
+ lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+ lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+ lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+ lstats[i].curruns += arena->stats.lstats[i].curruns;
+ }
+
+ for (i = 0; i < nhclasses; i++) {
+ hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
+ hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
+ hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
+ bstats[i].nruns += bin->stats.nruns;
+ bstats[i].reruns += bin->stats.reruns;
+ bstats[i].curruns += bin->stats.curruns;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ }
+}
+
+unsigned
+arena_nthreads_get(arena_t *arena, bool internal)
+{
+
+ return (atomic_read_u(&arena->nthreads[internal]));
+}
+
+void
+arena_nthreads_inc(arena_t *arena, bool internal)
+{
+
+ atomic_add_u(&arena->nthreads[internal], 1);
+}
+
+void
+arena_nthreads_dec(arena_t *arena, bool internal)
+{
+
+ atomic_sub_u(&arena->nthreads[internal], 1);
+}
+
+arena_t *
+arena_new(tsdn_t *tsdn, unsigned ind)
+{
+ arena_t *arena;
+ unsigned i;
+
+ /*
+ * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
+ * because there is no way to clean up if base_alloc() OOMs.
+ */
+ if (config_stats) {
+ arena = (arena_t *)base_alloc(tsdn,
+ CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
+ + (nhclasses * sizeof(malloc_huge_stats_t)));
+ } else
+ arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
+ if (arena == NULL)
+ return (NULL);
+
+ arena->ind = ind;
+ arena->nthreads[0] = arena->nthreads[1] = 0;
+ if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
+ return (NULL);
+
+ if (config_stats) {
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+ arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)));
+ memset(arena->stats.lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
+ memset(arena->stats.hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
+ if (config_tcache)
+ ql_new(&arena->tcache_ql);
+ }
+
+ if (config_prof)
+ arena->prof_accumbytes = 0;
+
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ arena->offset_state = config_debug ? ind :
+ (size_t)(uintptr_t)arena;
+ }
+
+ arena->dss_prec = chunk_dss_prec_get();
+
+ ql_new(&arena->achunks);
+
+ arena->spare = NULL;
+
+ arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+ arena->purging = false;
+ arena->nactive = 0;
+ arena->ndirty = 0;
+
+ for (i = 0; i < NPSIZES; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
+ qr_new(&arena->runs_dirty, rd_link);
+ qr_new(&arena->chunks_cache, cc_link);
+
+ if (opt_purge == purge_mode_decay)
+ arena_decay_init(arena, arena_decay_time_default_get());
+
+ ql_new(&arena->huge);
+ if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
+ WITNESS_RANK_ARENA_HUGE))
+ return (NULL);
+
+ extent_tree_szad_new(&arena->chunks_szad_cached);
+ extent_tree_ad_new(&arena->chunks_ad_cached);
+ extent_tree_szad_new(&arena->chunks_szad_retained);
+ extent_tree_ad_new(&arena->chunks_ad_retained);
+ if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
+ WITNESS_RANK_ARENA_CHUNKS))
+ return (NULL);
+ ql_new(&arena->node_cache);
+ if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
+ WITNESS_RANK_ARENA_NODE_CACHE))
+ return (NULL);
+
+ arena->chunk_hooks = chunk_hooks_default;
+
+ /* Initialize bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock, "arena_bin",
+ WITNESS_RANK_ARENA_BIN))
+ return (NULL);
+ bin->runcur = NULL;
+ arena_run_heap_new(&bin->runs);
+ if (config_stats)
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ }
+
+ return (arena);
+}
+
+/*
+ * Calculate bin_info->run_size such that it meets the following constraints:
+ *
+ * *) bin_info->run_size <= arena_maxrun
+ * *) bin_info->nregs <= RUN_MAXREGS
+ *
+ * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
+ * these settings are all interdependent.
+ */
+static void
+bin_info_run_size_calc(arena_bin_info_t *bin_info)
+{
+ size_t pad_size;
+ size_t try_run_size, perfect_run_size, actual_run_size;
+ uint32_t try_nregs, perfect_nregs, actual_nregs;
+
+ /*
+ * Determine redzone size based on minimum alignment and minimum
+ * redzone size. Add padding to the end of the run if it is needed to
+ * align the regions. The padding allows each redzone to be half the
+ * minimum alignment; without the padding, each redzone would have to
+ * be twice as large in order to maintain alignment.
+ */
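+	/*
+	 * Worked example (assuming REDZONE_MINSIZE == 16): for reg_size == 96
+	 * the minimum alignment implied by the size is 32 (lowest set bit),
+	 * so each redzone is 32/2 == 16 bytes, pad_size is 16, and
+	 * reg_interval becomes 96 + 2*16 == 128 bytes.
+	 */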
+ if (config_fill && unlikely(opt_redzone)) {
+ size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
+ if (align_min <= REDZONE_MINSIZE) {
+ bin_info->redzone_size = REDZONE_MINSIZE;
+ pad_size = 0;
+ } else {
+ bin_info->redzone_size = align_min >> 1;
+ pad_size = bin_info->redzone_size;
+ }
+ } else {
+ bin_info->redzone_size = 0;
+ pad_size = 0;
+ }
+ bin_info->reg_interval = bin_info->reg_size +
+ (bin_info->redzone_size << 1);
+
+ /*
+ * Compute run size under ideal conditions (no redzones, no limit on run
+ * size).
+ */
+ try_run_size = PAGE;
+ try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ do {
+ perfect_run_size = try_run_size;
+ perfect_nregs = try_nregs;
+
+ try_run_size += PAGE;
+ try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+ assert(perfect_nregs <= RUN_MAXREGS);
+
+ actual_run_size = perfect_run_size;
+ actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+ bin_info->reg_interval);
+
+ /*
+ * Redzones can require enough padding that not even a single region can
+ * fit within the number of pages that would normally be dedicated to a
+ * run for this size class. Increase the run size until at least one
+ * region fits.
+ */
+ while (actual_nregs == 0) {
+ assert(config_fill && unlikely(opt_redzone));
+
+ actual_run_size += PAGE;
+ actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+ bin_info->reg_interval);
+ }
+
+ /*
+ * Make sure that the run will fit within an arena chunk.
+ */
+ while (actual_run_size > arena_maxrun) {
+ actual_run_size -= PAGE;
+ actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+ bin_info->reg_interval);
+ }
+ assert(actual_nregs > 0);
+ assert(actual_run_size == s2u(actual_run_size));
+
+ /* Copy final settings. */
+ bin_info->run_size = actual_run_size;
+ bin_info->nregs = actual_nregs;
+ bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size);
+
+ assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+ * bin_info->reg_interval) + pad_size == bin_info->run_size);
+}
+
+static void
+bin_info_init(void)
+{
+ arena_bin_info_t *bin_info;
+
+#define BIN_INFO_INIT_bin_yes(index, size) \
+ bin_info = &arena_bin_info[index]; \
+ bin_info->reg_size = size; \
+ bin_info_run_size_calc(bin_info); \
+ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+#define BIN_INFO_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef BIN_INFO_INIT_bin_yes
+#undef BIN_INFO_INIT_bin_no
+#undef SC
+}
+
+void
+arena_boot(void)
+{
+ unsigned i;
+
+ arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+ arena_decay_time_default_set(opt_decay_time);
+
+ /*
+ * Compute the header size such that it is large enough to contain the
+ * page map. The page map is biased to omit entries for the header
+ * itself, so some iteration is necessary to compute the map bias.
+ *
+ * 1) Compute safe header_size and map_bias values that include enough
+ * space for an unbiased page map.
+ * 2) Refine map_bias based on (1) to omit the header pages in the page
+ * map. The resulting map_bias may be one too small.
+ * 3) Refine map_bias based on (2). The result will be >= the result
+ * from (2), and will always be correct.
+ */
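+	/*
+	 * Purely illustrative numbers: with 4 KiB pages, 512-page chunks, and
+	 * roughly 100 bytes of map metadata per page, pass (1) yields a
+	 * header of ~51200 bytes -> map_bias = 13; pass (2) recomputes with
+	 * 499 mapped pages (~49900 bytes) -> still 13; pass (3) confirms 13.
+	 */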
+ map_bias = 0;
+ for (i = 0; i < 3; i++) {
+ size_t header_size = offsetof(arena_chunk_t, map_bits) +
+ ((sizeof(arena_chunk_map_bits_t) +
+ sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
+ map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
+ }
+ assert(map_bias > 0);
+
+ map_misc_offset = offsetof(arena_chunk_t, map_bits) +
+ sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
+
+ arena_maxrun = chunksize - (map_bias << LG_PAGE);
+ assert(arena_maxrun > 0);
+ large_maxclass = index2size(size2index(chunksize)-1);
+ if (large_maxclass > arena_maxrun) {
+ /*
+ * For small chunk sizes it's possible for there to be fewer
+ * non-header pages available than are necessary to serve the
+ * size classes just below chunksize.
+ */
+ large_maxclass = arena_maxrun;
+ }
+ assert(large_maxclass > 0);
+ nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
+ nhclasses = NSIZES - nlclasses - NBINS;
+
+ bin_info_init();
+}
+
+void
+arena_prefork0(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->lock);
+}
+
+void
+arena_prefork1(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
+}
+
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
+}
+
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena)
+{
+ unsigned i;
+
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+ malloc_mutex_prefork(tsdn, &arena->huge_mtx);
+}
+
+void
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
+{
+ unsigned i;
+
+ malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->lock);
+}
+
+void
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
+{
+ unsigned i;
+
+ malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->lock);
+}
diff --git a/memory/jemalloc/src/src/atomic.c b/memory/jemalloc/src/src/atomic.c
new file mode 100644
index 000000000..77ee31311
--- /dev/null
+++ b/memory/jemalloc/src/src/atomic.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_ATOMIC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/memory/jemalloc/src/src/base.c b/memory/jemalloc/src/src/base.c
new file mode 100644
index 000000000..81b0801fd
--- /dev/null
+++ b/memory/jemalloc/src/src/base.c
@@ -0,0 +1,177 @@
+#define JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_mutex_t base_mtx;
+static extent_tree_t base_avail_szad;
+static extent_node_t *base_nodes;
+static size_t base_allocated;
+static size_t base_resident;
+static size_t base_mapped;
+
+/******************************************************************************/
+
+static extent_node_t *
+base_node_try_alloc(tsdn_t *tsdn)
+{
+ extent_node_t *node;
+
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+
+ if (base_nodes == NULL)
+ return (NULL);
+ node = base_nodes;
+ base_nodes = *(extent_node_t **)node;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ return (node);
+}
+
+static void
+base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
+{
+
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
+}
+
+static extent_node_t *
+base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
+{
+ extent_node_t *node;
+ size_t csize, nsize;
+ void *addr;
+
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+ assert(minsize != 0);
+ node = base_node_try_alloc(tsdn);
+ /* Allocate enough space to also carve a node out if necessary. */
+ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+ csize = CHUNK_CEILING(minsize + nsize);
+ addr = chunk_alloc_base(csize);
+ if (addr == NULL) {
+ if (node != NULL)
+ base_node_dalloc(tsdn, node);
+ return (NULL);
+ }
+ base_mapped += csize;
+ if (node == NULL) {
+ node = (extent_node_t *)addr;
+ addr = (void *)((uintptr_t)addr + nsize);
+ csize -= nsize;
+ if (config_stats) {
+ base_allocated += nsize;
+ base_resident += PAGE_CEILING(nsize);
+ }
+ }
+ extent_node_init(node, NULL, addr, csize, true, true);
+ return (node);
+}
+
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
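+/*
+ * For example, the rtree used for chunk lookups obtains its node arrays this
+ * way (see chunks_rtree_node_alloc() in chunk.c, which calls
+ * base_alloc(TSDN_NULL, nelms * sizeof(rtree_node_elm_t))).
+ */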
+void *
+base_alloc(tsdn_t *tsdn, size_t size)
+{
+ void *ret;
+ size_t csize, usize;
+ extent_node_t *node;
+ extent_node_t key;
+
+ /*
+	 * Round size up to the nearest multiple of the cacheline size, so that
+ * there is no chance of false cache line sharing.
+ */
+ csize = CACHELINE_CEILING(size);
+
+ usize = s2u(csize);
+ extent_node_init(&key, NULL, NULL, usize, false, false);
+ malloc_mutex_lock(tsdn, &base_mtx);
+ node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+ if (node != NULL) {
+ /* Use existing space. */
+ extent_tree_szad_remove(&base_avail_szad, node);
+ } else {
+ /* Try to allocate more space. */
+ node = base_chunk_alloc(tsdn, csize);
+ }
+ if (node == NULL) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = extent_node_addr_get(node);
+ if (extent_node_size_get(node) > csize) {
+ extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+ extent_node_size_set(node, extent_node_size_get(node) - csize);
+ extent_tree_szad_insert(&base_avail_szad, node);
+ } else
+ base_node_dalloc(tsdn, node);
+ if (config_stats) {
+ base_allocated += csize;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
+ PAGE_CEILING((uintptr_t)ret);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
+label_return:
+ malloc_mutex_unlock(tsdn, &base_mtx);
+ return (ret);
+}
+
+void
+base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+ size_t *mapped)
+{
+
+ malloc_mutex_lock(tsdn, &base_mtx);
+ assert(base_allocated <= base_resident);
+ assert(base_resident <= base_mapped);
+ *allocated = base_allocated;
+ *resident = base_resident;
+ *mapped = base_mapped;
+ malloc_mutex_unlock(tsdn, &base_mtx);
+}
+
+bool
+base_boot(void)
+{
+
+ if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
+ return (true);
+ extent_tree_szad_new(&base_avail_szad);
+ base_nodes = NULL;
+
+ return (false);
+}
+
+void
+base_prefork(tsdn_t *tsdn)
+{
+
+ malloc_mutex_prefork(tsdn, &base_mtx);
+}
+
+void
+base_postfork_parent(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_parent(tsdn, &base_mtx);
+}
+
+void
+base_postfork_child(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_child(tsdn, &base_mtx);
+}
diff --git a/memory/jemalloc/src/src/bitmap.c b/memory/jemalloc/src/src/bitmap.c
new file mode 100644
index 000000000..ac0f3b381
--- /dev/null
+++ b/memory/jemalloc/src/src/bitmap.c
@@ -0,0 +1,111 @@
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+#ifdef USE_TREE
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+ unsigned i;
+ size_t group_count;
+
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ /*
+ * Compute the number of groups necessary to store nbits bits, and
+ * progressively work upward through the levels until reaching a level
+ * that requires only one group.
+ */
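+	/*
+	 * Concrete illustration (assuming 64-bit groups): nbits == 512 needs
+	 * 8 groups at level 0 and 1 summary group at level 1, so nlevels ends
+	 * up as 2 and 9 groups are used in total.
+	 */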
+ binfo->levels[0].group_offset = 0;
+ group_count = BITMAP_BITS2GROUPS(nbits);
+ for (i = 1; group_count > 1; i++) {
+ assert(i < BITMAP_MAX_LEVELS);
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ group_count = BITMAP_BITS2GROUPS(group_count);
+ }
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
+ binfo->nlevels = i;
+ binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+ return (binfo->levels[binfo->nlevels].group_offset);
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t extra;
+ unsigned i;
+
+ /*
+ * Bits are actually inverted with regard to the external bitmap
+ * interface, so the bitmap starts out with all 1 bits, except for
+ * trailing unused bits (if any). Note that each group uses bit 0 to
+ * correspond to the first logical bit in the group, so extra bits
+ * are the most significant bits of the last group.
+ */
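+	/*
+	 * For instance (64-bit groups), nbits == 100 gives extra == 28, so
+	 * the last level-0 group keeps only its 36 low bits set; the loop
+	 * below trims the summary levels the same way.
+	 */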
+ memset(bitmap, 0xffU, bitmap_size(binfo));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ for (i = 1; i < binfo->nlevels; i++) {
+ size_t group_count = binfo->levels[i].group_offset -
+ binfo->levels[i-1].group_offset;
+ extra = (BITMAP_GROUP_NBITS - (group_count &
+ BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
+}
+
+#else /* USE_TREE */
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
+ binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+ return (binfo->ngroups);
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t extra;
+
+ memset(bitmap, 0xffU, bitmap_size(binfo));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->ngroups - 1] >>= extra;
+}
+
+#endif /* USE_TREE */
+
+size_t
+bitmap_size(const bitmap_info_t *binfo)
+{
+
+ return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
+}
diff --git a/memory/jemalloc/src/src/chunk.c b/memory/jemalloc/src/src/chunk.c
new file mode 100644
index 000000000..07e26f77c
--- /dev/null
+++ b/memory/jemalloc/src/src/chunk.c
@@ -0,0 +1,783 @@
+#define JEMALLOC_CHUNK_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+const char *opt_dss = DSS_DEFAULT;
+size_t opt_lg_chunk = 0;
+
+/* Used exclusively for gdump triggering. */
+static size_t curchunks;
+static size_t highchunks;
+
+rtree_t chunks_rtree;
+
+/* Various chunk-related settings. */
+size_t chunksize;
+size_t chunksize_mask; /* (chunksize - 1). */
+size_t chunk_npages;
+
+static void *chunk_alloc_default(void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+ size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+ size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+ chunk_alloc_default,
+ chunk_dalloc_default,
+ chunk_commit_default,
+ chunk_decommit_default,
+ chunk_purge_default,
+ chunk_split_default,
+ chunk_merge_default
+};
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void chunk_record(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
+ bool committed);
+
+/******************************************************************************/
+
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+ return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
+{
+ chunk_hooks_t chunk_hooks;
+
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks = chunk_hooks_get_locked(arena);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+
+ return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+ chunk_hooks_t old_chunk_hooks;
+
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ old_chunk_hooks = arena->chunk_hooks;
+ /*
+ * Copy each field atomically so that it is impossible for readers to
+ * see partially updated pointers. There are places where readers only
+ * need one hook function pointer (therefore no need to copy the
+ * entirety of arena->chunk_hooks), and stale reads do not affect
+ * correctness, so they perform unlocked reads.
+ */
+#define ATOMIC_COPY_HOOK(n) do { \
+ union { \
+ chunk_##n##_t **n; \
+ void **v; \
+ } u; \
+ u.n = &arena->chunk_hooks.n; \
+ atomic_write_p(u.v, chunk_hooks->n); \
+} while (0)
+ ATOMIC_COPY_HOOK(alloc);
+ ATOMIC_COPY_HOOK(dalloc);
+ ATOMIC_COPY_HOOK(commit);
+ ATOMIC_COPY_HOOK(decommit);
+ ATOMIC_COPY_HOOK(purge);
+ ATOMIC_COPY_HOOK(split);
+ ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+
+ return (old_chunk_hooks);
+}
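+/*
+ * Hypothetical usage sketch (not taken from this file): an embedder would
+ * typically start from chunk_hooks_default and override individual members,
+ * e.g.
+ *
+ *	chunk_hooks_t hooks = chunk_hooks_default;
+ *	hooks.alloc = my_alloc;
+ *	chunk_hooks_set(tsdn, arena, &hooks);
+ *
+ * where my_alloc is a placeholder with the same signature as
+ * chunk_alloc_default(); applications normally install hooks through the
+ * "arena.<i>.chunk_hooks" mallctl rather than by calling chunk_hooks_set()
+ * directly.
+ */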
+
+static void
+chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool locked)
+{
+ static const chunk_hooks_t uninitialized_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+
+ if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+ 0) {
+ *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+ chunk_hooks_get(tsdn, arena);
+ }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
+}
+
+bool
+chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == chunk);
+
+ if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+ return (true);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nadd = (size == 0) ? 1 : size / chunksize;
+ size_t cur = atomic_add_z(&curchunks, nadd);
+ size_t high = atomic_read_z(&highchunks);
+ while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highchunks update race.
+ */
+ high = atomic_read_z(&highchunks);
+ }
+ if (cur > high && prof_gdump_get_unlocked())
+ prof_gdump(tsdn);
+ }
+
+ return (false);
+}
+
+void
+chunk_deregister(const void *chunk, const extent_node_t *node)
+{
+ bool err;
+
+ err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
+ assert(!err);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nsub = (size == 0) ? 1 : size / chunksize;
+ assert(atomic_read_z(&curchunks) >= nsub);
+ atomic_sub_z(&curchunks, nsub);
+ }
+}
+
+/*
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
+ */
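+/*
+ * Example: given cached extents of two chunks at addresses A and B (A < B)
+ * and one extent of three chunks, a two-chunk request selects the extent at
+ * A: the smallest sufficient size wins, and the lowest address breaks ties.
+ */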
+static extent_node_t *
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size)
+{
+ extent_node_t key;
+
+ assert(size == CHUNK_CEILING(size));
+
+ extent_node_init(&key, arena, NULL, size, false, false);
+ return (extent_tree_szad_nsearch(chunks_szad, &key));
+}
+
+static void *
+chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+ bool dalloc_node)
+{
+ void *ret;
+ extent_node_t *node;
+ size_t alloc_size, leadsize, trailsize;
+ bool zeroed, committed;
+
+ assert(new_addr == NULL || alignment == chunksize);
+ /*
+ * Cached chunks use the node linkage embedded in their headers, in
+ * which case dalloc_node is true, and new_addr is non-NULL because
+ * we're operating on a specific chunk.
+ */
+ assert(dalloc_node || new_addr != NULL);
+
+ alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ if (new_addr != NULL) {
+ extent_node_t key;
+ extent_node_init(&key, arena, new_addr, alloc_size, false,
+ false);
+ node = extent_tree_ad_search(chunks_ad, &key);
+ } else {
+ node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+ alloc_size);
+ }
+ if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+ size)) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ return (NULL);
+ }
+ leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
+ alignment) - (uintptr_t)extent_node_addr_get(node);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_node_size_get(node) >= leadsize + size);
+ trailsize = extent_node_size_get(node) - leadsize - size;
+ ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+ zeroed = extent_node_zeroed_get(node);
+ if (zeroed)
+ *zero = true;
+ committed = extent_node_committed_get(node);
+ if (committed)
+ *commit = true;
+ /* Split the lead. */
+ if (leadsize != 0 &&
+ chunk_hooks->split(extent_node_addr_get(node),
+ extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ return (NULL);
+ }
+ /* Remove node from the tree. */
+ extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_ad_remove(chunks_ad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ if (leadsize != 0) {
+ /* Insert the leading space as a smaller chunk. */
+ extent_node_size_set(node, leadsize);
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ node = NULL;
+ }
+ if (trailsize != 0) {
+ /* Split the trail. */
+ if (chunk_hooks->split(ret, size + trailsize, size,
+ trailsize, false, arena->ind)) {
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(tsdn, arena, node);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize, zeroed,
+ committed);
+ return (NULL);
+ }
+ /* Insert the trailing space as a smaller chunk. */
+ if (node == NULL) {
+ node = arena_node_alloc(tsdn, arena);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks,
+ chunks_szad, chunks_ad, cache, ret, size +
+ trailsize, zeroed, committed);
+ return (NULL);
+ }
+ }
+ extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+ trailsize, zeroed, committed);
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ node = NULL;
+ }
+ if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size, zeroed, committed);
+ return (NULL);
+ }
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+
+ assert(dalloc_node || node != NULL);
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(tsdn, arena, node);
+ if (*zero) {
+ if (!zeroed)
+ memset(ret, 0, size);
+ else if (config_debug) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ }
+ return (ret);
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
+ * advantage of this to avoid demanding zeroed chunks, but taking advantage of
+ * them if they are returned.
+ */
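+/*
+ * For example, the mmap-based path below typically obtains fresh
+ * demand-zeroed pages and toggles *zero to true in that case even when the
+ * caller passed false.
+ */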
+static void *
+chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
+ /* mmap. */
+ if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+ NULL)
+ return (ret);
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
+
+ /* All strategies for allocation failed. */
+ return (NULL);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+ void *ret;
+ bool zero, commit;
+
+ /*
+ * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
+ * because it's critical that chunk_alloc_base() return untouched
+ * demand-zeroed virtual memory.
+ */
+ zero = true;
+ commit = true;
+ ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+ return (ret);
+}
+
+void *
+chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+ bool dalloc_node)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = chunk_recycle(tsdn, arena, chunk_hooks,
+ &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
+ new_addr, size, alignment, zero, commit, dalloc_node);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ return (ret);
+}
+
+static arena_t *
+chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
+{
+ arena_t *arena;
+
+ arena = arena_get(tsdn, arena_ind, false);
+ /*
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
+ */
+ assert(arena != NULL);
+ return (arena);
+}
+
+static void *
+chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+
+ ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ commit, arena->dss_prec);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+ return (ret);
+}
+
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+ tsdn_t *tsdn;
+ arena_t *arena;
+
+ tsdn = tsdn_fetch();
+ arena = chunk_arena_get(tsdn, arena_ind);
+
+ return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
+ zero, commit));
+}
+
+static void *
+chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = chunk_recycle(tsdn, arena, chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true);
+
+ if (config_stats && ret != NULL)
+ arena->stats.retained -= size;
+
+ return (ret);
+}
+
+void *
+chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+
+ ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
+ alignment, zero, commit);
+ if (ret == NULL) {
+ if (chunk_hooks->alloc == chunk_alloc_default) {
+ /* Call directly to propagate tsdn. */
+ ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
+ size, alignment, zero, commit);
+ } else {
+ ret = chunk_hooks->alloc(new_addr, size, alignment,
+ zero, commit, arena->ind);
+ }
+
+ if (ret == NULL)
+ return (NULL);
+
+ if (config_valgrind && chunk_hooks->alloc !=
+ chunk_alloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+ }
+
+ return (ret);
+}
+
+static void
+chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed)
+{
+ bool unzeroed;
+ extent_node_t *node, *prev;
+ extent_node_t key;
+
+ assert(!cache || !zeroed);
+ unzeroed = cache || !zeroed;
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+ false, false);
+ node = extent_tree_ad_nsearch(chunks_ad, &key);
+ /* Try to coalesce forward. */
+ if (node != NULL && extent_node_addr_get(node) ==
+ extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+ committed && !chunk_hooks->merge(chunk, size,
+ extent_node_addr_get(node), extent_node_size_get(node), false,
+ arena->ind)) {
+ /*
+ * Coalesce chunk with the following address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert from/into chunks_szad.
+ */
+ extent_tree_szad_remove(chunks_szad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, chunk);
+ extent_node_size_set(node, size + extent_node_size_get(node));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+ !unzeroed);
+ extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ } else {
+ /* Coalescing forward failed, so insert a new node. */
+ node = arena_node_alloc(tsdn, arena);
+ if (node == NULL) {
+ /*
+ * Node allocation failed, which is an exceedingly
+ * unlikely failure. Leak chunk after making sure its
+ * pages have already been purged, so that this is only
+ * a virtual memory leak.
+ */
+ if (cache) {
+ chunk_purge_wrapper(tsdn, arena, chunk_hooks,
+ chunk, size, 0, size);
+ }
+ goto label_return;
+ }
+ extent_node_init(node, arena, chunk, size, !unzeroed,
+ committed);
+ extent_tree_ad_insert(chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ }
+
+ /* Try to coalesce backward. */
+ prev = extent_tree_ad_prev(chunks_ad, node);
+ if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+ extent_node_size_get(prev)) == chunk &&
+ extent_node_committed_get(prev) == committed &&
+ !chunk_hooks->merge(extent_node_addr_get(prev),
+ extent_node_size_get(prev), chunk, size, false, arena->ind)) {
+ /*
+ * Coalesce chunk with the previous address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert node from/into chunks_szad.
+ */
+ extent_tree_szad_remove(chunks_szad, prev);
+ extent_tree_ad_remove(chunks_ad, prev);
+ arena_chunk_cache_maybe_remove(arena, prev, cache);
+ extent_tree_szad_remove(chunks_szad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, extent_node_addr_get(prev));
+ extent_node_size_set(node, extent_node_size_get(prev) +
+ extent_node_size_get(node));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
+ extent_node_zeroed_get(node));
+ extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+
+ arena_node_dalloc(tsdn, arena, prev);
+ }
+
+label_return:
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+}
+
+void
+chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, false, committed);
+ arena_maybe_purge(tsdn, arena);
+}
+
+static bool
+chunk_dalloc_default_impl(void *chunk, size_t size)
+{
+
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
+
+ return (chunk_dalloc_default_impl(chunk, size));
+}
+
+void
+chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool zeroed, bool committed)
+{
+ bool err;
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ /* Try to deallocate. */
+ if (chunk_hooks->dalloc == chunk_dalloc_default) {
+ /* Call directly to propagate tsdn. */
+ err = chunk_dalloc_default_impl(chunk, size);
+ } else
+ err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+
+ if (!err)
+ return;
+ /* Try to decommit; purge if that fails. */
+ if (committed) {
+ committed = chunk_hooks->decommit(chunk, size, 0, size,
+ arena->ind);
+ }
+ zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
+ arena->ind);
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+
+ if (config_stats)
+ arena->stats.retained += size;
+}
+
+static bool
+chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+bool
+chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length)
+{
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ return (false);
+}
+
+static bool
+chunk_merge_default_impl(void *chunk_a, void *chunk_b)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
+ return (true);
+
+ return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ return (chunk_merge_default_impl(chunk_a, chunk_b));
+}
+
+static rtree_node_elm_t *
+chunks_rtree_node_alloc(size_t nelms)
+{
+
+ return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
+ sizeof(rtree_node_elm_t)));
+}
+
+bool
+chunk_boot(void)
+{
+#ifdef _WIN32
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+
+ /*
+	 * Verify that the actual page size is equal to, or an integral multiple
+	 * of, the configured page size.
+ */
+ if (info.dwPageSize & ((1U << LG_PAGE) - 1))
+ return (true);
+
+ /*
+ * Configure chunksize (if not set) to match granularity (usually 64K),
+	 * so that pages_map() will always take the fast path.
+ */
+ if (!opt_lg_chunk) {
+ opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
+ - 1;
+ }
+#else
+ if (!opt_lg_chunk)
+ opt_lg_chunk = LG_CHUNK_DEFAULT;
+#endif
+
+ /* Set variables according to the value of opt_lg_chunk. */
+ chunksize = (ZU(1) << opt_lg_chunk);
+ assert(chunksize >= PAGE);
+ chunksize_mask = chunksize - 1;
+ chunk_npages = (chunksize >> LG_PAGE);
+
+ if (have_dss)
+ chunk_dss_boot();
+ if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk), chunks_rtree_node_alloc, NULL))
+ return (true);
+
+ return (false);
+}
diff --git a/memory/jemalloc/src/src/chunk_dss.c b/memory/jemalloc/src/src/chunk_dss.c
new file mode 100644
index 000000000..85a13548f
--- /dev/null
+++ b/memory/jemalloc/src/src/chunk_dss.c
@@ -0,0 +1,237 @@
+#define JEMALLOC_CHUNK_DSS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+/* Data. */
+
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
+
+/*
+ * Current dss precedence default, used when creating new arenas. NB: This is
+ * stored as unsigned rather than dss_prec_t because in principle there's no
+ * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
+ * atomic operations to synchronize the setting.
+ */
+static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
+
+/* Base address of the DSS. */
+static void *dss_base;
+/* Atomic boolean indicating whether the DSS is exhausted. */
+static unsigned dss_exhausted;
+/* Atomic current upper limit on DSS addresses. */
+static void *dss_max;
+
+/******************************************************************************/
+
+static void *
+chunk_dss_sbrk(intptr_t increment)
+{
+
+#ifdef JEMALLOC_DSS
+ return (sbrk(increment));
+#else
+ not_implemented();
+ return (NULL);
+#endif
+}
+
+dss_prec_t
+chunk_dss_prec_get(void)
+{
+ dss_prec_t ret;
+
+ if (!have_dss)
+ return (dss_prec_disabled);
+ ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
+ return (ret);
+}
+
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
+ atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
+ return (false);
+}
+
+static void *
+chunk_dss_max_update(void *new_addr)
+{
+ void *max_cur;
+ spin_t spinner;
+
+	 * Get the current end of the DSS as max_cur and ensure that dss_max is
+ * Get the current end of the DSS as max_cur and assure that dss_max is
+ * up to date.
+ */
+ spin_init(&spinner);
+ while (true) {
+ void *max_prev = atomic_read_p(&dss_max);
+
+ max_cur = chunk_dss_sbrk(0);
+ if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
+ /*
+ * Another thread optimistically updated dss_max. Wait
+ * for it to finish.
+ */
+ spin_adaptive(&spinner);
+ continue;
+ }
+ if (!atomic_cas_p(&dss_max, max_prev, max_cur))
+ break;
+ }
+ /* Fixed new_addr can only be supported if it is at the edge of DSS. */
+ if (new_addr != NULL && max_cur != new_addr)
+ return (NULL);
+
+ return (max_cur);
+}
+
+void *
+chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit)
+{
+ cassert(have_dss);
+ assert(size > 0 && (size & chunksize_mask) == 0);
+ assert(alignment > 0 && (alignment & chunksize_mask) == 0);
+
+ /*
+ * sbrk() uses a signed increment argument, so take care not to
+ * interpret a huge allocation request as a negative increment.
+ */
+ if ((intptr_t)size < 0)
+ return (NULL);
+
+ if (!atomic_read_u(&dss_exhausted)) {
+ /*
+ * The loop is necessary to recover from races with other
+ * threads that are using the DSS for something other than
+ * malloc.
+ */
+ while (true) {
+ void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
+ size_t gap_size, cpad_size;
+ intptr_t incr;
+
+ max_cur = chunk_dss_max_update(new_addr);
+ if (max_cur == NULL)
+ goto label_oom;
+
+ /*
+ * Calculate how much padding is necessary to
+ * chunk-align the end of the DSS.
+ */
+ gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+ chunksize_mask;
+ /*
+ * Compute how much chunk-aligned pad space (if any) is
+ * necessary to satisfy alignment. This space can be
+ * recycled for later use.
+ */
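+			/*
+			 * When alignment == chunksize (the common case for
+			 * arena chunks), ret coincides with cpad and cpad_size
+			 * is 0; only larger alignments leave a non-zero cpad,
+			 * which is handed back via chunk_dalloc_wrapper() on
+			 * success below.
+			 */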
+ cpad = (void *)((uintptr_t)dss_max + gap_size);
+ ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
+ alignment);
+ cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+ dss_next = (void *)((uintptr_t)ret + size);
+ if ((uintptr_t)ret < (uintptr_t)dss_max ||
+ (uintptr_t)dss_next < (uintptr_t)dss_max)
+ goto label_oom; /* Wrap-around. */
+ incr = gap_size + cpad_size + size;
+
+ /*
+ * Optimistically update dss_max, and roll back below if
+ * sbrk() fails. No other thread will try to extend the
+ * DSS while dss_max is greater than the current DSS
+ * max reported by sbrk(0).
+ */
+ if (atomic_cas_p(&dss_max, max_cur, dss_next))
+ continue;
+
+ /* Try to allocate. */
+ dss_prev = chunk_dss_sbrk(incr);
+ if (dss_prev == max_cur) {
+ /* Success. */
+ if (cpad_size != 0) {
+ chunk_hooks_t chunk_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+ chunk_dalloc_wrapper(tsdn, arena,
+ &chunk_hooks, cpad, cpad_size,
+ false, true);
+ }
+ if (*zero) {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ ret, size);
+ memset(ret, 0, size);
+ }
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
+ return (ret);
+ }
+
+ /*
+ * Failure, whether due to OOM or a race with a raw
+ * sbrk() call from outside the allocator. Try to roll
+ * back optimistic dss_max update; if rollback fails,
+ * it's due to another caller of this function having
+ * succeeded since this invocation started, in which
+ * case rollback is not necessary.
+ */
+ atomic_cas_p(&dss_max, dss_next, max_cur);
+ if (dss_prev == (void *)-1) {
+ /* OOM. */
+ atomic_write_u(&dss_exhausted, (unsigned)true);
+ goto label_oom;
+ }
+ }
+ }
+label_oom:
+ return (NULL);
+}
+
+static bool
+chunk_in_dss_helper(void *chunk, void *max)
+{
+
+ return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
+ (uintptr_t)max);
+}
+
+bool
+chunk_in_dss(void *chunk)
+{
+
+ cassert(have_dss);
+
+ return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
+}
+
+bool
+chunk_dss_mergeable(void *chunk_a, void *chunk_b)
+{
+ void *max;
+
+ cassert(have_dss);
+
+ max = atomic_read_p(&dss_max);
+ return (chunk_in_dss_helper(chunk_a, max) ==
+ chunk_in_dss_helper(chunk_b, max));
+}
+
+void
+chunk_dss_boot(void)
+{
+
+ cassert(have_dss);
+
+ dss_base = chunk_dss_sbrk(0);
+ dss_exhausted = (unsigned)(dss_base == (void *)-1);
+ dss_max = dss_base;
+}
+
+/******************************************************************************/
diff --git a/memory/jemalloc/src/src/chunk_mmap.c b/memory/jemalloc/src/src/chunk_mmap.c
new file mode 100644
index 000000000..73fc497af
--- /dev/null
+++ b/memory/jemalloc/src/src/chunk_mmap.c
@@ -0,0 +1,78 @@
+#define JEMALLOC_CHUNK_MMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+ size_t alloc_size;
+
+ alloc_size = size + alignment - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ do {
+ void *pages;
+ size_t leadsize;
+ pages = pages_map(NULL, alloc_size, commit);
+ if (pages == NULL)
+ return (NULL);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+ (uintptr_t)pages;
+ ret = pages_trim(pages, alloc_size, leadsize, size, commit);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ *zero = true;
+ return (ret);
+}
+
+void *
+chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit)
+{
+ void *ret;
+ size_t offset;
+
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in one or two calls to
+ * pages_unmap().
+ *
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
+ */
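+	/*
+	 * Editorial example (hedged; 4 KiB pages and 2 MiB chunk alignment are
+	 * illustrative defaults, not guaranteed by this file): to obtain one
+	 * 2 MiB chunk aligned to 2 MiB, chunk_alloc_mmap_slow() above maps
+	 * 2 MiB + 2 MiB - 4 KiB bytes and has pages_trim() discard the
+	 * unaligned head and tail; the fast path below skips that extra work
+	 * whenever pages_map() happens to return an address that already
+	 * satisfies the requested alignment.
+	 */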
+
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = pages_map(new_addr, size, commit);
+ if (ret == NULL || ret == new_addr)
+ return (ret);
+ assert(new_addr == NULL);
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ pages_unmap(ret, size);
+ return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
+ }
+
+ assert(ret != NULL);
+ *zero = true;
+ return (ret);
+}
+
+bool
+chunk_dalloc_mmap(void *chunk, size_t size)
+{
+
+ if (config_munmap)
+ pages_unmap(chunk, size);
+
+ return (!config_munmap);
+}
diff --git a/memory/jemalloc/src/src/ckh.c b/memory/jemalloc/src/src/ckh.c
new file mode 100644
index 000000000..159bd8ae1
--- /dev/null
+++ b/memory/jemalloc/src/src/ckh.c
@@ -0,0 +1,569 @@
+/*
+ *******************************************************************************
+ * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
+ * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
+ * functions are employed. The original cuckoo hashing algorithm was described
+ * in:
+ *
+ * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
+ * 51(2):122-144.
+ *
+ * Generalization of cuckoo hashing was discussed in:
+ *
+ * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
+ * alternative to traditional hash tables. In Proceedings of the 7th
+ * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
+ * January 2006.
+ *
+ * This implementation uses precisely two hash functions because that is the
+ * fewest that can work, and supporting multiple hashes is an implementation
+ * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
+ * that shows approximate expected maximum load factors for various
+ * configurations:
+ *
+ * | #cells/bucket |
+ * #hashes | 1 | 2 | 4 | 8 |
+ * --------+-------+-------+-------+-------+
+ * 1 | 0.006 | 0.006 | 0.03 | 0.12 |
+ * 2 | 0.49 | 0.86 |>0.93< |>0.96< |
+ * 3 | 0.91 | 0.97 | 0.98 | 0.999 |
+ * 4 | 0.97 | 0.99 | 0.999 | |
+ *
+ * The number of cells per bucket is chosen such that a bucket fits in one cache
+ * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
+ * respectively.
+ *
+ ******************************************************************************/
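+/*
+ * Editorial sketch (hedged; not upstream text): a typical caller, such as the
+ * heap profiling code, uses this module roughly as follows, with every
+ * function named below defined later in this file:
+ *
+ *   ckh_t ckh;
+ *   if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp))
+ *           handle OOM; ckh_new() returns true on failure
+ *   ckh_insert(tsd, &ckh, key, data);    returns true if insertion failed
+ *   ckh_search(&ckh, key, NULL, &data);  returns false if the key was found
+ *   ckh_delete(tsd, &ckh);
+ */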
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
+
+/******************************************************************************/
+
+/*
+ * Search bucket for key and return the cell number if found; SIZE_T_MAX
+ * otherwise.
+ */
+JEMALLOC_INLINE_C size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
+{
+ ckhc_t *cell;
+ unsigned i;
+
+ for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+ if (cell->key != NULL && ckh->keycomp(key, cell->key))
+ return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+ }
+
+ return (SIZE_T_MAX);
+}
+
+/*
+ * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
+ */
+JEMALLOC_INLINE_C size_t
+ckh_isearch(ckh_t *ckh, const void *key)
+{
+ size_t hashes[2], bucket, cell;
+
+ assert(ckh != NULL);
+
+ ckh->hash(key, hashes);
+
+ /* Search primary bucket. */
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ cell = ckh_bucket_search(ckh, bucket, key);
+ if (cell != SIZE_T_MAX)
+ return (cell);
+
+ /* Search secondary bucket. */
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ cell = ckh_bucket_search(ckh, bucket, key);
+ return (cell);
+}
+
+JEMALLOC_INLINE_C bool
+ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
+ const void *data)
+{
+ ckhc_t *cell;
+ unsigned offset, i;
+
+ /*
+ * Cycle through the cells in the bucket, starting at a random position.
+ * The randomness avoids worst-case search overhead as buckets fill up.
+ */
+ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
+ for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
+ ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
+ if (cell->key == NULL) {
+ cell->key = key;
+ cell->data = data;
+ ckh->count++;
+ return (false);
+ }
+ }
+
+ return (true);
+}
+
+/*
+ * No space is available in bucket. Randomly evict an item, then try to find an
+ * alternate location for that item. Iteratively repeat this
+ * eviction/relocation procedure until either success or detection of an
+ * eviction/relocation bucket cycle.
+ */
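+/*
+ * Editorial walk-through (a sketch, not upstream text): when argbucket is
+ * full, a random victim is swapped out and reinserted into its alternate
+ * bucket; if that bucket is also full, the process repeats with a new victim.
+ * The chain ends either when ckh_try_bucket_insert() finds an empty cell
+ * (success) or when an evicted item's alternate bucket is argbucket again (a
+ * cycle), in which case the caller, e.g. ckh_insert(), grows the table and
+ * retries.
+ */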
+JEMALLOC_INLINE_C bool
+ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
+ void const **argdata)
+{
+ const void *key, *data, *tkey, *tdata;
+ ckhc_t *cell;
+ size_t hashes[2], bucket, tbucket;
+ unsigned i;
+
+ bucket = argbucket;
+ key = *argkey;
+ data = *argdata;
+ while (true) {
+ /*
+ * Choose a random item within the bucket to evict. This is
+ * critical to correct function, because without (eventually)
+ * evicting all items within a bucket during iteration, it
+ * would be possible to get stuck in an infinite loop if there
+ * were an item for which both hashes indicated the same
+ * bucket.
+ */
+ i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+ assert(cell->key != NULL);
+
+ /* Swap cell->{key,data} and {key,data} (evict). */
+ tkey = cell->key; tdata = cell->data;
+ cell->key = key; cell->data = data;
+ key = tkey; data = tdata;
+
+#ifdef CKH_COUNT
+ ckh->nrelocs++;
+#endif
+
+ /* Find the alternate bucket for the evicted item. */
+ ckh->hash(key, hashes);
+ tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (tbucket == bucket) {
+ tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
+ - 1);
+ /*
+ * It may be that (tbucket == bucket) still, if the
+ * item's hashes both indicate this bucket. However,
+ * we are guaranteed to eventually escape this bucket
+ * during iteration, assuming pseudo-random item
+ * selection (true randomness would make infinite
+ * looping a remote possibility). The reason we can
+ * never get trapped forever is that there are two
+ * cases:
+ *
+ * 1) This bucket == argbucket, so we will quickly
+ * detect an eviction cycle and terminate.
+ * 2) An item was evicted to this bucket from another,
+ * which means that at least one item in this bucket
+ * has hashes that indicate distinct buckets.
+ */
+ }
+ /* Check for a cycle. */
+ if (tbucket == argbucket) {
+ *argkey = key;
+ *argdata = data;
+ return (true);
+ }
+
+ bucket = tbucket;
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ return (false);
+ }
+}
+
+JEMALLOC_INLINE_C bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
+{
+ size_t hashes[2], bucket;
+ const void *key = *argkey;
+ const void *data = *argdata;
+
+ ckh->hash(key, hashes);
+
+ /* Try to insert in primary bucket. */
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ return (false);
+
+ /* Try to insert in secondary bucket. */
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ return (false);
+
+ /*
+ * Try to find a place for this item via iterative eviction/relocation.
+ */
+ return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+}
+
+/*
+ * Try to rebuild the hash table from scratch by inserting all items from the
+ * old table into the new.
+ */
+JEMALLOC_INLINE_C bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
+{
+ size_t count, i, nins;
+ const void *key, *data;
+
+ count = ckh->count;
+ ckh->count = 0;
+ for (i = nins = 0; nins < count; i++) {
+ if (aTab[i].key != NULL) {
+ key = aTab[i].key;
+ data = aTab[i].data;
+ if (ckh_try_insert(ckh, &key, &data)) {
+ ckh->count = count;
+ return (true);
+ }
+ nins++;
+ }
+ }
+
+ return (false);
+}
+
+static bool
+ckh_grow(tsd_t *tsd, ckh_t *ckh)
+{
+ bool ret;
+ ckhc_t *tab, *ttab;
+ unsigned lg_prevbuckets, lg_curcells;
+
+#ifdef CKH_COUNT
+ ckh->ngrows++;
+#endif
+
+ /*
+ * It is possible (though unlikely, given well behaved hashes) that the
+ * table will have to be doubled more than once in order to create a
+ * usable table.
+ */
+ lg_prevbuckets = ckh->lg_curbuckets;
+ lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
+ while (true) {
+ size_t usize;
+
+ lg_curcells++;
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ ret = true;
+ goto label_return;
+ }
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+ true, NULL, true, arena_ichoose(tsd, NULL));
+ if (tab == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ /* Swap in new table. */
+ ttab = ckh->tab;
+ ckh->tab = tab;
+ tab = ttab;
+ ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ break;
+ }
+
+ /* Rebuilding failed, so back out partially rebuilt table. */
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ ckh->tab = tab;
+ ckh->lg_curbuckets = lg_prevbuckets;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+static void
+ckh_shrink(tsd_t *tsd, ckh_t *ckh)
+{
+ ckhc_t *tab, *ttab;
+ size_t usize;
+ unsigned lg_prevbuckets, lg_curcells;
+
+ /*
+ * It is possible (though unlikely, given well behaved hashes) that the
+ * table rebuild will fail.
+ */
+ lg_prevbuckets = ckh->lg_curbuckets;
+ lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ return;
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+ true, arena_ichoose(tsd, NULL));
+ if (tab == NULL) {
+ /*
+ * An OOM error isn't worth propagating, since it doesn't
+ * prevent this or future operations from proceeding.
+ */
+ return;
+ }
+ /* Swap in new table. */
+ ttab = ckh->tab;
+ ckh->tab = tab;
+ tab = ttab;
+ ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+#ifdef CKH_COUNT
+ ckh->nshrinks++;
+#endif
+ return;
+ }
+
+ /* Rebuilding failed, so back out partially rebuilt table. */
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ ckh->tab = tab;
+ ckh->lg_curbuckets = lg_prevbuckets;
+#ifdef CKH_COUNT
+ ckh->nshrinkfails++;
+#endif
+}
+
+bool
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp)
+{
+ bool ret;
+ size_t mincells, usize;
+ unsigned lg_mincells;
+
+ assert(minitems > 0);
+ assert(hash != NULL);
+ assert(keycomp != NULL);
+
+#ifdef CKH_COUNT
+ ckh->ngrows = 0;
+ ckh->nshrinks = 0;
+ ckh->nshrinkfails = 0;
+ ckh->ninserts = 0;
+ ckh->nrelocs = 0;
+#endif
+ ckh->prng_state = 42; /* Value doesn't really matter. */
+ ckh->count = 0;
+
+ /*
+ * Find the minimum power of 2 that is large enough to fit minitems
+ * entries. We are using (2+,2) cuckoo hashing, which has an expected
+ * maximum load factor of at least ~0.86, so 0.75 is a conservative load
+	 * factor that will typically allow minitems entries to fit without ever
+ * growing the table.
+ */
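+	/*
+	 * Editorial example (illustrative numbers only): with minitems = 64,
+	 * mincells = ((64 + 2) / 3) << 2 = 88, which the loop below rounds up
+	 * to the next power of two, 128 cells, for an initial load factor of
+	 * 64/128 = 0.5, comfortably under the 0.75 target.
+	 */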
+ assert(LG_CKH_BUCKET_CELLS > 0);
+ mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
+ for (lg_mincells = LG_CKH_BUCKET_CELLS;
+ (ZU(1) << lg_mincells) < mincells;
+ lg_mincells++)
+ ; /* Do nothing. */
+ ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+ ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+ ckh->hash = hash;
+ ckh->keycomp = keycomp;
+
+ usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ ret = true;
+ goto label_return;
+ }
+ ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+ NULL, true, arena_ichoose(tsd, NULL));
+ if (ckh->tab == NULL) {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+void
+ckh_delete(tsd_t *tsd, ckh_t *ckh)
+{
+
+ assert(ckh != NULL);
+
+#ifdef CKH_VERBOSE
+ malloc_printf(
+ "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+ " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+ " nrelocs: %"FMTu64"\n", __func__, ckh,
+ (unsigned long long)ckh->ngrows,
+ (unsigned long long)ckh->nshrinks,
+ (unsigned long long)ckh->nshrinkfails,
+ (unsigned long long)ckh->ninserts,
+ (unsigned long long)ckh->nrelocs);
+#endif
+
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ if (config_debug)
+ memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+}
+
+size_t
+ckh_count(ckh_t *ckh)
+{
+
+ assert(ckh != NULL);
+
+ return (ckh->count);
+}
+
+bool
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
+{
+ size_t i, ncells;
+
+ for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
+ LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
+ if (ckh->tab[i].key != NULL) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[i].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[i].data;
+ *tabind = i + 1;
+ return (false);
+ }
+ }
+
+ return (true);
+}
+
+bool
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
+{
+ bool ret;
+
+ assert(ckh != NULL);
+ assert(ckh_search(ckh, key, NULL, NULL));
+
+#ifdef CKH_COUNT
+ ckh->ninserts++;
+#endif
+
+ while (ckh_try_insert(ckh, &key, &data)) {
+ if (ckh_grow(tsd, ckh)) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+bool
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data)
+{
+ size_t cell;
+
+ assert(ckh != NULL);
+
+ cell = ckh_isearch(ckh, searchkey);
+ if (cell != SIZE_T_MAX) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[cell].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[cell].data;
+ ckh->tab[cell].key = NULL;
+ ckh->tab[cell].data = NULL; /* Not necessary. */
+
+ ckh->count--;
+ /* Try to halve the table if it is less than 1/4 full. */
+ if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
+ > ckh->lg_minbuckets) {
+ /* Ignore error due to OOM. */
+ ckh_shrink(tsd, ckh);
+ }
+
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
+{
+ size_t cell;
+
+ assert(ckh != NULL);
+
+ cell = ckh_isearch(ckh, searchkey);
+ if (cell != SIZE_T_MAX) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[cell].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[cell].data;
+ return (false);
+ }
+
+ return (true);
+}
+
+void
+ckh_string_hash(const void *key, size_t r_hash[2])
+{
+
+ hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
+}
+
+bool
+ckh_string_keycomp(const void *k1, const void *k2)
+{
+
+ assert(k1 != NULL);
+ assert(k2 != NULL);
+
+ return (strcmp((char *)k1, (char *)k2) ? false : true);
+}
+
+void
+ckh_pointer_hash(const void *key, size_t r_hash[2])
+{
+ union {
+ const void *v;
+ size_t i;
+ } u;
+
+ assert(sizeof(u.v) == sizeof(u.i));
+ u.v = key;
+ hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
+}
+
+bool
+ckh_pointer_keycomp(const void *k1, const void *k2)
+{
+
+ return ((k1 == k2) ? true : false);
+}
diff --git a/memory/jemalloc/src/src/ctl.c b/memory/jemalloc/src/src/ctl.c
new file mode 100644
index 000000000..bc78b2055
--- /dev/null
+++ b/memory/jemalloc/src/src/ctl.c
@@ -0,0 +1,2254 @@
+#define JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/*
+ * ctl_mtx protects the following:
+ * - ctl_stats.*
+ */
+static malloc_mutex_t ctl_mtx;
+static bool ctl_initialized;
+static uint64_t ctl_epoch;
+static ctl_stats_t ctl_stats;
+
+/******************************************************************************/
+/* Helpers for named and indexed nodes. */
+
+JEMALLOC_INLINE_C const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node)
+{
+
+ return ((node->named) ? (const ctl_named_node_t *)node : NULL);
+}
+
+JEMALLOC_INLINE_C const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index)
+{
+ const ctl_named_node_t *children = ctl_named_node(node->children);
+
+ return (children ? &children[index] : NULL);
+}
+
+JEMALLOC_INLINE_C const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node)
+{
+
+ return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
+}
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+#define CTL_PROTO(n) \
+static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+
+#define INDEX_PROTO(n) \
+static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
+ const size_t *mib, size_t miblen, size_t i);
+
+static bool ctl_arena_init(ctl_arena_stats_t *astats);
+static void ctl_arena_clear(ctl_arena_stats_t *astats);
+static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
+ arena_t *arena);
+static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
+ ctl_arena_stats_t *astats);
+static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
+static bool ctl_grow(tsdn_t *tsdn);
+static void ctl_refresh(tsdn_t *tsdn);
+static bool ctl_init(tsdn_t *tsdn);
+static int ctl_lookup(tsdn_t *tsdn, const char *name,
+ ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
+
+CTL_PROTO(version)
+CTL_PROTO(epoch)
+CTL_PROTO(thread_tcache_enabled)
+CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_prof_name)
+CTL_PROTO(thread_prof_active)
+CTL_PROTO(thread_arena)
+CTL_PROTO(thread_allocated)
+CTL_PROTO(thread_allocatedp)
+CTL_PROTO(thread_deallocated)
+CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_cache_oblivious)
+CTL_PROTO(config_debug)
+CTL_PROTO(config_fill)
+CTL_PROTO(config_lazy_lock)
+CTL_PROTO(config_malloc_conf)
+CTL_PROTO(config_munmap)
+CTL_PROTO(config_prof)
+CTL_PROTO(config_prof_libgcc)
+CTL_PROTO(config_prof_libunwind)
+CTL_PROTO(config_stats)
+CTL_PROTO(config_tcache)
+CTL_PROTO(config_tls)
+CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
+CTL_PROTO(config_xmalloc)
+CTL_PROTO(opt_abort)
+CTL_PROTO(opt_dss)
+CTL_PROTO(opt_lg_chunk)
+CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_purge)
+CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_decay_time)
+CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_junk)
+CTL_PROTO(opt_zero)
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
+CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_lg_tcache_max)
+CTL_PROTO(opt_prof)
+CTL_PROTO(opt_prof_prefix)
+CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_prof_thread_active_init)
+CTL_PROTO(opt_lg_prof_sample)
+CTL_PROTO(opt_lg_prof_interval)
+CTL_PROTO(opt_prof_gdump)
+CTL_PROTO(opt_prof_final)
+CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
+static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
+CTL_PROTO(arena_i_purge)
+CTL_PROTO(arena_i_decay)
+CTL_PROTO(arena_i_reset)
+CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_lg_dirty_mult)
+CTL_PROTO(arena_i_decay_time)
+CTL_PROTO(arena_i_chunk_hooks)
+INDEX_PROTO(arena_i)
+CTL_PROTO(arenas_bin_i_size)
+CTL_PROTO(arenas_bin_i_nregs)
+CTL_PROTO(arenas_bin_i_run_size)
+INDEX_PROTO(arenas_bin_i)
+CTL_PROTO(arenas_lrun_i_size)
+INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_hchunk_i_size)
+INDEX_PROTO(arenas_hchunk_i)
+CTL_PROTO(arenas_narenas)
+CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_lg_dirty_mult)
+CTL_PROTO(arenas_decay_time)
+CTL_PROTO(arenas_quantum)
+CTL_PROTO(arenas_page)
+CTL_PROTO(arenas_tcache_max)
+CTL_PROTO(arenas_nbins)
+CTL_PROTO(arenas_nhbins)
+CTL_PROTO(arenas_nlruns)
+CTL_PROTO(arenas_nhchunks)
+CTL_PROTO(arenas_extend)
+CTL_PROTO(prof_thread_active_init)
+CTL_PROTO(prof_active)
+CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_reset)
+CTL_PROTO(prof_interval)
+CTL_PROTO(lg_prof_sample)
+CTL_PROTO(stats_arenas_i_small_allocated)
+CTL_PROTO(stats_arenas_i_small_nmalloc)
+CTL_PROTO(stats_arenas_i_small_ndalloc)
+CTL_PROTO(stats_arenas_i_small_nrequests)
+CTL_PROTO(stats_arenas_i_large_allocated)
+CTL_PROTO(stats_arenas_i_large_nmalloc)
+CTL_PROTO(stats_arenas_i_large_ndalloc)
+CTL_PROTO(stats_arenas_i_large_nrequests)
+CTL_PROTO(stats_arenas_i_huge_allocated)
+CTL_PROTO(stats_arenas_i_huge_nmalloc)
+CTL_PROTO(stats_arenas_i_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_huge_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
+CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
+CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
+CTL_PROTO(stats_arenas_i_bins_j_nfills)
+CTL_PROTO(stats_arenas_i_bins_j_nflushes)
+CTL_PROTO(stats_arenas_i_bins_j_nruns)
+CTL_PROTO(stats_arenas_i_bins_j_nreruns)
+CTL_PROTO(stats_arenas_i_bins_j_curruns)
+INDEX_PROTO(stats_arenas_i_bins_j)
+CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
+CTL_PROTO(stats_arenas_i_lruns_j_curruns)
+INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
+CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
+INDEX_PROTO(stats_arenas_i_hchunks_j)
+CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
+CTL_PROTO(stats_arenas_i_decay_time)
+CTL_PROTO(stats_arenas_i_pactive)
+CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_mapped)
+CTL_PROTO(stats_arenas_i_retained)
+CTL_PROTO(stats_arenas_i_npurge)
+CTL_PROTO(stats_arenas_i_nmadvise)
+CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
+INDEX_PROTO(stats_arenas_i)
+CTL_PROTO(stats_cactive)
+CTL_PROTO(stats_allocated)
+CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
+CTL_PROTO(stats_mapped)
+CTL_PROTO(stats_retained)
+
+/******************************************************************************/
+/* mallctl tree. */
+
+/* Maximum tree depth. */
+#define CTL_MAX_DEPTH 6
+
+#define NAME(n) {true}, n
+#define CHILD(t, c) \
+ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
+ (ctl_node_t *)c##_node, \
+ NULL
+#define CTL(c) 0, NULL, c##_ctl
+
+/*
+ * Only handles internal indexed nodes, since there are currently no external
+ * ones.
+ */
+#define INDEX(i) {false}, i##_index
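+
+/*
+ * Editorial note (a sketch of how the tables below are consumed, not upstream
+ * text): ctl_lookup() walks this tree one dot-separated component at a time,
+ * matching named children by string comparison and indexed children (e.g. the
+ * "0" in "arena.0.purge") by parsing the component as a number and calling the
+ * node's *_index() function; the walk stops at the first node whose ctl
+ * handler is non-NULL.
+ */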
+
+static const ctl_named_node_t thread_tcache_node[] = {
+ {NAME("enabled"), CTL(thread_tcache_enabled)},
+ {NAME("flush"), CTL(thread_tcache_flush)}
+};
+
+static const ctl_named_node_t thread_prof_node[] = {
+ {NAME("name"), CTL(thread_prof_name)},
+ {NAME("active"), CTL(thread_prof_active)}
+};
+
+static const ctl_named_node_t thread_node[] = {
+ {NAME("arena"), CTL(thread_arena)},
+ {NAME("allocated"), CTL(thread_allocated)},
+ {NAME("allocatedp"), CTL(thread_allocatedp)},
+ {NAME("deallocated"), CTL(thread_deallocated)},
+ {NAME("deallocatedp"), CTL(thread_deallocatedp)},
+ {NAME("tcache"), CHILD(named, thread_tcache)},
+ {NAME("prof"), CHILD(named, thread_prof)}
+};
+
+static const ctl_named_node_t config_node[] = {
+ {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("malloc_conf"), CTL(config_malloc_conf)},
+ {NAME("munmap"), CTL(config_munmap)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("tcache"), CTL(config_tcache)},
+ {NAME("tls"), CTL(config_tls)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
+};
+
+static const ctl_named_node_t opt_node[] = {
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("dss"), CTL(opt_dss)},
+ {NAME("lg_chunk"), CTL(opt_lg_chunk)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("purge"), CTL(opt_purge)},
+ {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(opt_decay_time)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
+};
+
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("create"), CTL(tcache_create)},
+ {NAME("flush"), CTL(tcache_flush)},
+ {NAME("destroy"), CTL(tcache_destroy)}
+};
+
+static const ctl_named_node_t arena_i_node[] = {
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("decay"), CTL(arena_i_decay)},
+ {NAME("reset"), CTL(arena_i_reset)},
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(arena_i_decay_time)},
+ {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
+};
+static const ctl_named_node_t super_arena_i_node[] = {
+ {NAME(""), CHILD(named, arena_i)}
+};
+
+static const ctl_indexed_node_t arena_node[] = {
+ {INDEX(arena_i)}
+};
+
+static const ctl_named_node_t arenas_bin_i_node[] = {
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+};
+static const ctl_named_node_t super_arenas_bin_i_node[] = {
+ {NAME(""), CHILD(named, arenas_bin_i)}
+};
+
+static const ctl_indexed_node_t arenas_bin_node[] = {
+ {INDEX(arenas_bin_i)}
+};
+
+static const ctl_named_node_t arenas_lrun_i_node[] = {
+ {NAME("size"), CTL(arenas_lrun_i_size)}
+};
+static const ctl_named_node_t super_arenas_lrun_i_node[] = {
+ {NAME(""), CHILD(named, arenas_lrun_i)}
+};
+
+static const ctl_indexed_node_t arenas_lrun_node[] = {
+ {INDEX(arenas_lrun_i)}
+};
+
+static const ctl_named_node_t arenas_hchunk_i_node[] = {
+ {NAME("size"), CTL(arenas_hchunk_i_size)}
+};
+static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
+ {NAME(""), CHILD(named, arenas_hchunk_i)}
+};
+
+static const ctl_indexed_node_t arenas_hchunk_node[] = {
+ {INDEX(arenas_hchunk_i)}
+};
+
+static const ctl_named_node_t arenas_node[] = {
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("initialized"), CTL(arenas_initialized)},
+ {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(arenas_decay_time)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
+ {NAME("nlruns"), CTL(arenas_nlruns)},
+ {NAME("lrun"), CHILD(indexed, arenas_lrun)},
+ {NAME("nhchunks"), CTL(arenas_nhchunks)},
+ {NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
+ {NAME("extend"), CTL(arenas_extend)}
+};
+
+static const ctl_named_node_t prof_node[] = {
+ {NAME("thread_active_init"), CTL(prof_thread_active_init)},
+ {NAME("active"), CTL(prof_active)},
+ {NAME("dump"), CTL(prof_dump)},
+ {NAME("gdump"), CTL(prof_gdump)},
+ {NAME("reset"), CTL(prof_reset)},
+ {NAME("interval"), CTL(prof_interval)},
+ {NAME("lg_sample"), CTL(lg_prof_sample)}
+};
+
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+ {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
+ {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
+};
+
+static const ctl_named_node_t stats_arenas_i_small_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_large_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_huge_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
+ {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
+ {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+};
+static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
+ {INDEX(stats_arenas_i_bins_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
+ {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+};
+static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
+ {INDEX(stats_arenas_i_lruns_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
+ {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
+ {INDEX(stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_node[] = {
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("retained"), CTL(stats_arenas_i_retained)},
+ {NAME("npurge"), CTL(stats_arenas_i_npurge)},
+ {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
+ {NAME("purged"), CTL(stats_arenas_i_purged)},
+ {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
+ {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i)}
+};
+
+static const ctl_indexed_node_t stats_arenas_node[] = {
+ {INDEX(stats_arenas_i)}
+};
+
+static const ctl_named_node_t stats_node[] = {
+ {NAME("cactive"), CTL(stats_cactive)},
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
+ {NAME("resident"), CTL(stats_resident)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("retained"), CTL(stats_retained)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
+};
+
+static const ctl_named_node_t root_node[] = {
+ {NAME("version"), CTL(version)},
+ {NAME("epoch"), CTL(epoch)},
+ {NAME("thread"), CHILD(named, thread)},
+ {NAME("config"), CHILD(named, config)},
+ {NAME("opt"), CHILD(named, opt)},
+ {NAME("tcache"), CHILD(named, tcache)},
+ {NAME("arena"), CHILD(indexed, arena)},
+ {NAME("arenas"), CHILD(named, arenas)},
+ {NAME("prof"), CHILD(named, prof)},
+ {NAME("stats"), CHILD(named, stats)}
+};
+static const ctl_named_node_t super_root_node[] = {
+ {NAME(""), CHILD(named, root)}
+};
+
+#undef NAME
+#undef CHILD
+#undef CTL
+#undef INDEX
+
+/******************************************************************************/
+
+static bool
+ctl_arena_init(ctl_arena_stats_t *astats)
+{
+
+ if (astats->lstats == NULL) {
+ astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (astats->lstats == NULL)
+ return (true);
+ }
+
+ if (astats->hstats == NULL) {
+ astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
+ sizeof(malloc_huge_stats_t));
+ if (astats->hstats == NULL)
+ return (true);
+ }
+
+ return (false);
+}
+
+static void
+ctl_arena_clear(ctl_arena_stats_t *astats)
+{
+
+ astats->nthreads = 0;
+ astats->dss = dss_prec_names[dss_prec_limit];
+ astats->lg_dirty_mult = -1;
+ astats->decay_time = -1;
+ astats->pactive = 0;
+ astats->pdirty = 0;
+ if (config_stats) {
+ memset(&astats->astats, 0, sizeof(arena_stats_t));
+ astats->allocated_small = 0;
+ astats->nmalloc_small = 0;
+ astats->ndalloc_small = 0;
+ astats->nrequests_small = 0;
+ memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
+ memset(astats->lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ memset(astats->hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
+ }
+}
+
+static void
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
+{
+ unsigned i;
+
+ if (config_stats) {
+ arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
+ &cstats->lg_dirty_mult, &cstats->decay_time,
+ &cstats->pactive, &cstats->pdirty, &cstats->astats,
+ cstats->bstats, cstats->lstats, cstats->hstats);
+
+ for (i = 0; i < NBINS; i++) {
+ cstats->allocated_small += cstats->bstats[i].curregs *
+ index2size(i);
+ cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+ cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+ cstats->nrequests_small += cstats->bstats[i].nrequests;
+ }
+ } else {
+ arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
+ &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
+ &cstats->pactive, &cstats->pdirty);
+ }
+}
+
+static void
+ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
+{
+ unsigned i;
+
+ sstats->nthreads += astats->nthreads;
+ sstats->pactive += astats->pactive;
+ sstats->pdirty += astats->pdirty;
+
+ if (config_stats) {
+ sstats->astats.mapped += astats->astats.mapped;
+ sstats->astats.retained += astats->astats.retained;
+ sstats->astats.npurge += astats->astats.npurge;
+ sstats->astats.nmadvise += astats->astats.nmadvise;
+ sstats->astats.purged += astats->astats.purged;
+
+ sstats->astats.metadata_mapped +=
+ astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated +=
+ astats->astats.metadata_allocated;
+
+ sstats->allocated_small += astats->allocated_small;
+ sstats->nmalloc_small += astats->nmalloc_small;
+ sstats->ndalloc_small += astats->ndalloc_small;
+ sstats->nrequests_small += astats->nrequests_small;
+
+ sstats->astats.allocated_large +=
+ astats->astats.allocated_large;
+ sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sstats->astats.nrequests_large +=
+ astats->astats.nrequests_large;
+
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+
+ for (i = 0; i < NBINS; i++) {
+ sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sstats->bstats[i].nrequests +=
+ astats->bstats[i].nrequests;
+ sstats->bstats[i].curregs += astats->bstats[i].curregs;
+ if (config_tcache) {
+ sstats->bstats[i].nfills +=
+ astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
+ }
+ sstats->bstats[i].nruns += astats->bstats[i].nruns;
+ sstats->bstats[i].reruns += astats->bstats[i].reruns;
+ sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ }
+
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests +=
+ astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
+
+ for (i = 0; i < nhclasses; i++) {
+ sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+ sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].curhchunks +=
+ astats->hstats[i].curhchunks;
+ }
+ }
+}
+
+static void
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
+{
+ ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
+ ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
+
+ ctl_arena_clear(astats);
+ ctl_arena_stats_amerge(tsdn, astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+}
+
+static bool
+ctl_grow(tsdn_t *tsdn)
+{
+ ctl_arena_stats_t *astats;
+
+ /* Initialize new arena. */
+ if (arena_init(tsdn, ctl_stats.narenas) == NULL)
+ return (true);
+
+ /* Allocate extended arena stats. */
+ astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
+ sizeof(ctl_arena_stats_t));
+ if (astats == NULL)
+ return (true);
+
+ /* Initialize the new astats element. */
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+ memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
+ a0dalloc(astats);
+ return (true);
+ }
+ /* Swap merged stats to their new location. */
+ {
+ ctl_arena_stats_t tstats;
+ memcpy(&tstats, &astats[ctl_stats.narenas],
+ sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas],
+ &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+ sizeof(ctl_arena_stats_t));
+ }
+ a0dalloc(ctl_stats.arenas);
+ ctl_stats.arenas = astats;
+ ctl_stats.narenas++;
+
+ return (false);
+}
+
+static void
+ctl_refresh(tsdn_t *tsdn)
+{
+ unsigned i;
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+ /*
+	 * Clear sum stats, since ctl_arena_refresh() will merge the per-arena
+	 * stats into them.
+ */
+ ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
+
+ for (i = 0; i < ctl_stats.narenas; i++)
+ tarenas[i] = arena_get(tsdn, i, false);
+
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ bool initialized = (tarenas[i] != NULL);
+
+ ctl_stats.arenas[i].initialized = initialized;
+ if (initialized)
+ ctl_arena_refresh(tsdn, tarenas[i], i);
+ }
+
+ if (config_stats) {
+ size_t base_allocated, base_resident, base_mapped;
+ base_stats_get(tsdn, &base_allocated, &base_resident,
+ &base_mapped);
+ ctl_stats.allocated =
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
+ ctl_stats.active =
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+ ctl_stats.metadata = base_allocated +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats
+ .metadata_allocated;
+ ctl_stats.resident = base_resident +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+ ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+ ctl_stats.mapped = base_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
+ ctl_stats.retained =
+ ctl_stats.arenas[ctl_stats.narenas].astats.retained;
+ }
+
+ ctl_epoch++;
+}
+
+static bool
+ctl_init(tsdn_t *tsdn)
+{
+ bool ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (!ctl_initialized) {
+ /*
+ * Allocate space for one extra arena stats element, which
+ * contains summed stats across all arenas.
+ */
+ ctl_stats.narenas = narenas_total_get();
+ ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
+ (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
+ if (ctl_stats.arenas == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+
+ /*
+ * Initialize all stats structures, regardless of whether they
+		 * ever get used.  Lazy initialization would allow errors to
+		 * leave inconsistent state visible to the application.
+ */
+ if (config_stats) {
+ unsigned i;
+ for (i = 0; i <= ctl_stats.narenas; i++) {
+ if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ unsigned j;
+ for (j = 0; j < i; j++) {
+ a0dalloc(
+ ctl_stats.arenas[j].lstats);
+ a0dalloc(
+ ctl_stats.arenas[j].hstats);
+ }
+ a0dalloc(ctl_stats.arenas);
+ ctl_stats.arenas = NULL;
+ ret = true;
+ goto label_return;
+ }
+ }
+ }
+ ctl_stats.arenas[ctl_stats.narenas].initialized = true;
+
+ ctl_epoch = 0;
+ ctl_refresh(tsdn);
+ ctl_initialized = true;
+ }
+
+ ret = false;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return (ret);
+}
+
+static int
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp)
+{
+ int ret;
+ const char *elm, *tdot, *dot;
+ size_t elen, i, j;
+ const ctl_named_node_t *node;
+
+ elm = name;
+ /* Equivalent to strchrnul(). */
+ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
+ elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+ if (elen == 0) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ node = super_root_node;
+ for (i = 0; i < *depthp; i++) {
+ assert(node);
+ assert(node->nchildren > 0);
+ if (ctl_named_node(node->children) != NULL) {
+ const ctl_named_node_t *pnode = node;
+
+ /* Children are named. */
+ for (j = 0; j < node->nchildren; j++) {
+ const ctl_named_node_t *child =
+ ctl_named_children(node, j);
+ if (strlen(child->name) == elen &&
+ strncmp(elm, child->name, elen) == 0) {
+ node = child;
+ if (nodesp != NULL)
+ nodesp[i] =
+ (const ctl_node_t *)node;
+ mibp[i] = j;
+ break;
+ }
+ }
+ if (node == pnode) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ } else {
+ uintmax_t index;
+ const ctl_indexed_node_t *inode;
+
+ /* Children are indexed. */
+ index = malloc_strtoumax(elm, NULL, 10);
+ if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ inode = ctl_indexed_node(node->children);
+ node = inode->index(tsdn, mibp, *depthp, (size_t)index);
+ if (node == NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ if (nodesp != NULL)
+ nodesp[i] = (const ctl_node_t *)node;
+ mibp[i] = (size_t)index;
+ }
+
+ if (node->ctl != NULL) {
+ /* Terminal node. */
+ if (*dot != '\0') {
+ /*
+ * The name contains more elements than are
+ * in this path through the tree.
+ */
+ ret = ENOENT;
+ goto label_return;
+ }
+ /* Complete lookup successful. */
+ *depthp = i + 1;
+ break;
+ }
+
+ /* Update elm. */
+ if (*dot == '\0') {
+ /* No more elements. */
+ ret = ENOENT;
+ goto label_return;
+ }
+ elm = &dot[1];
+ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
+ strchr(elm, '\0');
+ elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+int
+ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ size_t depth;
+ ctl_node_t const *nodes[CTL_MAX_DEPTH];
+ size_t mib[CTL_MAX_DEPTH];
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ depth = CTL_MAX_DEPTH;
+ ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ if (ret != 0)
+ goto label_return;
+
+ node = ctl_named_node(nodes[depth-1]);
+ if (node != NULL && node->ctl)
+ ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
+ else {
+ /* The name refers to a partial path through the ctl tree. */
+ ret = ENOENT;
+ }
+
+label_return:
+	return (ret);
+}
+
+int
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
+{
+ int ret;
+
+ if (!ctl_initialized && ctl_init(tsdn)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
+label_return:
+	return (ret);
+}
+
+int
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ const ctl_named_node_t *node;
+ size_t i;
+
+ if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ /* Iterate down the tree. */
+ node = super_root_node;
+ for (i = 0; i < miblen; i++) {
+ assert(node);
+ assert(node->nchildren > 0);
+ if (ctl_named_node(node->children) != NULL) {
+ /* Children are named. */
+ if (node->nchildren <= (unsigned)mib[i]) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ node = ctl_named_children(node, mib[i]);
+ } else {
+ const ctl_indexed_node_t *inode;
+
+ /* Indexed element. */
+ inode = ctl_indexed_node(node->children);
+ node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ if (node == NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ }
+ }
+
+ /* Call the ctl function. */
+ if (node && node->ctl)
+ ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ else {
+ /* Partial MIB. */
+ ret = ENOENT;
+ }
+
+label_return:
+	return (ret);
+}
+
+bool
+ctl_boot(void)
+{
+
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
+ return (true);
+
+ ctl_initialized = false;
+
+ return (false);
+}
+
+void
+ctl_prefork(tsdn_t *tsdn)
+{
+
+ malloc_mutex_prefork(tsdn, &ctl_mtx);
+}
+
+void
+ctl_postfork_parent(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
+}
+
+void
+ctl_postfork_child(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_child(tsdn, &ctl_mtx);
+}
+
+/******************************************************************************/
+/* *_ctl() functions. */
+
+#define READONLY() do { \
+ if (newp != NULL || newlen != 0) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define WRITEONLY() do { \
+ if (oldp != NULL || oldlenp != NULL) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define READ_XOR_WRITE() do { \
+ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+ newlen != 0)) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define READ(v, t) do { \
+ if (oldp != NULL && oldlenp != NULL) { \
+ if (*oldlenp != sizeof(t)) { \
+ size_t copylen = (sizeof(t) <= *oldlenp) \
+ ? sizeof(t) : *oldlenp; \
+ memcpy(oldp, (void *)&(v), copylen); \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ *(t *)oldp = (v); \
+ } \
+} while (0)
+
+#define WRITE(v, t) do { \
+ if (newp != NULL) { \
+ if (newlen != sizeof(t)) { \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ (v) = *(t *)newp; \
+ } \
+} while (0)
+
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define CTL_RO_CLGEN(c, l, n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ if (l) \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ if (l) \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_CGEN(c, n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_GEN(n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return (ret); \
+}
+
+/*
+ * ctl_mtx is not acquired, under the assumption that no pertinent data will
+ * mutate during the call.
+ */
+#define CTL_RO_NL_CGEN(c, n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_RO_NL_GEN(n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ READONLY(); \
+ oldval = (m(tsd)); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_RO_CONFIG_GEN(n, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ READONLY(); \
+ oldval = n; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
+
+static int
+epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ UNUSED uint64_t newval;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITE(newval, uint64_t);
+ if (newp != NULL)
+ ctl_refresh(tsd_tsdn(tsd));
+ READ(ctl_epoch, uint64_t);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+/******************************************************************************/
+
+CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
+CTL_RO_CONFIG_GEN(config_debug, bool)
+CTL_RO_CONFIG_GEN(config_fill, bool)
+CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
+CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
+CTL_RO_CONFIG_GEN(config_munmap, bool)
+CTL_RO_CONFIG_GEN(config_prof, bool)
+CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
+CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
+CTL_RO_CONFIG_GEN(config_stats, bool)
+CTL_RO_CONFIG_GEN(config_tcache, bool)
+CTL_RO_CONFIG_GEN(config_tls, bool)
+CTL_RO_CONFIG_GEN(config_utrace, bool)
+CTL_RO_CONFIG_GEN(config_valgrind, bool)
+CTL_RO_CONFIG_GEN(config_xmalloc, bool)
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
+CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+ opt_prof_thread_active_init, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+
+/******************************************************************************/
+
+static int
+thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ arena_t *oldarena;
+ unsigned newind, oldind;
+
+ oldarena = arena_choose(tsd, NULL);
+ if (oldarena == NULL)
+ return (EAGAIN);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ newind = oldind = oldarena->ind;
+ WRITE(newind, unsigned);
+ READ(oldind, unsigned);
+ if (newind != oldind) {
+ arena_t *newarena;
+
+ if (newind >= ctl_stats.narenas) {
+ /* New arena index is out of range. */
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ /* Initialize arena if necessary. */
+ newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ if (newarena == NULL) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
+ if (config_tcache) {
+ tcache_t *tcache = tsd_tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+ oldarena, newarena);
+ }
+ }
+ }
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
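
For reference, a minimal sketch of how the "thread.arena" ctl above is driven from application code (illustrative only; the helper name is hypothetical):

    static int
    example_thread_set_arena(unsigned new_ind)
    {
        unsigned old_ind;
        size_t sz = sizeof(old_ind);

        /* One call reads the current binding and installs the new one. */
        return (mallctl("thread.arena", &old_ind, &sz, &new_ind,
            sizeof(new_ind)));
    }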
+
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+ uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ tsd_thread_deallocatedp_get, uint64_t *)
+
+static int
+thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ oldval = tcache_enabled_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ tcache_enabled_set(*(bool *)newp);
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush();
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ READ_XOR_WRITE();
+
+ if (newp != NULL) {
+ if (newlen != sizeof(const char *)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+ 0)
+ goto label_return;
+ } else {
+ const char *oldname = prof_thread_name_get(tsd);
+ READ(oldname, const char *);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ oldval = prof_thread_active_get(tsd);
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (prof_thread_active_set(tsd, *(bool *)newp)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (tcaches_create(tsd, &tcache_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ READ(tcache_ind, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_flush(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_destroy(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
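
For reference, the three handlers above implement the explicit tcache lifecycle exposed as "tcache.create", "tcache.flush" and "tcache.destroy". A sketch of the typical calling sequence, assuming the public mallocx()/dallocx() API and the MALLOCX_TCACHE() flag (illustrative only):

    static void
    example_explicit_tcache(void)
    {
        unsigned tci;
        size_t sz = sizeof(tci);
        void *p;

        if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
            return;
        p = mallocx(64, MALLOCX_TCACHE(tci));
        if (p != NULL)
            dallocx(p, MALLOCX_TCACHE(tci));
        /* "tcache.flush" would empty the cache; destroy also releases the index. */
        mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
    }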
+
+/******************************************************************************/
+
+static void
+arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
+{
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ {
+ unsigned narenas = ctl_stats.narenas;
+
+ if (arena_ind == narenas) {
+ unsigned i;
+ VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+
+ for (i = 0; i < narenas; i++)
+ tarenas[i] = arena_get(tsdn, i, false);
+
+ /*
+ * No further need to hold ctl_mtx, since narenas and
+ * tarenas contain everything needed below.
+ */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ for (i = 0; i < narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge(tsdn, tarenas[i], all);
+ }
+ } else {
+ arena_t *tarena;
+
+ assert(arena_ind < narenas);
+
+ tarena = arena_get(tsdn, arena_ind, false);
+
+ /* No further need to hold ctl_mtx. */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ if (tarena != NULL)
+ arena_purge(tsdn, tarena, all);
+ }
+ }
+}
+
+static int
+arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
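
For reference, arena_i_purge() above treats an index equal to ctl_stats.narenas as "all arenas", so "arena.<i>.purge" and "arena.<i>.decay" can address either a single arena or every arena at once. A sketch of the calling side, assuming <stdio.h> for snprintf (illustrative only):

    static void
    example_purge_arena(unsigned arena_ind)
    {
        char name[64];

        snprintf(name, sizeof(name), "arena.%u.purge", arena_ind);
        mallctl(name, NULL, NULL, NULL, 0);
    }

Passing the value read from "arenas.narenas" as arena_ind purges all arenas.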
+
+static int
+arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ READONLY();
+ WRITEONLY();
+
+ if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
+ unlikely(opt_quarantine))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ arena_ind = (unsigned)mib[1];
+ if (config_debug) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ assert(arena_ind < ctl_stats.narenas);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ }
+ assert(arena_ind >= opt_narenas);
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+
+ arena_reset(tsd, arena);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ const char *dss = NULL;
+ unsigned arena_ind = (unsigned)mib[1];
+ dss_prec_t dss_prec_old = dss_prec_limit;
+ dss_prec_t dss_prec = dss_prec_limit;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITE(dss, const char *);
+ if (dss != NULL) {
+ int i;
+ bool match = false;
+
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+
+ if (!match) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ }
+
+ if (arena_ind < ctl_stats.narenas) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
+ } else {
+ if (dss_prec != dss_prec_limit &&
+ chunk_dss_prec_set(dss_prec)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = chunk_dss_prec_get();
+ }
+
+ dss = dss_prec_names[dss_prec_old];
+ READ(dss, const char *);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = (unsigned)mib[1];
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = (unsigned)mib[1];
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_decay_time_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = (unsigned)mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+ if (newp != NULL) {
+ chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
+ WRITE(new_chunk_hooks, chunk_hooks_t);
+ old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
+ &new_chunk_hooks);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ } else {
+ chunk_hooks_t old_chunk_hooks =
+ chunk_hooks_get(tsd_tsdn(tsd), arena);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ }
+ } else {
+ ret = EFAULT;
+ goto label_return;
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static const ctl_named_node_t *
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+ const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (i > ctl_stats.narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_arena_i_node;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (*oldlenp != sizeof(unsigned)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned nread, i;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
+ ret = EINVAL;
+ nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+ ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
+ } else {
+ ret = 0;
+ nread = ctl_stats.narenas;
+ }
+
+ for (i = 0; i < nread; i++)
+ ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
+
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_lg_dirty_mult_default_get();
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_decay_time_default_get();
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_decay_time_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
+CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+static const ctl_named_node_t *
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > NBINS)
+ return (NULL);
+ return (super_arenas_bin_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
+static const ctl_named_node_t *
+arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nlclasses)
+ return (NULL);
+ return (super_arenas_lrun_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
+ size_t)
+static const ctl_named_node_t *
+arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nhclasses)
+ return (NULL);
+ return (super_arenas_hchunk_i_node);
+}
+
+static int
+arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (ctl_grow(tsd_tsdn(tsd))) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas - 1;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
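
For reference, "arenas.extend" (handled above) returns the index of a freshly created arena, which can then be targeted explicitly with MALLOCX_ARENA(). A sketch (illustrative only; the helper name is hypothetical):

    static void *
    example_alloc_from_new_arena(size_t size)
    {
        unsigned ind;
        size_t sz = sizeof(ind);

        if (mallctl("arenas.extend", &ind, &sz, NULL, 0) != 0)
            return (NULL);
        return (mallocx(size, MALLOCX_ARENA(ind)));
    }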
+
+/******************************************************************************/
+
+static int
+prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
+ *(bool *)newp);
+ } else
+ oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else
+ oldval = prof_active_get(tsd_tsdn(tsd));
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ const char *filename = NULL;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(filename, const char *);
+
+ if (prof_mdump(tsd, filename)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
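
For reference, "prof.dump" is write-only and only usable when jemalloc was built with profiling support (otherwise the handler above returns ENOENT). A sketch of triggering a dump (illustrative only):

    static int
    example_prof_dump(const char *filename)
    {

        /* A NULL filename lets jemalloc derive one from opt.prof_prefix. */
        return (mallctl("prof.dump", NULL, NULL, &filename,
            sizeof(const char *)));
    }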
+
+static int
+prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else
+ oldval = prof_gdump_get(tsd_tsdn(tsd));
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ size_t lg_sample = lg_prof_sample;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(lg_sample, size_t);
+ if (lg_sample >= (sizeof(uint64_t) << 3))
+ lg_sample = (sizeof(uint64_t) << 3) - 1;
+
+ prof_reset(tsd, lg_sample);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
+
+/******************************************************************************/
+
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
+ ctl_stats.arenas[mib[2]].astats.retained, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+ ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+ ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
+ ctl_stats.arenas[mib[2]].allocated_small, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
+ ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
+ ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
+ ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
+{
+
+ if (j > NBINS)
+ return (NULL);
+ return (super_stats_arenas_i_bins_j_node);
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
+{
+
+ if (j > nlclasses)
+ return (NULL);
+ return (super_stats_arenas_i_lruns_j_node);
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
+{
+
+ if (j > nhclasses)
+ return (NULL);
+ return (super_stats_arenas_i_hchunks_j_node);
+}
+
+static const ctl_named_node_t *
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+ const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_stats_arenas_i_node;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return (ret);
+}
diff --git a/memory/jemalloc/src/src/extent.c b/memory/jemalloc/src/src/extent.c
new file mode 100644
index 000000000..9f5146e5f
--- /dev/null
+++ b/memory/jemalloc/src/src/extent.c
@@ -0,0 +1,53 @@
+#define JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+JEMALLOC_INLINE_C size_t
+extent_quantize(size_t size)
+{
+
+ /*
+ * Round down to the nearest chunk size that can actually be requested
+ * during normal huge allocation.
+ */
+ return (index2size(size2index(size + 1) - 1));
+}
+
+JEMALLOC_INLINE_C int
+extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ int ret;
+ size_t a_qsize = extent_quantize(extent_node_size_get(a));
+ size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful extents only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+
+ ret = (a_addr > b_addr) - (a_addr < b_addr);
+ }
+
+ return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
+ extent_szad_comp)
+
+JEMALLOC_INLINE_C int
+extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+
+ return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
diff --git a/memory/jemalloc/src/src/hash.c b/memory/jemalloc/src/src/hash.c
new file mode 100644
index 000000000..cfa4da027
--- /dev/null
+++ b/memory/jemalloc/src/src/hash.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/memory/jemalloc/src/src/huge.c b/memory/jemalloc/src/src/huge.c
new file mode 100644
index 000000000..62e6932b7
--- /dev/null
+++ b/memory/jemalloc/src/src/huge.c
@@ -0,0 +1,473 @@
+#define JEMALLOC_HUGE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+static extent_node_t *
+huge_node_get(const void *ptr)
+{
+ extent_node_t *node;
+
+ node = chunk_lookup(ptr, true);
+ assert(!extent_node_achunk_get(node));
+
+ return (node);
+}
+
+static bool
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == ptr);
+ assert(!extent_node_achunk_get(node));
+ return (chunk_register(tsdn, ptr, node));
+}
+
+static void
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+ bool err;
+
+ err = huge_node_set(tsdn, ptr, node);
+ assert(!err);
+}
+
+static void
+huge_node_unset(const void *ptr, const extent_node_t *node)
+{
+
+ chunk_deregister(ptr, node);
+}
+
+void *
+huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
+{
+
+ assert(usize == s2u(usize));
+
+ return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+}
+
+void *
+huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
+{
+ void *ret;
+ size_t ausize;
+ arena_t *iarena;
+ extent_node_t *node;
+ bool is_zeroed;
+
+ /* Allocate one or more contiguous chunks for this request. */
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ ausize = sa2u(usize, alignment);
+ if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
+ return (NULL);
+ assert(ausize >= chunksize);
+
+ /* Allocate an extent node with which to track the chunk. */
+ iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get();
+ node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, NULL, true, iarena);
+ if (node == NULL)
+ return (NULL);
+
+ /*
+ * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed = zero;
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
+ arena, usize, alignment, &is_zeroed)) == NULL) {
+ idalloctm(tsdn, node, NULL, true, true);
+ return (NULL);
+ }
+
+ extent_node_init(node, arena, ret, usize, is_zeroed, true);
+
+ if (huge_node_set(tsdn, ret, node)) {
+ arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
+ idalloctm(tsdn, node, NULL, true, true);
+ return (NULL);
+ }
+
+ /* Insert node into huge. */
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed)
+ memset(ret, 0, usize);
+ } else if (config_fill && unlikely(opt_junk_alloc))
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
+#endif
+static void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+ if (config_fill && have_dss && unlikely(opt_junk_free)) {
+ /*
+ * Only bother junk filling if the chunk isn't about to be
+ * unmapped.
+ */
+ if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
+
+static void
+huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize_min, size_t usize_max, bool zero)
+{
+ size_t usize, usize_next;
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool pre_zeroed, post_zeroed;
+
+ /* Increase usize to incorporate extra. */
+ for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+ <= oldsize; usize = usize_next)
+ ; /* Do nothing. */
+
+ if (oldsize == usize)
+ return;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ pre_zeroed = extent_node_zeroed_get(node);
+
+ /* Fill if necessary (shrinking). */
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ if (config_fill && unlikely(opt_junk_free)) {
+ memset((void *)((uintptr_t)ptr + usize),
+ JEMALLOC_FREE_JUNK, sdiff);
+ post_zeroed = false;
+ } else {
+ post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+ &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
+ sdiff);
+ }
+ } else
+ post_zeroed = pre_zeroed;
+
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
+ assert(extent_node_size_get(node) != usize);
+ extent_node_size_set(node, usize);
+ huge_node_reset(tsdn, ptr, node);
+ /* Update zeroed. */
+ extent_node_zeroed_set(node, post_zeroed);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
+
+ /* Fill if necessary (growing). */
+ if (oldsize < usize) {
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!pre_zeroed) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ usize - oldsize);
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK, usize - oldsize);
+ }
+ }
+}
+
+static bool
+huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize)
+{
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks;
+ size_t cdiff;
+ bool pre_zeroed, post_zeroed;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ pre_zeroed = extent_node_zeroed_get(node);
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
+
+ assert(oldsize > usize);
+
+ /* Split excess chunks. */
+ cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+ if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
+ CHUNK_CEILING(usize), cdiff, true, arena->ind))
+ return (true);
+
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ if (config_fill && unlikely(opt_junk_free)) {
+ huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+ sdiff);
+ post_zeroed = false;
+ } else {
+ post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+ &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
+ usize), CHUNK_CEILING(oldsize),
+ CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
+ }
+ } else
+ post_zeroed = pre_zeroed;
+
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
+ extent_node_size_set(node, usize);
+ huge_node_reset(tsdn, ptr, node);
+ /* Update zeroed. */
+ extent_node_zeroed_set(node, post_zeroed);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ /* Zap the excess chunks. */
+ arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
+
+ return (false);
+}
+
+static bool
+huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize, bool zero) {
+ extent_node_t *node;
+ arena_t *arena;
+ bool is_zeroed_subchunk, is_zeroed_chunk;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ is_zeroed_subchunk = extent_node_zeroed_get(node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ /*
+ * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
+ * update extent's zeroed field, and zero as necessary.
+ */
+ is_zeroed_chunk = false;
+ if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
+ &is_zeroed_chunk))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ huge_node_unset(ptr, node);
+ extent_node_size_set(node, usize);
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+ is_zeroed_chunk);
+ huge_node_reset(tsdn, ptr, node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed_subchunk) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ CHUNK_CEILING(oldsize) - oldsize);
+ }
+ if (!is_zeroed_chunk) {
+ memset((void *)((uintptr_t)ptr +
+ CHUNK_CEILING(oldsize)), 0, usize -
+ CHUNK_CEILING(oldsize));
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
+ usize - oldsize);
+ }
+
+ return (false);
+}
+
+bool
+huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
+{
+
+ assert(s2u(oldsize) == oldsize);
+ /* The following should have been caught by callers. */
+ assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
+
+ /* Both allocations must be huge to avoid a move. */
+ if (oldsize < chunksize || usize_max < chunksize)
+ return (true);
+
+ if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+ /* Attempt to expand the allocation in-place. */
+ if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
+ zero)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+ /* Try again, this time with usize_min. */
+ if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
+ ptr, oldsize, usize_min, zero)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+ }
+
+ /*
+ * Avoid moving the allocation if the existing chunk size accommodates
+ * the new size.
+ */
+ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+ && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+ huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero);
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+
+ /* Attempt to shrink the allocation in-place. */
+ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+ if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
+ usize_max)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+ }
+ return (true);
+}
+
+static void *
+huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero)
+{
+
+ if (alignment <= chunksize)
+ return (huge_malloc(tsdn, arena, usize, zero));
+ return (huge_palloc(tsdn, arena, usize, alignment, zero));
+}
+
+void *
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache)
+{
+ void *ret;
+ size_t copysize;
+
+ /* The following should have been caught by callers. */
+ assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
+ /* Try to avoid moving the allocation. */
+ if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
+ zero))
+ return (ptr);
+
+ /*
+ * usize and oldsize are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
+ zero);
+ if (ret == NULL)
+ return (NULL);
+
+ copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
+ return (ret);
+}
+
+void
+huge_dalloc(tsdn_t *tsdn, void *ptr)
+{
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ huge_node_unset(ptr, node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ ql_remove(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ huge_dalloc_junk(extent_node_addr_get(node),
+ extent_node_size_get(node));
+ arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
+ extent_node_addr_get(node), extent_node_size_get(node));
+ idalloctm(tsdn, node, NULL, true, true);
+
+ arena_decay_tick(tsdn, arena);
+}
+
+arena_t *
+huge_aalloc(const void *ptr)
+{
+
+ return (extent_node_arena_get(huge_node_get(ptr)));
+}
+
+size_t
+huge_salloc(tsdn_t *tsdn, const void *ptr)
+{
+ size_t size;
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ size = extent_node_size_get(node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ return (size);
+}
+
+prof_tctx_t *
+huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+{
+ prof_tctx_t *tctx;
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ tctx = extent_node_prof_tctx_get(node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ return (tctx);
+}
+
+void
+huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
+{
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ extent_node_prof_tctx_set(node, tctx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+}
+
+void
+huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
+{
+
+ huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
+}
diff --git a/memory/jemalloc/src/src/jemalloc.c b/memory/jemalloc/src/src/jemalloc.c
new file mode 100644
index 000000000..38650ff06
--- /dev/null
+++ b/memory/jemalloc/src/src/jemalloc.c
@@ -0,0 +1,2897 @@
+#define JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/* Runtime configuration options. */
+const char *je_malloc_conf
+#ifndef _WIN32
+ JEMALLOC_ATTR(weak)
+#endif
+ ;
+bool opt_abort =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+const char *opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ "true"
+#else
+ "false"
+#endif
+ ;
+bool opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+bool opt_junk_free =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+
+size_t opt_quarantine = ZU(0);
+bool opt_redzone = false;
+bool opt_utrace = false;
+bool opt_xmalloc = false;
+bool opt_zero = false;
+unsigned opt_narenas = 0;
+
+/* Initialized to true if the process is running inside Valgrind. */
+bool in_valgrind;
+
+unsigned ncpus;
+
+/* Protects arenas initialization. */
+static malloc_mutex_t arenas_lock;
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ */
+arena_t **arenas;
+static unsigned narenas_total; /* Use narenas_total_*(). */
+static arena_t *a0; /* arenas[0]; read-only after initialization. */
+unsigned narenas_auto; /* Read-only after initialization. */
+
+typedef enum {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+
+/* False should be the common case. Set to true to trigger initialization. */
+static bool malloc_slow = true;
+
+/* When malloc_slow is true, set the corresponding bits for sanity check. */
+enum {
+ flag_opt_junk_alloc = (1U),
+ flag_opt_junk_free = (1U << 1),
+ flag_opt_quarantine = (1U << 2),
+ flag_opt_zero = (1U << 3),
+ flag_opt_utrace = (1U << 4),
+ flag_in_valgrind = (1U << 5),
+ flag_opt_xmalloc = (1U << 6)
+};
+static uint8_t malloc_slow_flags;
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t pind2sz_tab[NPSIZES] = {
+#define PSZ_yes(lg_grp, ndelta, lg_delta) \
+ (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
+#define PSZ_no(lg_grp, ndelta, lg_delta)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ PSZ_##psz(lg_grp, ndelta, lg_delta)
+ SIZE_CLASSES
+#undef PSZ_yes
+#undef PSZ_no
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t index2size_tab[NSIZES] = {
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+ SIZE_CLASSES
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t size2index_tab[] = {
+#if LG_TINY_MIN == 0
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_0(i) i,
+#elif LG_TINY_MIN == 1
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_1(i) i,
+#elif LG_TINY_MIN == 2
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_2(i) i,
+#elif LG_TINY_MIN == 3
+#define S2B_3(i) i,
+#elif LG_TINY_MIN == 4
+#define S2B_4(i) i,
+#elif LG_TINY_MIN == 5
+#define S2B_5(i) i,
+#elif LG_TINY_MIN == 6
+#define S2B_6(i) i,
+#elif LG_TINY_MIN == 7
+#define S2B_7(i) i,
+#elif LG_TINY_MIN == 8
+#define S2B_8(i) i,
+#elif LG_TINY_MIN == 9
+#define S2B_9(i) i,
+#elif LG_TINY_MIN == 10
+#define S2B_10(i) i,
+#elif LG_TINY_MIN == 11
+#define S2B_11(i) i,
+#else
+#error "Unsupported LG_TINY_MIN"
+#endif
+#if LG_TINY_MIN < 1
+#define S2B_1(i) S2B_0(i) S2B_0(i)
+#endif
+#if LG_TINY_MIN < 2
+#define S2B_2(i) S2B_1(i) S2B_1(i)
+#endif
+#if LG_TINY_MIN < 3
+#define S2B_3(i) S2B_2(i) S2B_2(i)
+#endif
+#if LG_TINY_MIN < 4
+#define S2B_4(i) S2B_3(i) S2B_3(i)
+#endif
+#if LG_TINY_MIN < 5
+#define S2B_5(i) S2B_4(i) S2B_4(i)
+#endif
+#if LG_TINY_MIN < 6
+#define S2B_6(i) S2B_5(i) S2B_5(i)
+#endif
+#if LG_TINY_MIN < 7
+#define S2B_7(i) S2B_6(i) S2B_6(i)
+#endif
+#if LG_TINY_MIN < 8
+#define S2B_8(i) S2B_7(i) S2B_7(i)
+#endif
+#if LG_TINY_MIN < 9
+#define S2B_9(i) S2B_8(i) S2B_8(i)
+#endif
+#if LG_TINY_MIN < 10
+#define S2B_10(i) S2B_9(i) S2B_9(i)
+#endif
+#if LG_TINY_MIN < 11
+#define S2B_11(i) S2B_10(i) S2B_10(i)
+#endif
+#define S2B_no(i)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ S2B_##lg_delta_lookup(index)
+ SIZE_CLASSES
+#undef S2B_3
+#undef S2B_4
+#undef S2B_5
+#undef S2B_6
+#undef S2B_7
+#undef S2B_8
+#undef S2B_9
+#undef S2B_10
+#undef S2B_11
+#undef S2B_no
+#undef SC
+};
+
+#ifdef JEMALLOC_THREADED_INIT
+/* Used to let the initializing thread recursively allocate. */
+# define NO_INITIALIZER ((unsigned long)0)
+# define INITIALIZER pthread_self()
+# define IS_INITIALIZER (malloc_initializer == pthread_self())
+static pthread_t malloc_initializer = NO_INITIALIZER;
+#else
+# define NO_INITIALIZER false
+# define INITIALIZER true
+# define IS_INITIALIZER malloc_initializer
+static bool malloc_initializer = NO_INITIALIZER;
+#endif
+
+/* Used to avoid initialization races. */
+#ifdef _WIN32
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t init_lock = SRWLOCK_INIT;
+#else
+static malloc_mutex_t init_lock;
+static bool init_lock_initialized = false;
+
+JEMALLOC_ATTR(constructor)
+static void WINAPI
+_init_init_lock(void)
+{
+
+ /* If another constructor in the same binary is using mallctl to
+ * e.g. setup chunk hooks, it may end up running before this one,
+ * and malloc_init_hard will crash trying to lock the uninitialized
+ * lock. So we force an initialization of the lock in
+ * malloc_init_hard as well. We don't try to care about atomicity
+ * of the accesses to the init_lock_initialized boolean, since it
+ * really only matters early in the process creation, before any
+ * separate thread normally starts doing anything. */
+ if (!init_lock_initialized)
+ malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+ init_lock_initialized = true;
+}
+
+#ifdef _MSC_VER
+# pragma section(".CRT$XCU", read)
+JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
+static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
+#endif
+#endif
+#else
+static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
+#endif
+
+typedef struct {
+ void *p; /* Input pointer (as in realloc(p, s)). */
+ size_t s; /* Request size. */
+ void *r; /* Result pointer. */
+} malloc_utrace_t;
+
+#ifdef JEMALLOC_UTRACE
+# define UTRACE(a, b, c) do { \
+ if (unlikely(opt_utrace)) { \
+ int utrace_serrno = errno; \
+ malloc_utrace_t ut; \
+ ut.p = (a); \
+ ut.s = (b); \
+ ut.r = (c); \
+ utrace(&ut, sizeof(ut)); \
+ errno = utrace_serrno; \
+ } \
+} while (0)
+#else
+# define UTRACE(a, b, c)
+#endif
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool malloc_init_hard_a0(void);
+static bool malloc_init_hard(void);
+
+/******************************************************************************/
+/*
+ * Begin miscellaneous support functions.
+ */
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_initialized(void)
+{
+
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && unlikely(opt_quarantine))
+ quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init_a0(void)
+{
+
+ if (unlikely(malloc_init_state == malloc_init_uninitialized))
+ return (malloc_init_hard_a0());
+ return (false);
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init(void)
+{
+
+ if (unlikely(!malloc_initialized()) && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
+
+ return (false);
+}
+
+/*
+ * The a0*() functions are used instead of i{d,}alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_metadata)
+{
+
+ if (unlikely(malloc_init_a0()))
+ return (NULL);
+
+ return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
+ is_metadata, arena_get(TSDN_NULL, 0, true), true));
+}
+
+static void
+a0idalloc(void *ptr, bool is_metadata)
+{
+
+ idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
+}
+
+arena_t *
+a0get(void)
+{
+
+ return (a0);
+}
+
+void *
+a0malloc(size_t size)
+{
+
+ return (a0ialloc(size, false, true));
+}
+
+void
+a0dalloc(void *ptr)
+{
+
+ a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size)
+{
+
+ if (unlikely(size == 0))
+ size = 1;
+
+ return (a0ialloc(size, false, false));
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size)
+{
+ size_t num_size;
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ assert(num == 0 || size == 0);
+ num_size = 1;
+ }
+
+ return (a0ialloc(num_size, true, false));
+}
+
+void
+bootstrap_free(void *ptr)
+{
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ a0idalloc(ptr, false);
+}
+
+static void
+arena_set(unsigned ind, arena_t *arena)
+{
+
+ atomic_write_p((void **)&arenas[ind], arena);
+}
+
+static void
+narenas_total_set(unsigned narenas)
+{
+
+ atomic_write_u(&narenas_total, narenas);
+}
+
+static void
+narenas_total_inc(void)
+{
+
+ atomic_add_u(&narenas_total, 1);
+}
+
+unsigned
+narenas_total_get(void)
+{
+
+ return (atomic_read_u(&narenas_total));
+}
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arena_init_locked(tsdn_t *tsdn, unsigned ind)
+{
+ arena_t *arena;
+
+ assert(ind <= narenas_total_get());
+ if (ind > MALLOCX_ARENA_MAX)
+ return (NULL);
+ if (ind == narenas_total_get())
+ narenas_total_inc();
+
+ /*
+ * Another thread may have already initialized arenas[ind] if it's an
+ * auto arena.
+ */
+ arena = arena_get(tsdn, ind, false);
+ if (arena != NULL) {
+ assert(ind < narenas_auto);
+ return (arena);
+ }
+
+ /* Actually initialize the arena. */
+ arena = arena_new(tsdn, ind);
+ arena_set(ind, arena);
+ return (arena);
+}
+
+arena_t *
+arena_init(tsdn_t *tsdn, unsigned ind)
+{
+ arena_t *arena;
+
+ malloc_mutex_lock(tsdn, &arenas_lock);
+ arena = arena_init_locked(tsdn, ind);
+ malloc_mutex_unlock(tsdn, &arenas_lock);
+ return (arena);
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind, bool internal)
+{
+ arena_t *arena;
+
+ if (!tsd_nominal(tsd))
+ return;
+
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_inc(arena, internal);
+
+ if (internal)
+ tsd_iarena_set(tsd, arena);
+ else
+ tsd_arena_set(tsd, arena);
+}
+
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
+{
+ arena_t *oldarena, *newarena;
+
+ oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
+ newarena = arena_get(tsd_tsdn(tsd), newind, false);
+ arena_nthreads_dec(oldarena, false);
+ arena_nthreads_inc(newarena, false);
+ tsd_arena_set(tsd, newarena);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
+{
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_dec(arena, internal);
+ if (internal)
+ tsd_iarena_set(tsd, NULL);
+ else
+ tsd_arena_set(tsd, NULL);
+}
+
+arena_tdata_t *
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
+{
+ arena_tdata_t *tdata, *arenas_tdata_old;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+ unsigned narenas_tdata_old, i;
+ unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+ unsigned narenas_actual = narenas_total_get();
+
+ /*
+ * Dissociate old tdata array (and set up for deallocation upon return)
+ * if it's too small.
+ */
+ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
+ arenas_tdata_old = arenas_tdata;
+ narenas_tdata_old = narenas_tdata;
+ arenas_tdata = NULL;
+ narenas_tdata = 0;
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ } else {
+ arenas_tdata_old = NULL;
+ narenas_tdata_old = 0;
+ }
+
+ /* Allocate tdata array if it's missing. */
+ if (arenas_tdata == NULL) {
+ bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
+ narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
+ *arenas_tdata_bypassp = true;
+ arenas_tdata = (arena_tdata_t *)a0malloc(
+ sizeof(arena_tdata_t) * narenas_tdata);
+ *arenas_tdata_bypassp = false;
+ }
+ if (arenas_tdata == NULL) {
+ tdata = NULL;
+ goto label_return;
+ }
+ assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ }
+
+ /*
+ * Copy to tdata array. It's possible that the actual number of arenas
+ * has increased since narenas_total_get() was called above, but that
+ * causes no correctness issues unless two threads concurrently execute
+ * the arenas.extend mallctl, which we trust mallctl synchronization to
+ * prevent.
+ */
+
+ /* Copy/initialize tickers. */
+ for (i = 0; i < narenas_actual; i++) {
+ if (i < narenas_tdata_old) {
+ ticker_copy(&arenas_tdata[i].decay_ticker,
+ &arenas_tdata_old[i].decay_ticker);
+ } else {
+ ticker_init(&arenas_tdata[i].decay_ticker,
+ DECAY_NTICKS_PER_UPDATE);
+ }
+ }
+ if (narenas_tdata > narenas_actual) {
+ memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+ * (narenas_tdata - narenas_actual));
+ }
+
+ /* Read the refreshed tdata array. */
+ tdata = &arenas_tdata[ind];
+label_return:
+ if (arenas_tdata_old != NULL)
+ a0dalloc(arenas_tdata_old);
+ return (tdata);
+}
+
+/* Slow path, called only by arena_choose(). */
+arena_t *
+arena_choose_hard(tsd_t *tsd, bool internal)
+{
+ arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (narenas_auto > 1) {
+ unsigned i, j, choose[2], first_null;
+
+ /*
+ * Determine binding for both non-internal and internal
+ * allocation.
+ *
+ * choose[0]: For application allocation.
+ * choose[1]: For internal metadata allocation.
+ */
+
+ for (j = 0; j < 2; j++)
+ choose[j] = 0;
+
+ first_null = narenas_auto;
+ malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
+ assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
+ for (i = 1; i < narenas_auto; i++) {
+ if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
+ /*
+ * Choose the first arena that has the lowest
+ * number of threads assigned to it.
+ */
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), i, false), !!j) <
+ arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), choose[j], false),
+ !!j))
+ choose[j] = i;
+ }
+ } else if (first_null == narenas_auto) {
+ /*
+ * Record the index of the first uninitialized
+ * arena, in case all extant arenas are in use.
+ *
+ * NB: It is possible for there to be
+ * discontinuities in terms of initialized
+ * versus uninitialized arenas, due to the
+ * "thread.arena" mallctl.
+ */
+ first_null = i;
+ }
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
+ choose[j], false), !!j) == 0 || first_null ==
+ narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded
+ * arena if all arenas are already initialized.
+ */
+ if (!!j == internal) {
+ ret = arena_get(tsd_tsdn(tsd),
+ choose[j], false);
+ }
+ } else {
+ arena_t *arena;
+
+ /* Initialize a new arena. */
+ choose[j] = first_null;
+ arena = arena_init_locked(tsd_tsdn(tsd),
+ choose[j]);
+ if (arena == NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd),
+ &arenas_lock);
+ return (NULL);
+ }
+ if (!!j == internal)
+ ret = arena;
+ }
+ arena_bind(tsd, choose[j], !!j);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+ } else {
+ ret = arena_get(tsd_tsdn(tsd), 0, false);
+ arena_bind(tsd, 0, false);
+ arena_bind(tsd, 0, true);
+ }
+
+ return (ret);
+}
+
+void
+thread_allocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+thread_deallocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+iarena_cleanup(tsd_t *tsd)
+{
+ arena_t *iarena;
+
+ iarena = tsd_iarena_get(tsd);
+ if (iarena != NULL)
+ arena_unbind(tsd, iarena->ind, true);
+}
+
+void
+arena_cleanup(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ arena = tsd_arena_get(tsd);
+ if (arena != NULL)
+ arena_unbind(tsd, arena->ind, false);
+}
+
+void
+arenas_tdata_cleanup(tsd_t *tsd)
+{
+ arena_tdata_t *arenas_tdata;
+
+ /* Prevent tsd->arenas_tdata from being (re)created. */
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+
+ arenas_tdata = tsd_arenas_tdata_get(tsd);
+ if (arenas_tdata != NULL) {
+ tsd_arenas_tdata_set(tsd, NULL);
+ a0dalloc(arenas_tdata);
+ }
+}
+
+void
+narenas_tdata_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+arenas_tdata_bypass_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+static void
+stats_print_atexit(void)
+{
+
+ if (config_tcache && config_stats) {
+ tsdn_t *tsdn;
+ unsigned narenas, i;
+
+ tsdn = tsdn_fetch();
+
+ /*
+ * Merge stats from extant threads. This is racy, since
+ * individual threads do not lock when recording tcache stats
+ * events. As a consequence, the final stats may be slightly
+ * out of date by the time they are reported, if other threads
+ * continue to allocate.
+ */
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (arena != NULL) {
+ tcache_t *tcache;
+
+ /*
+ * tcache_stats_merge() locks bins, so if any
+ * code is introduced that acquires both arena
+ * and bin locks in the opposite order,
+ * deadlocks may result.
+ */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ tcache_stats_merge(tsdn, tcache, arena);
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+ }
+ }
+ je_malloc_stats_print(NULL, NULL, NULL);
+}
+
+/*
+ * End miscellaneous support functions.
+ */
+/******************************************************************************/
+/*
+ * Begin initialization functions.
+ */
+
+#ifndef JEMALLOC_HAVE_SECURE_GETENV
+static char *
+secure_getenv(const char *name)
+{
+
+# ifdef JEMALLOC_HAVE_ISSETUGID
+ if (issetugid() != 0)
+ return (NULL);
+# endif
+ return (getenv(name));
+}
+#endif
+
+static unsigned
+malloc_ncpus(void)
+{
+ long result;
+
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwNumberOfProcessors;
+#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
+ /*
+ * glibc >= 2.6 has the CPU_COUNT macro.
+ *
+ * glibc's sysconf() uses isspace(). glibc allocates for the first time
+ * *before* setting up the isspace tables. Therefore we need a
+ * different method to get the number of CPUs.
+ */
+ {
+ cpu_set_t set;
+
+ pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+ result = CPU_COUNT(&set);
+ }
+#else
+ result = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
+ return ((result == -1) ? 1 : (unsigned)result);
+}
+
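+/*
+ * malloc_conf_next() extracts one key:value pair per call from an option
+ * string, which is a comma-separated list of <key>:<value> pairs, e.g.
+ *
+ *   narenas:4,lg_chunk:22,stats_print:true
+ *
+ * (an illustrative combination of options that are handled in
+ * malloc_conf_init() below).
+ */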
+static bool
+malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
+ char const **v_p, size_t *vlen_p)
+{
+ bool accept;
+ const char *opts = *opts_p;
+
+ *k_p = opts;
+
+ for (accept = false; !accept;) {
+ switch (*opts) {
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ case '_':
+ opts++;
+ break;
+ case ':':
+ opts++;
+ *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
+ *v_p = opts;
+ accept = true;
+ break;
+ case '\0':
+ if (opts != *opts_p) {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with key\n");
+ }
+ return (true);
+ default:
+ malloc_write("<jemalloc>: Malformed conf string\n");
+ return (true);
+ }
+ }
+
+ for (accept = false; !accept;) {
+ switch (*opts) {
+ case ',':
+ opts++;
+ /*
+ * Look ahead one character here, because the next time
+ * this function is called, it will assume that end of
+ * input has been cleanly reached if no input remains,
+ * but we have optimistically already consumed the
+ * comma if one exists.
+ */
+ if (*opts == '\0') {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with comma\n");
+ }
+ *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ case '\0':
+ *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ default:
+ opts++;
+ break;
+ }
+ }
+
+ *opts_p = opts;
+ return (false);
+}
+
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+ size_t vlen)
+{
+
+ malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
+ (int)vlen, v);
+}
+
+static void
+malloc_slow_flag_init(void)
+{
+ /*
+ * Combine the runtime options into malloc_slow for fast path. Called
+ * after processing all the options.
+ */
+ malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
+ | (opt_junk_free ? flag_opt_junk_free : 0)
+ | (opt_quarantine ? flag_opt_quarantine : 0)
+ | (opt_zero ? flag_opt_zero : 0)
+ | (opt_utrace ? flag_opt_utrace : 0)
+ | (opt_xmalloc ? flag_opt_xmalloc : 0);
+
+ if (config_valgrind)
+ malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
+
+ malloc_slow = (malloc_slow_flags != 0);
+}
+
+static void
+malloc_conf_init(void)
+{
+ unsigned i;
+ char buf[PATH_MAX + 1];
+ const char *opts, *k, *v;
+ size_t klen, vlen;
+
+ /*
+ * Automatically configure valgrind before processing options. The
+ * valgrind option remains in jemalloc 3.x for compatibility reasons.
+ */
+ if (config_valgrind) {
+ in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && unlikely(in_valgrind)) {
+ opt_junk = "false";
+ opt_junk_alloc = false;
+ opt_junk_free = false;
+ assert(!opt_zero);
+ opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ opt_redzone = true;
+ }
+ if (config_tcache && unlikely(in_valgrind))
+ opt_tcache = false;
+ }
+
+ for (i = 0; i < 4; i++) {
+ /* Get runtime configuration. */
+ switch (i) {
+ case 0:
+ opts = config_malloc_conf;
+ break;
+ case 1:
+ if (je_malloc_conf != NULL) {
+ /*
+ * Use options that were compiled into the
+ * program.
+ */
+ opts = je_malloc_conf;
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ case 2: {
+ ssize_t linklen = 0;
+#ifndef _WIN32
+ int saved_errno = errno;
+ const char *linkname =
+# ifdef JEMALLOC_PREFIX
+ "/etc/"JEMALLOC_PREFIX"malloc.conf"
+# else
+ "/etc/malloc.conf"
+# endif
+ ;
+
+ /*
+ * Try to use the option string embedded in the name that the
+ * "/etc/malloc.conf" symbolic link points to (i.e., the target
+ * returned by readlink(2)).
+ */
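+ /*
+ * For example (illustrative, not created by this code), such a
+ * link can be set up as:
+ *
+ *   ln -s 'stats_print:true' /etc/malloc.conf
+ *
+ * so that readlink() below yields the option string.
+ */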
+ linklen = readlink(linkname, buf, sizeof(buf) - 1);
+ if (linklen == -1) {
+ /* No configuration specified. */
+ linklen = 0;
+ /* Restore errno. */
+ set_errno(saved_errno);
+ }
+#endif
+ buf[linklen] = '\0';
+ opts = buf;
+ break;
+ } case 3: {
+ const char *envname =
+#ifdef JEMALLOC_PREFIX
+ JEMALLOC_CPREFIX"MALLOC_CONF"
+#else
+ "MALLOC_CONF"
+#endif
+ ;
+
+ if ((opts = secure_getenv(envname)) != NULL) {
+ /*
+ * Do nothing; opts is already initialized to
+ * the value of the MALLOC_CONF environment
+ * variable.
+ */
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ } default:
+ not_reached();
+ buf[0] = '\0';
+ opts = buf;
+ }
+
+ while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen)) {
+#define CONF_MATCH(n) \
+ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define CONF_MATCH_VALUE(n) \
+ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
+#define CONF_HANDLE_BOOL(o, n, cont) \
+ if (CONF_MATCH(n)) { \
+ if (CONF_MATCH_VALUE("true")) \
+ o = true; \
+ else if (CONF_MATCH_VALUE("false")) \
+ o = false; \
+ else { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } \
+ if (cont) \
+ continue; \
+ }
+#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
+ if (CONF_MATCH(n)) { \
+ uintmax_t um; \
+ char *end; \
+ \
+ set_errno(0); \
+ um = malloc_strtoumax(v, &end, 0); \
+ if (get_errno() != 0 || (uintptr_t)end -\
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (clip) { \
+ if ((min) != 0 && um < (min)) \
+ o = (t)(min); \
+ else if (um > (max)) \
+ o = (t)(max); \
+ else \
+ o = (t)um; \
+ } else { \
+ if (((min) != 0 && um < (min)) \
+ || um > (max)) { \
+ malloc_conf_error( \
+ "Out-of-range " \
+ "conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = (t)um; \
+ } \
+ continue; \
+ }
+#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
+ CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
+#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
+ CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+ if (CONF_MATCH(n)) { \
+ long l; \
+ char *end; \
+ \
+ set_errno(0); \
+ l = strtol(v, &end, 0); \
+ if (get_errno() != 0 || (uintptr_t)end -\
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (l < (ssize_t)(min) || l > \
+ (ssize_t)(max)) { \
+ malloc_conf_error( \
+ "Out-of-range conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = l; \
+ continue; \
+ }
+#define CONF_HANDLE_CHAR_P(o, n, d) \
+ if (CONF_MATCH(n)) { \
+ size_t cpylen = (vlen <= \
+ sizeof(o)-1) ? vlen : \
+ sizeof(o)-1; \
+ strncpy(o, v, cpylen); \
+ o[cpylen] = '\0'; \
+ continue; \
+ }
+
+ CONF_HANDLE_BOOL(opt_abort, "abort", true)
+ /*
+ * Chunks always require at least one header page,
+ * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
+ * possibly an additional page in the presence of
+ * redzones. In order to simplify options processing,
+ * use a conservative bound that accommodates all these
+ * constraints.
+ */
+ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
+ LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
+ (sizeof(size_t) << 3) - 1, true)
+ if (strncmp("dss", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strncmp(dss_prec_names[i], v, vlen)
+ == 0) {
+ if (chunk_dss_prec_set(i)) {
+ malloc_conf_error(
+ "Error setting dss",
+ k, klen, v, vlen);
+ } else {
+ opt_dss =
+ dss_prec_names[i];
+ match = true;
+ break;
+ }
+ }
+ }
+ if (!match) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
+ UINT_MAX, false)
+ if (strncmp("purge", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < purge_mode_limit; i++) {
+ if (strncmp(purge_mode_names[i], v,
+ vlen) == 0) {
+ opt_purge = (purge_mode_t)i;
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
+ -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
+ NSTIME_SEC_MAX);
+ CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
+ if (config_fill) {
+ if (CONF_MATCH("junk")) {
+ if (CONF_MATCH_VALUE("true")) {
+ if (config_valgrind &&
+ unlikely(in_valgrind)) {
+ malloc_conf_error(
+ "Deallocation-time "
+ "junk filling cannot "
+ "be enabled while "
+ "running inside "
+ "Valgrind", k, klen, v,
+ vlen);
+ } else {
+ opt_junk = "true";
+ opt_junk_alloc = true;
+ opt_junk_free = true;
+ }
+ } else if (CONF_MATCH_VALUE("false")) {
+ opt_junk = "false";
+ opt_junk_alloc = opt_junk_free =
+ false;
+ } else if (CONF_MATCH_VALUE("alloc")) {
+ opt_junk = "alloc";
+ opt_junk_alloc = true;
+ opt_junk_free = false;
+ } else if (CONF_MATCH_VALUE("free")) {
+ if (config_valgrind &&
+ unlikely(in_valgrind)) {
+ malloc_conf_error(
+ "Deallocation-time "
+ "junk filling cannot "
+ "be enabled while "
+ "running inside "
+ "Valgrind", k, klen, v,
+ vlen);
+ } else {
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
+ }
+ } else {
+ malloc_conf_error(
+ "Invalid conf value", k,
+ klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
+ 0, SIZE_T_MAX, false)
+ CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
+ CONF_HANDLE_BOOL(opt_zero, "zero", true)
+ }
+ if (config_utrace) {
+ CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
+ }
+ if (config_xmalloc) {
+ CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
+ }
+ if (config_tcache) {
+ CONF_HANDLE_BOOL(opt_tcache, "tcache",
+ !config_valgrind || !in_valgrind)
+ if (CONF_MATCH("tcache")) {
+ assert(config_valgrind && in_valgrind);
+ if (opt_tcache) {
+ opt_tcache = false;
+ malloc_conf_error(
+ "tcache cannot be enabled "
+ "while running inside Valgrind",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
+ "lg_tcache_max", -1,
+ (sizeof(size_t) << 3) - 1)
+ }
+ if (config_prof) {
+ CONF_HANDLE_BOOL(opt_prof, "prof", true)
+ CONF_HANDLE_CHAR_P(opt_prof_prefix,
+ "prof_prefix", "jeprof")
+ CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+ "prof_thread_active_init", true)
+ CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
+ "lg_prof_sample", 0,
+ (sizeof(uint64_t) << 3) - 1, true)
+ CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
+ true)
+ CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
+ "lg_prof_interval", -1,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
+ true)
+ }
+ malloc_conf_error("Invalid conf pair", k, klen, v,
+ vlen);
+#undef CONF_MATCH
+#undef CONF_HANDLE_BOOL
+#undef CONF_HANDLE_SIZE_T
+#undef CONF_HANDLE_SSIZE_T
+#undef CONF_HANDLE_CHAR_P
+ }
+ }
+}
+
+static bool
+malloc_init_hard_needed(void)
+{
+
+ if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+ malloc_init_recursible)) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock, or this thread is the initializing
+ * thread, and it is recursively allocating.
+ */
+ return (false);
+ }
+#ifdef JEMALLOC_THREADED_INIT
+ if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
+ spin_t spinner;
+
+ /* Busy-wait until the initializing thread completes. */
+ spin_init(&spinner);
+ do {
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ spin_adaptive(&spinner);
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ } while (!malloc_initialized());
+ return (false);
+ }
+#endif
+ return (true);
+}
+
+static bool
+malloc_init_hard_a0_locked()
+{
+
+ malloc_initializer = INITIALIZER;
+
+ if (config_prof)
+ prof_boot0();
+ malloc_conf_init();
+ if (opt_stats_print) {
+ /* Print statistics at exit. */
+ if (atexit(stats_print_atexit) != 0) {
+ malloc_write("<jemalloc>: Error in atexit()\n");
+ if (opt_abort)
+ abort();
+ }
+ }
+ pages_boot();
+ if (base_boot())
+ return (true);
+ if (chunk_boot())
+ return (true);
+ if (ctl_boot())
+ return (true);
+ if (config_prof)
+ prof_boot1();
+ arena_boot();
+ if (config_tcache && tcache_boot(TSDN_NULL))
+ return (true);
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
+ return (true);
+ /*
+ * Create enough scaffolding to allow recursive allocation in
+ * malloc_ncpus().
+ */
+ narenas_auto = 1;
+ narenas_total_set(narenas_auto);
+ arenas = &a0;
+ memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
+ /*
+ * Initialize one arena here. The rest are lazily created in
+ * arena_choose_hard().
+ */
+ if (arena_init(TSDN_NULL, 0) == NULL)
+ return (true);
+
+ malloc_init_state = malloc_init_a0_initialized;
+
+ return (false);
+}
+
+static bool
+malloc_init_hard_a0(void)
+{
+ bool ret;
+
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ ret = malloc_init_hard_a0_locked();
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return (ret);
+}
+
+/* Initialize data structures which may trigger recursive allocation. */
+static bool
+malloc_init_hard_recursible(void)
+{
+
+ malloc_init_state = malloc_init_recursible;
+
+ ncpus = malloc_ncpus();
+
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32) && !defined(__native_client__))
+ /* LinuxThreads' pthread_atfork() allocates. */
+ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ jemalloc_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ return (true);
+ }
+#endif
+
+ return (false);
+}
+
+static bool
+malloc_init_hard_finish(tsdn_t *tsdn)
+{
+
+ if (malloc_mutex_boot())
+ return (true);
+
+ if (opt_narenas == 0) {
+ /*
+ * For SMP systems, create more than one arena per CPU by
+ * default.
+ */
+ if (ncpus > 1)
+ opt_narenas = ncpus << 2;
+ else
+ opt_narenas = 1;
+ }
+ narenas_auto = opt_narenas;
+ /*
+ * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
+ */
+ if (narenas_auto > MALLOCX_ARENA_MAX) {
+ narenas_auto = MALLOCX_ARENA_MAX;
+ malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
+ narenas_auto);
+ }
+ narenas_total_set(narenas_auto);
+
+ /* Allocate and initialize arenas. */
+ arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
+ (MALLOCX_ARENA_MAX+1));
+ if (arenas == NULL)
+ return (true);
+ /* Copy the pointer to the one arena that was already initialized. */
+ arena_set(0, a0);
+
+ malloc_init_state = malloc_init_initialized;
+ malloc_slow_flag_init();
+
+ return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+ tsd_t *tsd;
+
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+ _init_init_lock();
+#endif
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ if (!malloc_init_hard_needed()) {
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return (false);
+ }
+
+ if (malloc_init_state != malloc_init_a0_initialized &&
+ malloc_init_hard_a0_locked()) {
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return (true);
+ }
+
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ /* Recursive allocation relies on functional tsd. */
+ tsd = malloc_tsd_boot0();
+ if (tsd == NULL)
+ return (true);
+ if (malloc_init_hard_recursible())
+ return (true);
+ malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+
+ if (config_prof && prof_boot2(tsd)) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ return (true);
+ }
+
+ if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ return (true);
+ }
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ malloc_tsd_boot1();
+ return (false);
+}
+
+/*
+ * End initialization functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+static void *
+ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
+ prof_tctx_t *tctx, bool slow_path)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ szind_t ind_large = size2index(LARGE_MINCLASS);
+ p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else
+ p = ialloc(tsd, usize, ind, zero, slow_path);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
+{
+ void *p;
+ prof_tctx_t *tctx;
+
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
+ else
+ p = ialloc(tsd, usize, ind, zero, slow_path);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+
+ return (p);
+}
+
+/*
+ * ialloc_body() is inlined so that fast and slow paths are generated separately
+ * with statically known slow_path.
+ *
+ * This function guarantees that *tsdn is non-NULL on success.
+ */
+JEMALLOC_ALWAYS_INLINE_C void *
+ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
+ bool slow_path)
+{
+ tsd_t *tsd;
+ szind_t ind;
+
+ if (slow_path && unlikely(malloc_init())) {
+ *tsdn = NULL;
+ return (NULL);
+ }
+
+ tsd = tsd_fetch();
+ *tsdn = tsd_tsdn(tsd);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ ind = size2index(size);
+ if (unlikely(ind >= NSIZES))
+ return (NULL);
+
+ if (config_stats || (config_prof && opt_prof) || (slow_path &&
+ config_valgrind && unlikely(in_valgrind))) {
+ *usize = index2size(ind);
+ assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ }
+
+ if (config_prof && opt_prof)
+ return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
+
+ return (ialloc(tsd, size, ind, zero, slow_path));
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
+ bool update_errno, bool slow_path)
+{
+
+ assert(!tsdn_null(tsdn) || ret == NULL);
+
+ if (unlikely(ret == NULL)) {
+ if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_printf("<jemalloc>: Error in %s(): out of "
+ "memory\n", func);
+ abort();
+ }
+ if (update_errno)
+ set_errno(ENOMEM);
+ }
+ if (config_stats && likely(ret != NULL)) {
+ assert(usize == isalloc(tsdn, ret, config_prof));
+ *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
+ }
+ witness_assert_lockless(tsdn);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size)
+{
+ void *ret;
+ tsdn_t *tsdn;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ if (size == 0)
+ size = 1;
+
+ if (likely(!malloc_slow)) {
+ ret = ialloc_body(size, false, &tsdn, &usize, false);
+ ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
+ } else {
+ ret = ialloc_body(size, false, &tsdn, &usize, true);
+ ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
+ UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+ }
+
+ return (ret);
+}
+
+static void *
+imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
+ prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
+ p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else
+ p = ipalloc(tsd, usize, alignment, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
+{
+ void *p;
+ prof_tctx_t *tctx;
+
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = imemalign_prof_sample(tsd, alignment, usize, tctx);
+ else
+ p = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+
+ return (p);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+static int
+imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
+{
+ int ret;
+ tsd_t *tsd;
+ size_t usize;
+ void *result;
+
+ assert(min_alignment != 0);
+
+ if (unlikely(malloc_init())) {
+ tsd = NULL;
+ result = NULL;
+ goto label_oom;
+ }
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (size == 0)
+ size = 1;
+
+ /* Make sure that alignment is a large enough power of 2. */
+ if (unlikely(((alignment - 1) & alignment) != 0
+ || (alignment < min_alignment))) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error allocating "
+ "aligned memory: invalid alignment\n");
+ abort();
+ }
+ result = NULL;
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ result = NULL;
+ goto label_oom;
+ }
+
+ if (config_prof && opt_prof)
+ result = imemalign_prof(tsd, alignment, usize);
+ else
+ result = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(result == NULL))
+ goto label_oom;
+ assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
+
+ *memptr = result;
+ ret = 0;
+label_return:
+ if (config_stats && likely(result != NULL)) {
+ assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, size, result);
+ JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
+ false);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
+label_oom:
+ assert(result == NULL);
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error allocating aligned memory: "
+ "out of memory\n");
+ abort();
+ }
+ ret = ENOMEM;
+ witness_assert_lockless(tsd_tsdn(tsd));
+ goto label_return;
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
+je_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ int ret;
+
+ ret = imemalign(memptr, alignment, size, sizeof(void *));
+
+ return (ret);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
+je_aligned_alloc(size_t alignment, size_t size)
+{
+ void *ret;
+ int err;
+
+ if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
+ ret = NULL;
+ set_errno(err);
+ }
+
+ return (ret);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
+je_calloc(size_t num, size_t size)
+{
+ void *ret;
+ tsdn_t *tsdn;
+ size_t num_size;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ if (num == 0 || size == 0)
+ num_size = 1;
+ else
+ num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
+ /*
+ * Try to avoid division here. We know that it isn't possible to
+ * overflow during multiplication if neither operand uses any of the
+ * most significant half of the bits in a size_t.
+ */
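+ /*
+ * Concretely, the mask below covers the upper half of the bits in a
+ * size_t (e.g. bits 32..63 when size_t is 64 bits); if neither num nor
+ * size has any of those bits set, both operands are less than 2^32 and
+ * the product cannot overflow, so the division check is skipped.
+ */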
+ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
+ 2))) && (num_size / size != num)))
+ num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
+
+ if (likely(!malloc_slow)) {
+ ret = ialloc_body(num_size, true, &tsdn, &usize, false);
+ ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
+ } else {
+ ret = ialloc_body(num_size, true, &tsdn, &usize, true);
+ ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
+ }
+
+ return (ret);
+}
+
+static void *
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
+{
+ void *p;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ tctx = prof_alloc_prep(tsd, usize, prof_active, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
+ else
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+ old_tctx);
+
+ return (p);
+}
+
+JEMALLOC_INLINE_C void
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+{
+ size_t usize;
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ if (config_prof && opt_prof) {
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ prof_free(tsd, ptr, usize);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_stats)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+
+ if (likely(!slow_path))
+ iqalloc(tsd, ptr, tcache, false);
+ else {
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(tsd_tsdn(tsd), ptr);
+ iqalloc(tsd, ptr, tcache, true);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ }
+}
+
+JEMALLOC_INLINE_C void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
+{
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ if (config_stats)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(tsd_tsdn(tsd), ptr);
+ isqalloc(tsd, ptr, usize, tcache, slow_path);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_realloc(void *ptr, size_t size)
+{
+ void *ret;
+ tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t old_usize = 0;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ if (unlikely(size == 0)) {
+ if (ptr != NULL) {
+ tsd_t *tsd;
+
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ UTRACE(ptr, 0, 0);
+ tsd = tsd_fetch();
+ ifree(tsd, ptr, tcache_get(tsd, false), true);
+ return (NULL);
+ }
+ size = 1;
+ }
+
+ if (likely(ptr != NULL)) {
+ tsd_t *tsd;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+ tsd = tsd_fetch();
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind)) {
+ old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
+ u2rz(old_usize);
+ }
+
+ if (config_prof && opt_prof) {
+ usize = s2u(size);
+ ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
+ NULL : irealloc_prof(tsd, ptr, old_usize, usize);
+ } else {
+ if (config_stats || (config_valgrind &&
+ unlikely(in_valgrind)))
+ usize = s2u(size);
+ ret = iralloc(tsd, ptr, old_usize, size, 0, false);
+ }
+ tsdn = tsd_tsdn(tsd);
+ } else {
+ /* realloc(NULL, size) is equivalent to malloc(size). */
+ if (likely(!malloc_slow))
+ ret = ialloc_body(size, false, &tsdn, &usize, false);
+ else
+ ret = ialloc_body(size, false, &tsdn, &usize, true);
+ assert(!tsdn_null(tsdn) || ret == NULL);
+ }
+
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
+ }
+ set_errno(ENOMEM);
+ }
+ if (config_stats && likely(ret != NULL)) {
+ tsd_t *tsd;
+
+ assert(usize == isalloc(tsdn, ret, config_prof));
+ tsd = tsdn_tsd(tsdn);
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ }
+ UTRACE(ptr, size, ret);
+ JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
+ old_rzsize, true, false);
+ witness_assert_lockless(tsdn);
+ return (ret);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_free(void *ptr)
+{
+
+ UTRACE(ptr, 0, 0);
+ if (likely(ptr != NULL)) {
+ tsd_t *tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (likely(!malloc_slow))
+ ifree(tsd, ptr, tcache_get(tsd, false), false);
+ else
+ ifree(tsd, ptr, tcache_get(tsd, false), true);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
+}
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard override functions.
+ */
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_memalign(size_t alignment, size_t size)
+{
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
+ ret = NULL;
+ return (ret);
+}
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_valloc(size_t size)
+{
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
+ ret = NULL;
+ return (ret);
+}
+#endif
+
+/*
+ * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
+ * #define je_malloc malloc
+ */
+#define malloc_is_malloc 1
+#define is_malloc_(a) malloc_is_ ## a
+#define is_malloc(a) is_malloc_(a)
+
+#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
+ *
+ * These definitions interpose hooks in glibc. The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
+ je_memalign;
+# endif
+
+#ifdef CPU_COUNT
+/*
+ * To support static linking with glibc, the libc-specific malloc interface
+ * must also be implemented, so that none of glibc's malloc.o functions are
+ * added to the link.
+ */
+#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
+/* To force macro expansion of je_ prefix before stringification. */
+#define PREALIAS(je_fn) ALIAS(je_fn)
+void *__libc_malloc(size_t size) PREALIAS(je_malloc);
+void __libc_free(void* ptr) PREALIAS(je_free);
+void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
+void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
+void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
+void *__libc_valloc(size_t size) PREALIAS(je_valloc);
+int __posix_memalign(void** r, size_t a, size_t s)
+ PREALIAS(je_posix_memalign);
+#undef PREALIAS
+#undef ALIAS
+
+#endif
+
+#endif
+
+/*
+ * End non-standard override functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
+{
+
+ if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
+ *alignment = 0;
+ *usize = s2u(size);
+ } else {
+ *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ *usize = sa2u(size, *alignment);
+ }
+ if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+ return (true);
+ *zero = MALLOCX_ZERO_GET(flags);
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ *tcache = NULL;
+ else
+ *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ *tcache = tcache_get(tsd, true);
+ if ((flags & MALLOCX_ARENA_MASK) != 0) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(*arena == NULL))
+ return (true);
+ } else
+ *arena = NULL;
+ return (false);
+}
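+
+/*
+ * Illustrative caller-side flag composition for the decoding above: a
+ * 64-byte-aligned, zeroed allocation that bypasses the thread cache could
+ * be requested (sketch, not used by this file) as
+ *
+ *   p = mallocx(size, MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
+ *
+ * imallocx_flags_decode() maps those bits back into alignment, zero, tcache,
+ * and arena arguments for the internal allocation functions.
+ */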
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena, bool slow_path)
+{
+ szind_t ind;
+
+ if (unlikely(alignment != 0))
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+ ind = size2index(usize);
+ assert(ind < NSIZES);
+ return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
+ slow_path));
+}
+
+static void *
+imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena, bool slow_path)
+{
+ void *p;
+
+ if (usize <= SMALL_MAXCLASS) {
+ assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
+ sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
+ p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
+ tcache, arena, slow_path);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsdn, p, usize);
+ } else {
+ p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
+ slow_path);
+ }
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
+{
+ void *p;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
+ arena_t *arena;
+ prof_tctx_t *tctx;
+
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
+ tcache, arena, slow_path);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
+ tcache, arena, slow_path);
+ } else
+ p = NULL;
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
+
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ bool slow_path)
+{
+ void *p;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
+ arena_t *arena;
+
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
+ arena, slow_path);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
+
+/* This function guarantees that *tsdn is non-NULL on success. */
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
+ bool slow_path)
+{
+ tsd_t *tsd;
+
+ if (slow_path && unlikely(malloc_init())) {
+ *tsdn = NULL;
+ return (NULL);
+ }
+
+ tsd = tsd_fetch();
+ *tsdn = tsd_tsdn(tsd);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ if (likely(flags == 0)) {
+ szind_t ind = size2index(size);
+ if (unlikely(ind >= NSIZES))
+ return (NULL);
+ if (config_stats || (config_prof && opt_prof) || (slow_path &&
+ config_valgrind && unlikely(in_valgrind))) {
+ *usize = index2size(ind);
+ assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ }
+
+ if (config_prof && opt_prof) {
+ return (ialloc_prof(tsd, *usize, ind, false,
+ slow_path));
+ }
+
+ return (ialloc(tsd, size, ind, false, slow_path));
+ }
+
+ if (config_prof && opt_prof)
+ return (imallocx_prof(tsd, size, flags, usize, slow_path));
+
+ return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_mallocx(size_t size, int flags)
+{
+ tsdn_t *tsdn;
+ void *p;
+ size_t usize;
+
+ assert(size != 0);
+
+ if (likely(!malloc_slow)) {
+ p = imallocx_body(size, flags, &tsdn, &usize, false);
+ ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
+ } else {
+ p = imallocx_body(size, flags, &tsdn, &usize, true);
+ ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
+ MALLOCX_ZERO_GET(flags));
+ }
+
+ return (p);
+}
+
+static void *
+irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+ prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
+ zero, tcache, arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else {
+ p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
+ tcache, arena);
+ }
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
+ size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+ arena_t *arena)
+{
+ void *p;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
+ alignment, zero, tcache, arena, tctx);
+ } else {
+ p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
+ tcache, arena);
+ }
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return (NULL);
+ }
+
+ if (p == old_ptr && alignment != 0) {
+ /*
+ * The allocation did not move, so it is possible that the size
+ * class is smaller than would guarantee the requested
+ * alignment, and that the alignment constraint was
+ * serendipitously satisfied. Additionally, old_usize may not
+ * be the same as the current usize because of in-place large
+ * reallocation. Therefore, query the actual value of usize.
+ */
+ *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ }
+ prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
+ old_usize, old_tctx);
+
+ return (p);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_rallocx(void *ptr, size_t size, int flags)
+{
+ void *p;
+ tsd_t *tsd;
+ size_t usize;
+ size_t old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
+ bool zero = flags & MALLOCX_ZERO;
+ arena_t *arena;
+ tcache_t *tcache;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(arena == NULL))
+ goto label_oom;
+ } else
+ arena = NULL;
+
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, true);
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
+ old_rzsize = u2rz(old_usize);
+
+ if (config_prof && opt_prof) {
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ goto label_oom;
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ zero, tcache, arena);
+ if (unlikely(p == NULL))
+ goto label_oom;
+ } else {
+ p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
+ tcache, arena);
+ if (unlikely(p == NULL))
+ goto label_oom;
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ }
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+
+ if (config_stats) {
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ }
+ UTRACE(ptr, size, p);
+ JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
+ old_usize, old_rzsize, false, zero);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (p);
+label_oom:
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(ptr, size, 0);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero)
+{
+ size_t usize;
+
+ if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
+ return (old_usize);
+ usize = isalloc(tsdn, ptr, config_prof);
+
+ return (usize);
+}
+
+static size_t
+ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
+{
+ size_t usize;
+
+ if (tctx == NULL)
+ return (old_usize);
+ usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
+ zero);
+
+ return (usize);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero)
+{
+ size_t usize_max, usize;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ /*
+ * usize isn't knowable before ixalloc() returns when extra is non-zero.
+ * Therefore, compute its maximum possible value and use that in
+ * prof_alloc_prep() to decide whether to capture a backtrace.
+ * prof_realloc() will use the actual usize to decide whether to sample.
+ */
+ if (alignment == 0) {
+ usize_max = s2u(size+extra);
+ assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
+ } else {
+ usize_max = sa2u(size+extra, alignment);
+ if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
+ /*
+ * usize_max is out of range, and chances are that
+ * allocation will fail, but use the maximum possible
+ * value and carry on with prof_alloc_prep(), just in
+ * case allocation succeeds.
+ */
+ usize_max = HUGE_MAXCLASS;
+ }
+ }
+ tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
+ size, extra, alignment, zero, tctx);
+ } else {
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
+ }
+ if (usize == old_usize) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return (usize);
+ }
+ prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
+ old_tctx);
+
+ return (usize);
+}
+
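+/*
+ * xallocx() resizes in place only; it returns the resulting usable size,
+ * which equals the old usable size when the resize could not be performed,
+ * so callers typically fall back to rallocx() in that case.
+ */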
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_xallocx(void *ptr, size_t size, size_t extra, int flags)
+{
+ tsd_t *tsd;
+ size_t usize, old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
+ bool zero = flags & MALLOCX_ZERO;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+
+ /*
+ * The API explicitly absolves itself of protecting against (size +
+ * extra) numerical overflow, but we may need to clamp extra to avoid
+ * exceeding HUGE_MAXCLASS.
+ *
+ * Ordinarily, size limit checking is handled deeper down, but here we
+ * have to check as part of (size + extra) clamping, since we need the
+ * clamped value in the above helper functions.
+ */
+ if (unlikely(size > HUGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
+ if (unlikely(HUGE_MAXCLASS - size < extra))
+ extra = HUGE_MAXCLASS - size;
+
+ if (config_valgrind && unlikely(in_valgrind))
+ old_rzsize = u2rz(old_usize);
+
+ if (config_prof && opt_prof) {
+ usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+ alignment, zero);
+ } else {
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
+ }
+ if (unlikely(usize == old_usize))
+ goto label_not_resized;
+
+ if (config_stats) {
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ }
+ JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
+ old_usize, old_rzsize, false, zero);
+label_not_resized:
+ UTRACE(ptr, size, ptr);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (usize);
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_sallocx(const void *ptr, int flags)
+{
+ size_t usize;
+ tsdn_t *tsdn;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ if (config_ivsalloc)
+ usize = ivsalloc(tsdn, ptr, config_prof);
+ else
+ usize = isalloc(tsdn, ptr, config_prof);
+
+ witness_assert_lockless(tsdn);
+ return (usize);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_dallocx(void *ptr, int flags)
+{
+ tsd_t *tsd;
+ tcache_t *tcache;
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, false);
+
+ UTRACE(ptr, 0, 0);
+ if (likely(!malloc_slow))
+ ifree(tsd, ptr, tcache, false);
+ else
+ ifree(tsd, ptr, tcache, true);
+ witness_assert_lockless(tsd_tsdn(tsd));
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+inallocx(tsdn_t *tsdn, size_t size, int flags)
+{
+ size_t usize;
+
+ witness_assert_lockless(tsdn);
+
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
+ usize = s2u(size);
+ else
+ usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ witness_assert_lockless(tsdn);
+ return (usize);
+}
+
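+/*
+ * sdallocx() is the sized counterpart of dallocx(): the caller passes a size
+ * that maps to the same size class as the original request, which lets the
+ * usable size be recomputed via inallocx() instead of being looked up from
+ * allocation metadata (the assertion below cross-checks this).
+ */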
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_sdallocx(void *ptr, size_t size, int flags)
+{
+ tsd_t *tsd;
+ tcache_t *tcache;
+ size_t usize;
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd = tsd_fetch();
+ usize = inallocx(tsd_tsdn(tsd), size, flags);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, false);
+
+ UTRACE(ptr, 0, 0);
+ if (likely(!malloc_slow))
+ isfree(tsd, ptr, usize, tcache, false);
+ else
+ isfree(tsd, ptr, usize, tcache, true);
+ witness_assert_lockless(tsd_tsdn(tsd));
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_nallocx(size_t size, int flags)
+{
+ size_t usize;
+ tsdn_t *tsdn;
+
+ assert(size != 0);
+
+ if (unlikely(malloc_init()))
+ return (0);
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ usize = inallocx(tsdn, size, flags);
+ if (unlikely(usize > HUGE_MAXCLASS))
+ return (0);
+
+ witness_assert_lockless(tsdn);
+ return (usize);
+}
+
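+/*
+ * Example (hypothetical caller, not used by this file) of reading a value
+ * through the control interface:
+ *
+ *   unsigned narenas;
+ *   size_t sz = sizeof(narenas);
+ *   mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
+ *
+ * Writes pass newp/newlen instead of (or in addition to) oldp/oldlenp.
+ */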
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+
+ if (unlikely(malloc_init()))
+ return (EAGAIN);
+
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+ int ret;
+ tsdn_t *tsdn;
+
+ if (unlikely(malloc_init()))
+ return (EAGAIN);
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+ ret = ctl_nametomib(tsdn, name, mibp, miblenp);
+ witness_assert_lockless(tsdn);
+ return (ret);
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+
+ if (unlikely(malloc_init()))
+ return (EAGAIN);
+
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+ stats_print(write_cb, cbopaque, opts);
+ witness_assert_lockless(tsdn);
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+{
+ size_t ret;
+ tsdn_t *tsdn;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ if (config_ivsalloc)
+ ret = ivsalloc(tsdn, ptr, config_prof);
+ else
+ ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
+
+ witness_assert_lockless(tsdn);
+ return (ret);
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * The following functions are used by threading libraries for protection of
+ * malloc during fork().
+ */
+
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator. Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time. The following library
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
+ */
+#ifndef JEMALLOC_JET
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+ malloc_init();
+}
+#endif
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_prefork(void)
+#else
+JEMALLOC_EXPORT void
+_malloc_prefork(void)
+#endif
+{
+ tsd_t *tsd;
+ unsigned i, j, narenas;
+ arena_t *arena;
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ if (!malloc_initialized())
+ return;
+#endif
+ assert(malloc_initialized());
+
+ tsd = tsd_fetch();
+
+ narenas = narenas_total_get();
+
+ witness_prefork(tsd);
+ /* Acquire all mutexes in a safe order. */
+ ctl_prefork(tsd_tsdn(tsd));
+ malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
+ prof_prefork0(tsd_tsdn(tsd));
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < narenas; j++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
+ NULL) {
+ switch (i) {
+ case 0:
+ arena_prefork0(tsd_tsdn(tsd), arena);
+ break;
+ case 1:
+ arena_prefork1(tsd_tsdn(tsd), arena);
+ break;
+ case 2:
+ arena_prefork2(tsd_tsdn(tsd), arena);
+ break;
+ default: not_reached();
+ }
+ }
+ }
+ }
+ base_prefork(tsd_tsdn(tsd));
+ for (i = 0; i < narenas; i++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_prefork3(tsd_tsdn(tsd), arena);
+ }
+ prof_prefork1(tsd_tsdn(tsd));
+}
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_postfork_parent(void)
+#else
+JEMALLOC_EXPORT void
+_malloc_postfork(void)
+#endif
+{
+ tsd_t *tsd;
+ unsigned i, narenas;
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ if (!malloc_initialized())
+ return;
+#endif
+ assert(malloc_initialized());
+
+ tsd = tsd_fetch();
+
+ witness_postfork_parent(tsd);
+ /* Release all mutexes, now that fork() has completed. */
+ base_postfork_parent(tsd_tsdn(tsd));
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
+
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_postfork_parent(tsd_tsdn(tsd), arena);
+ }
+ prof_postfork_parent(tsd_tsdn(tsd));
+ malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
+ ctl_postfork_parent(tsd_tsdn(tsd));
+}
+
+void
+jemalloc_postfork_child(void)
+{
+ tsd_t *tsd;
+ unsigned i, narenas;
+
+ assert(malloc_initialized());
+
+ tsd = tsd_fetch();
+
+ witness_postfork_child(tsd);
+ /* Release all mutexes, now that fork() has completed. */
+ base_postfork_child(tsd_tsdn(tsd));
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
+
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
+ prof_postfork_child(tsd_tsdn(tsd));
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+ ctl_postfork_child(tsd_tsdn(tsd));
+}
+
+/******************************************************************************/
diff --git a/memory/jemalloc/src/src/mb.c b/memory/jemalloc/src/src/mb.c
new file mode 100644
index 000000000..dc2c0a256
--- /dev/null
+++ b/memory/jemalloc/src/src/mb.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_MB_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/memory/jemalloc/src/src/mutex.c b/memory/jemalloc/src/src/mutex.c
new file mode 100644
index 000000000..6333e73d6
--- /dev/null
+++ b/memory/jemalloc/src/src/mutex.c
@@ -0,0 +1,158 @@
+#define JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+#include <dlfcn.h>
+#endif
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifdef JEMALLOC_LAZY_LOCK
+bool isthreaded = false;
+#endif
+#ifdef JEMALLOC_MUTEX_INIT_CB
+static bool postpone_init = true;
+static malloc_mutex_t *postponed_mutexes = NULL;
+#endif
+
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+static void pthread_create_once(void);
+#endif
+
+/******************************************************************************/
+/*
+ * We intercept pthread_create() calls in order to toggle isthreaded if the
+ * process goes multi-threaded.
+ */
+
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+
+static void
+pthread_create_once(void)
+{
+
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+ if (pthread_create_fptr == NULL) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ }
+
+ isthreaded = true;
+}
+
+JEMALLOC_EXPORT int
+pthread_create(pthread_t *__restrict thread,
+ const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
+ void *__restrict arg)
+{
+ static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+
+ pthread_once(&once_control, pthread_create_once);
+
+ return (pthread_create_fptr(thread, attr, start_routine, arg));
+}
+#endif
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t));
+#endif
+
+bool
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
+{
+
+#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ InitializeSRWLock(&mutex->lock);
+# else
+ if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
+ _CRT_SPINCOUNT))
+ return (true);
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ mutex->lock = OS_UNFAIR_LOCK_INIT;
+#elif (defined(JEMALLOC_OSSPIN))
+ mutex->lock = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+ if (postpone_init) {
+ mutex->postponed_next = postponed_mutexes;
+ postponed_mutexes = mutex;
+ } else {
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0)
+ return (true);
+ }
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
+ if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#endif
+ if (config_debug)
+ witness_init(&mutex->witness, name, rank, NULL);
+ return (false);
+}
+
+void
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_lock(tsdn, mutex);
+}
+
+void
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_unlock(tsdn, mutex);
+}
+
+void
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
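+	/*
+	 * The child is single-threaded at this point, so rather than assume
+	 * a mutex held across fork() can be unlocked safely, re-create it
+	 * from scratch (except where the init callback makes plain unlocking
+	 * safe).
+	 */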
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ malloc_mutex_unlock(tsdn, mutex);
+#else
+ if (malloc_mutex_init(mutex, mutex->witness.name,
+ mutex->witness.rank)) {
+ malloc_printf("<jemalloc>: Error re-initializing mutex in "
+ "child\n");
+ if (opt_abort)
+ abort();
+ }
+#endif
+}
+
+bool
+malloc_mutex_boot(void)
+{
+
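+	/*
+	 * Mutexes created before bootstrap_calloc() was usable were queued
+	 * on postponed_mutexes; initialize them now that bootstrapping has
+	 * progressed far enough.
+	 */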
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ postpone_init = false;
+ while (postponed_mutexes != NULL) {
+ if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
+ bootstrap_calloc) != 0)
+ return (true);
+ postponed_mutexes = postponed_mutexes->postponed_next;
+ }
+#endif
+ return (false);
+}
diff --git a/memory/jemalloc/src/src/nstime.c b/memory/jemalloc/src/src/nstime.c
new file mode 100644
index 000000000..0948e29fa
--- /dev/null
+++ b/memory/jemalloc/src/src/nstime.c
@@ -0,0 +1,194 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define BILLION UINT64_C(1000000000)
+
+void
+nstime_init(nstime_t *time, uint64_t ns)
+{
+
+ time->ns = ns;
+}
+
+void
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
+{
+
+ time->ns = sec * BILLION + nsec;
+}
+
+uint64_t
+nstime_ns(const nstime_t *time)
+{
+
+ return (time->ns);
+}
+
+uint64_t
+nstime_sec(const nstime_t *time)
+{
+
+ return (time->ns / BILLION);
+}
+
+uint64_t
+nstime_nsec(const nstime_t *time)
+{
+
+ return (time->ns % BILLION);
+}
+
+void
+nstime_copy(nstime_t *time, const nstime_t *source)
+{
+
+ *time = *source;
+}
+
+int
+nstime_compare(const nstime_t *a, const nstime_t *b)
+{
+
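+	/* Returns -1, 0, or 1, in the style of memcmp(). */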
+ return ((a->ns > b->ns) - (a->ns < b->ns));
+}
+
+void
+nstime_add(nstime_t *time, const nstime_t *addend)
+{
+
+ assert(UINT64_MAX - time->ns >= addend->ns);
+
+ time->ns += addend->ns;
+}
+
+void
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
+{
+
+ assert(nstime_compare(time, subtrahend) >= 0);
+
+ time->ns -= subtrahend->ns;
+}
+
+void
+nstime_imultiply(nstime_t *time, uint64_t multiplier)
+{
+
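+	/*
+	 * Assert that the multiplication cannot overflow: either both
+	 * operands fit in 32 bits, or dividing the product by the multiplier
+	 * recovers the original value.
+	 */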
+ assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
+ 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+
+ time->ns *= multiplier;
+}
+
+void
+nstime_idivide(nstime_t *time, uint64_t divisor)
+{
+
+ assert(divisor != 0);
+
+ time->ns /= divisor;
+}
+
+uint64_t
+nstime_divide(const nstime_t *time, const nstime_t *divisor)
+{
+
+ assert(divisor->ns != 0);
+
+ return (time->ns / divisor->ns);
+}
+
+#ifdef _WIN32
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+ FILETIME ft;
+ uint64_t ticks_100ns;
+
+ GetSystemTimeAsFileTime(&ft);
+ ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+
+ nstime_init(time, ticks_100ns * 100);
+}
+#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+
+ nstime_init(time, mach_absolute_time());
+}
+#else
+# define NSTIME_MONOTONIC false
+static void
+nstime_get(nstime_t *time)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef nstime_monotonic
+#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
+#endif
+bool
+nstime_monotonic(void)
+{
+
+ return (NSTIME_MONOTONIC);
+#undef NSTIME_MONOTONIC
+}
+#ifdef JEMALLOC_JET
+#undef nstime_monotonic
+#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
+nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(n_nstime_update)
+#endif
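+/*
+ * Refresh *time from the configured clock.  Returns true, leaving *time
+ * unchanged, if the clock appears to have run backward.
+ */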
+bool
+nstime_update(nstime_t *time)
+{
+ nstime_t old_time;
+
+ nstime_copy(&old_time, time);
+ nstime_get(time);
+
+ /* Handle non-monotonic clocks. */
+ if (unlikely(nstime_compare(&old_time, time) > 0)) {
+ nstime_copy(time, &old_time);
+ return (true);
+ }
+
+ return (false);
+}
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(nstime_update)
+nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
+#endif
diff --git a/memory/jemalloc/src/src/pages.c b/memory/jemalloc/src/src/pages.c
new file mode 100644
index 000000000..647952ac3
--- /dev/null
+++ b/memory/jemalloc/src/src/pages.c
@@ -0,0 +1,273 @@
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#include <sys/sysctl.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifndef _WIN32
+# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
+# define PAGES_PROT_DECOMMIT (PROT_NONE)
+static int mmap_flags;
+#endif
+static bool os_overcommits;
+
+/******************************************************************************/
+
+void *
+pages_map(void *addr, size_t size, bool *commit)
+{
+ void *ret;
+
+ assert(size != 0);
+
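+	/*
+	 * When the OS overcommits, mapping pages without committing them
+	 * gains nothing, so report the mapping as committed up front.
+	 */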
+ if (os_overcommits)
+ *commit = true;
+
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
+ PAGE_READWRITE);
+#else
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ {
+ int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+
+ ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ }
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ pages_unmap(ret, size);
+ ret = NULL;
+ }
+#endif
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
+
+void
+pages_unmap(void *addr, size_t size)
+{
+
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
+ if (opt_abort)
+ abort();
+ }
+}
+
+void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+ bool *commit)
+{
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
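+	/*
+	 * Windows cannot release part of a VirtualAlloc region, so the whole
+	 * mapping is released and the desired subrange mapped again; on
+	 * POSIX the unwanted head and tail are simply munmap()ed.
+	 */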
+#ifdef _WIN32
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size, commit);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
+ return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit)
+{
+
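+	/*
+	 * Commit state is not tracked when the OS overcommits; return true
+	 * so the caller knows the pages were left unchanged.
+	 */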
+ if (os_overcommits)
+ return (true);
+
+#ifdef _WIN32
+ return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
+ PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
+#else
+ {
+ int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+ void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
+ -1, 0);
+ if (result == MAP_FAILED)
+ return (true);
+ if (result != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right
+ * place.
+ */
+ pages_unmap(result, size);
+ return (true);
+ }
+ return (false);
+ }
+#endif
+}
+
+bool
+pages_commit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, true));
+}
+
+bool
+pages_decommit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, false));
+}
+
+bool
+pages_purge(void *addr, size_t size)
+{
+ bool unzeroed;
+
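+	/*
+	 * unzeroed reports whether the pages may still hold old contents:
+	 * MADV_DONTNEED supplies zero-filled pages on the next touch,
+	 * whereas MADV_FREE and MEM_RESET may leave the old data in place.
+	 */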
+#ifdef _WIN32
+ VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
+#elif defined(JEMALLOC_HAVE_MADVISE)
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
+# else
+# error "No madvise(2) flag defined for purging unused dirty pages."
+# endif
+ int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
+ unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
+#else
+ /* Last resort no-op. */
+ unzeroed = true;
+#endif
+ return (unzeroed);
+}
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+static bool
+os_overcommits_sysctl(void)
+{
+ int vm_overcommit;
+ size_t sz;
+
+ sz = sizeof(vm_overcommit);
+ if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
+ return (false); /* Error. */
+
+ return ((vm_overcommit & 0x3) == 0);
+}
+#endif
+
+#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+/*
+ * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
+ * reentry during bootstrapping if another library has interposed system call
+ * wrappers.
+ */
+static bool
+os_overcommits_proc(void)
+{
+ int fd;
+ char buf[1];
+ ssize_t nread;
+
+#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_open)
+ fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+#else
+ fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+#endif
+ if (fd == -1)
+ return (false); /* Error. */
+
+#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_read)
+ nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
+#else
+ nread = read(fd, &buf, sizeof(buf));
+#endif
+
+#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_close)
+ syscall(SYS_close, fd);
+#else
+ close(fd);
+#endif
+
+ if (nread < 1)
+ return (false); /* Error. */
+ /*
+ * /proc/sys/vm/overcommit_memory meanings:
+ * 0: Heuristic overcommit.
+ * 1: Always overcommit.
+ * 2: Never overcommit.
+ */
+ return (buf[0] == '0' || buf[0] == '1');
+}
+#endif
+
+void
+pages_boot(void)
+{
+
+#ifndef _WIN32
+ mmap_flags = MAP_PRIVATE | MAP_ANON;
+#endif
+
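+	/*
+	 * Detect whether the OS overcommits memory; if it does,
+	 * MAP_NORESERVE avoids reserving swap for mappings that may never
+	 * be committed.
+	 */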
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+ os_overcommits = os_overcommits_sysctl();
+#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
+ os_overcommits = os_overcommits_proc();
+# ifdef MAP_NORESERVE
+ if (os_overcommits)
+ mmap_flags |= MAP_NORESERVE;
+# endif
+#else
+ os_overcommits = false;
+#endif
+}
diff --git a/memory/jemalloc/src/src/prng.c b/memory/jemalloc/src/src/prng.c
new file mode 100644
index 000000000..76646a2a4
--- /dev/null
+++ b/memory/jemalloc/src/src/prng.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_PRNG_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/memory/jemalloc/src/src/prof.c b/memory/jemalloc/src/src/prof.c
new file mode 100644
index 000000000..c89dade1f
--- /dev/null
+++ b/memory/jemalloc/src/src/prof.c
@@ -0,0 +1,2355 @@
+#define JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#ifdef JEMALLOC_PROF_LIBGCC
+#include <unwind.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_prof = false;
+bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
+size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
+ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+bool opt_prof_gdump = false;
+bool opt_prof_final = false;
+bool opt_prof_leak = false;
+bool opt_prof_accum = false;
+char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
+uint64_t prof_interval = 0;
+
+size_t lg_prof_sample;
+
+/*
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+static malloc_mutex_t *gctx_locks;
+static unsigned cum_gctxs; /* Atomic counter. */
+
+/*
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+static malloc_mutex_t *tdata_locks;
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
+ * structure that knows about all backtraces currently captured.
+ */
+static ckh_t bt2gctx;
+static malloc_mutex_t bt2gctx_mtx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+static malloc_mutex_t tdatas_mtx;
+
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
+
+static malloc_mutex_t prof_dump_seq_mtx;
+static uint64_t prof_dump_seq;
+static uint64_t prof_dump_iseq;
+static uint64_t prof_dump_mseq;
+static uint64_t prof_dump_useq;
+
+/*
+ * This buffer is rather large for stack allocation, so use a single buffer for
+ * all profile dumps.
+ */
+static malloc_mutex_t prof_dump_mtx;
+static char prof_dump_buf[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PROF_DUMP_BUFSIZE
+#else
+ 1
+#endif
+];
+static size_t prof_dump_buf_end;
+static int prof_dump_fd;
+
+/* Do not dump any profiles until bootstrapping is complete. */
+static bool prof_booted = false;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
+static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached);
+static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached);
+static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+
+/******************************************************************************/
+/* Red-black trees. */
+
+JEMALLOC_INLINE_C int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
+{
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
+{
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0)
+ ret = (a_len > b_len) - (a_len < b_len);
+ return (ret);
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
+{
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
+
+void
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
+{
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ if (updated) {
+ /*
+ * Compute a new sample threshold. This isn't very important in
+ * practice, because this function is rarely executed, so the
+ * potential for sample bias is minimal except in contrived
+ * programs.
+ */
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata != NULL)
+ prof_sample_threshold_update(tdata);
+ }
+
+ if ((uintptr_t)tctx > (uintptr_t)1U) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ tctx->prepared = false;
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+}
+
+void
+prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx)
+{
+
+ prof_tctx_set(tsdn, ptr, usize, tctx);
+
+ malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ tctx->cnts.curobjs++;
+ tctx->cnts.curbytes += usize;
+ if (opt_prof_accum) {
+ tctx->cnts.accumobjs++;
+ tctx->cnts.accumbytes += usize;
+ }
+ tctx->prepared = false;
+ malloc_mutex_unlock(tsdn, tctx->tdata->lock);
+}
+
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
+{
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ assert(tctx->cnts.curobjs > 0);
+ assert(tctx->cnts.curbytes >= usize);
+ tctx->cnts.curobjs--;
+ tctx->cnts.curbytes -= usize;
+
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+}
+
+void
+bt_init(prof_bt_t *bt, void **vec)
+{
+
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
+}
+
+JEMALLOC_INLINE_C void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
+{
+
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+}
+
+JEMALLOC_INLINE_C void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
+{
+
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ if (tdata != NULL) {
+ bool idump, gdump;
+
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
+
+ if (idump)
+ prof_idump(tsd_tsdn(tsd));
+ if (gdump)
+ prof_gdump(tsd_tsdn(tsd));
+ }
+}
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+void
+prof_backtrace(prof_bt_t *bt)
+{
+ int nframes;
+
+ cassert(config_prof);
+ assert(bt->len == 0);
+ assert(bt->vec != NULL);
+
+ nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
+ if (nframes <= 0)
+ return;
+ bt->len = nframes;
+}
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
+{
+
+ cassert(config_prof);
+
+ return (_URC_NO_REASON);
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg)
+{
+ prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
+
+ cassert(config_prof);
+
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL)
+ return (_URC_END_OF_STACK);
+ data->bt->vec[data->bt->len] = ip;
+ data->bt->len++;
+ if (data->bt->len == data->max)
+ return (_URC_END_OF_STACK);
+
+ return (_URC_NO_REASON);
+}
+
+void
+prof_backtrace(prof_bt_t *bt)
+{
+ prof_unwind_data_t data = {bt, PROF_BT_MAX};
+
+ cassert(config_prof);
+
+ _Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
+void
+prof_backtrace(prof_bt_t *bt)
+{
+#define BT_FRAME(i) \
+ if ((i) < PROF_BT_MAX) { \
+ void *p; \
+ if (__builtin_frame_address(i) == 0) \
+ return; \
+ p = __builtin_return_address(i); \
+ if (p == NULL) \
+ return; \
+ bt->vec[(i)] = p; \
+ bt->len = (i) + 1; \
+ } else \
+ return;
+
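+	/*
+	 * __builtin_frame_address() and __builtin_return_address() require
+	 * compile-time constant arguments, so the stack walk is unrolled one
+	 * BT_FRAME expansion per frame, up to PROF_BT_MAX frames.
+	 */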
+ cassert(config_prof);
+
+ BT_FRAME(0)
+ BT_FRAME(1)
+ BT_FRAME(2)
+ BT_FRAME(3)
+ BT_FRAME(4)
+ BT_FRAME(5)
+ BT_FRAME(6)
+ BT_FRAME(7)
+ BT_FRAME(8)
+ BT_FRAME(9)
+
+ BT_FRAME(10)
+ BT_FRAME(11)
+ BT_FRAME(12)
+ BT_FRAME(13)
+ BT_FRAME(14)
+ BT_FRAME(15)
+ BT_FRAME(16)
+ BT_FRAME(17)
+ BT_FRAME(18)
+ BT_FRAME(19)
+
+ BT_FRAME(20)
+ BT_FRAME(21)
+ BT_FRAME(22)
+ BT_FRAME(23)
+ BT_FRAME(24)
+ BT_FRAME(25)
+ BT_FRAME(26)
+ BT_FRAME(27)
+ BT_FRAME(28)
+ BT_FRAME(29)
+
+ BT_FRAME(30)
+ BT_FRAME(31)
+ BT_FRAME(32)
+ BT_FRAME(33)
+ BT_FRAME(34)
+ BT_FRAME(35)
+ BT_FRAME(36)
+ BT_FRAME(37)
+ BT_FRAME(38)
+ BT_FRAME(39)
+
+ BT_FRAME(40)
+ BT_FRAME(41)
+ BT_FRAME(42)
+ BT_FRAME(43)
+ BT_FRAME(44)
+ BT_FRAME(45)
+ BT_FRAME(46)
+ BT_FRAME(47)
+ BT_FRAME(48)
+ BT_FRAME(49)
+
+ BT_FRAME(50)
+ BT_FRAME(51)
+ BT_FRAME(52)
+ BT_FRAME(53)
+ BT_FRAME(54)
+ BT_FRAME(55)
+ BT_FRAME(56)
+ BT_FRAME(57)
+ BT_FRAME(58)
+ BT_FRAME(59)
+
+ BT_FRAME(60)
+ BT_FRAME(61)
+ BT_FRAME(62)
+ BT_FRAME(63)
+ BT_FRAME(64)
+ BT_FRAME(65)
+ BT_FRAME(66)
+ BT_FRAME(67)
+ BT_FRAME(68)
+ BT_FRAME(69)
+
+ BT_FRAME(70)
+ BT_FRAME(71)
+ BT_FRAME(72)
+ BT_FRAME(73)
+ BT_FRAME(74)
+ BT_FRAME(75)
+ BT_FRAME(76)
+ BT_FRAME(77)
+ BT_FRAME(78)
+ BT_FRAME(79)
+
+ BT_FRAME(80)
+ BT_FRAME(81)
+ BT_FRAME(82)
+ BT_FRAME(83)
+ BT_FRAME(84)
+ BT_FRAME(85)
+ BT_FRAME(86)
+ BT_FRAME(87)
+ BT_FRAME(88)
+ BT_FRAME(89)
+
+ BT_FRAME(90)
+ BT_FRAME(91)
+ BT_FRAME(92)
+ BT_FRAME(93)
+ BT_FRAME(94)
+ BT_FRAME(95)
+ BT_FRAME(96)
+ BT_FRAME(97)
+ BT_FRAME(98)
+ BT_FRAME(99)
+
+ BT_FRAME(100)
+ BT_FRAME(101)
+ BT_FRAME(102)
+ BT_FRAME(103)
+ BT_FRAME(104)
+ BT_FRAME(105)
+ BT_FRAME(106)
+ BT_FRAME(107)
+ BT_FRAME(108)
+ BT_FRAME(109)
+
+ BT_FRAME(110)
+ BT_FRAME(111)
+ BT_FRAME(112)
+ BT_FRAME(113)
+ BT_FRAME(114)
+ BT_FRAME(115)
+ BT_FRAME(116)
+ BT_FRAME(117)
+ BT_FRAME(118)
+ BT_FRAME(119)
+
+ BT_FRAME(120)
+ BT_FRAME(121)
+ BT_FRAME(122)
+ BT_FRAME(123)
+ BT_FRAME(124)
+ BT_FRAME(125)
+ BT_FRAME(126)
+ BT_FRAME(127)
+#undef BT_FRAME
+}
+#else
+void
+prof_backtrace(prof_bt_t *bt)
+{
+
+ cassert(config_prof);
+ not_reached();
+}
+#endif
+
+static malloc_mutex_t *
+prof_gctx_mutex_choose(void)
+{
+ unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
+
+ return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid)
+{
+
+ return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
+}
+
+static prof_gctx_t *
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
+{
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
+ if (gctx == NULL)
+ return (NULL);
+ gctx->lock = prof_gctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return (gctx);
+}
+
+static void
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata)
+{
+
+ cassert(config_prof);
+
+ /*
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
+ * into this function.
+ */
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
+ not_reached();
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
+ } else {
+ /*
+ * Compensate for increment in prof_tctx_destroy() or
+ * prof_lookup().
+ */
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_leave(tsd, tdata_self);
+ }
+}
+
+static bool
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
+{
+
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ if (opt_prof_accum)
+ return (false);
+ if (tctx->cnts.curobjs != 0)
+ return (false);
+ if (tctx->prepared)
+ return (false);
+ return (true);
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx)
+{
+
+ if (opt_prof_accum)
+ return (false);
+ if (!tctx_tree_empty(&gctx->tctxs))
+ return (false);
+ if (gctx->nlimbo != 0)
+ return (false);
+ return (true);
+}
+
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
+{
+ prof_tdata_t *tdata = tctx->tdata;
+ prof_gctx_t *gctx = tctx->gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
+
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else
+ destroy_gctx = false;
+ break;
+ case prof_tctx_state_dumping:
+ /*
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
+ */
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
+
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, false);
+
+ if (destroy_tctx)
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
+}
+
+static bool
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
+{
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ bool new_gctx;
+
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (gctx.v == NULL) {
+ prof_leave(tsd, tdata);
+ return (true);
+ }
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
+ return (true);
+ }
+ new_gctx = true;
+ } else {
+ /*
+ * Increment nlimbo, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ new_gctx = false;
+ }
+ prof_leave(tsd, tdata);
+
+ *p_btkey = btkey.v;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
+ return (false);
+}
+
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt)
+{
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } ret;
+ prof_tdata_t *tdata;
+ bool not_found;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return (NULL);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) /* Note double negative! */
+ ret.p->prepared = true;
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (not_found) {
+ void *btkey;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
+
+ /*
+ * This thread's cache lacks bt. Look for it in the global
+ * cache.
+ */
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx))
+ return (NULL);
+
+ /* Link a prof_tctx_t into gctx for this thread. */
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd, NULL), true);
+ if (ret.p == NULL) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ return (NULL);
+ }
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
+ memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (error) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
+ return (NULL);
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+
+ return (ret.p);
+}
+
+/*
+ * The bodies of this function and prof_leakcheck() are compiled out unless heap
+ * profiling is enabled, so that it is possible to compile jemalloc with
+ * floating point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a workaround for
+ * versions of glibc that don't properly save/restore floating point registers
+ * during dynamic lazy symbol loading (which internally calls into whatever
+ * malloc implementation happens to be integrated into the application). Note
+ * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
+ * memory moves, so jemalloc must be compiled with such optimizations disabled
+ * (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
+void
+prof_sample_threshold_update(prof_tdata_t *tdata)
+{
+#ifdef JEMALLOC_PROF
+ uint64_t r;
+ double u;
+
+ if (!config_prof)
+ return;
+
+ if (lg_prof_sample == 0) {
+ tdata->bytes_until_sample = 0;
+ return;
+ }
+
+ /*
+ * Compute sample interval as a geometrically distributed random
+ * variable with mean (2^lg_prof_sample).
+ *
+ * __ __
+ * | log(u) | 1
+ * tdata->bytes_until_sample = | -------- |, where p = ---------------
+ * | log(1-p) | lg_prof_sample
+ * 2
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://luc.devroye.org/rnbookindex.html)
+ */
+ r = prng_lg_range_u64(&tdata->prng_state, 53);
+ u = (double)r * (1.0/9007199254740992.0L);
+ tdata->bytes_until_sample = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ + (uint64_t)1U;
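+	/*
+	 * For example, lg_prof_sample == 19 yields a mean sampling interval
+	 * of 2^19 bytes (512 KiB); each threshold drawn here varies around
+	 * that mean according to the geometric distribution above.
+	 */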
+#endif
+}
+
+#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return (NULL);
+}
+
+size_t
+prof_tdata_count(void)
+{
+ size_t tdata_count = 0;
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+
+ return (tdata_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+size_t
+prof_bt_count(void)
+{
+ size_t bt_count;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return (0);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ return (bt_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
+#endif
+static int
+prof_dump_open(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = creat(filename, 0644);
+ if (fd == -1 && !propagate_err) {
+		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
+ filename);
+ if (opt_abort)
+ abort();
+ }
+
+ return (fd);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
+#endif
+
+static bool
+prof_dump_flush(bool propagate_err)
+{
+ bool ret = false;
+ ssize_t err;
+
+ cassert(config_prof);
+
+ err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
+ if (err == -1) {
+ if (!propagate_err) {
+ malloc_write("<jemalloc>: write() failed during heap "
+ "profile flush\n");
+ if (opt_abort)
+ abort();
+ }
+ ret = true;
+ }
+ prof_dump_buf_end = 0;
+
+ return (ret);
+}
+
+static bool
+prof_dump_close(bool propagate_err)
+{
+ bool ret;
+
+ assert(prof_dump_fd != -1);
+ ret = prof_dump_flush(propagate_err);
+ close(prof_dump_fd);
+ prof_dump_fd = -1;
+
+ return (ret);
+}
+
+static bool
+prof_dump_write(bool propagate_err, const char *s)
+{
+ size_t i, slen, n;
+
+ cassert(config_prof);
+
+ i = 0;
+ slen = strlen(s);
+ while (i < slen) {
+ /* Flush the buffer if it is full. */
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
+ if (prof_dump_flush(propagate_err) && propagate_err)
+ return (true);
+
+ if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
+ /* Finish writing. */
+ n = slen - i;
+ } else {
+ /* Write as much of s as will fit. */
+ n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
+ }
+ memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
+ prof_dump_buf_end += n;
+ i += n;
+ }
+
+ return (false);
+}
+
+JEMALLOC_FORMAT_PRINTF(2, 3)
+static bool
+prof_dump_printf(bool propagate_err, const char *format, ...)
+{
+ bool ret;
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ ret = prof_dump_write(propagate_err, buf);
+
+ return (ret);
+}
+
+static void
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
+{
+
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
+ }
+}
+
+static void
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
+{
+
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ }
+}
+
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return (NULL);
+}
+
+struct prof_tctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
+{
+ struct prof_tctx_dump_iter_arg_s *arg =
+ (struct prof_tctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ if (prof_dump_printf(arg->propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+ "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+ tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+ tctx->dump_cnts.accumbytes))
+ return (tctx);
+ break;
+ default:
+ not_reached();
+ }
+ return (NULL);
+}
+
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+ prof_tctx_t *ret;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return (ret);
+}
+
+static void
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+{
+
+ cassert(config_prof);
+
+ malloc_mutex_lock(tsdn, gctx->lock);
+
+ /*
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(tsdn, gctx->lock);
+}
+
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t leak_ngctx;
+};
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+{
+ struct prof_gctx_merge_iter_arg_s *arg =
+ (struct prof_gctx_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
+ if (gctx->cnt_summed.curobjs != 0)
+ arg->leak_ngctx++;
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+
+ return (NULL);
+}
+
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
+{
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
+
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, true, true);
+ } else
+ next = NULL;
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ } else
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+}
+
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t cnt_all;
+};
+
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *opaque)
+{
+ struct prof_tdata_merge_iter_arg_s *arg =
+ (struct prof_tdata_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);)
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+
+ arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
+ }
+ } else
+ tdata->dumping = false;
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
+
+ return (NULL);
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ bool propagate_err = *(bool *)arg;
+
+ if (!tdata->dumping)
+ return (NULL);
+
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
+ tdata->thr_uid, tdata->cnt_summed.curobjs,
+ tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
+ tdata->cnt_summed.accumbytes,
+ (tdata->thread_name != NULL) ? " " : "",
+ (tdata->thread_name != NULL) ? tdata->thread_name : ""))
+ return (tdata);
+ return (NULL);
+}
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
+#endif
+static bool
+prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
+{
+ bool ret;
+
+ if (prof_dump_printf(propagate_err,
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
+ (void *)&propagate_err) != NULL);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
+#endif
+
+static bool
+prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
+{
+ bool ret;
+ unsigned i;
+ struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
+
+ cassert(config_prof);
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+	/* Avoid dumping gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
+ ret = false;
+ goto label_return;
+ }
+
+ if (prof_dump_printf(propagate_err, "@")) {
+ ret = true;
+ goto label_return;
+ }
+ for (i = 0; i < bt->len; i++) {
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
+ (uintptr_t)bt->vec[i])) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ if (prof_dump_printf(propagate_err,
+ "\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
+ gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
+
+ prof_tctx_dump_iter_arg.tsdn = tsdn;
+ prof_tctx_dump_iter_arg.propagate_err = propagate_err;
+ if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
+ (void *)&prof_tctx_dump_iter_arg) != NULL) {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...)
+{
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+ mfd = open(filename, O_RDONLY);
+
+ return (mfd);
+}
+#endif
+
+static int
+prof_getpid(void)
+{
+
+#ifdef _WIN32
+ return (GetCurrentProcessId());
+#else
+ return (getpid());
+#endif
+}
+
+static bool
+prof_dump_maps(bool propagate_err)
+{
+ bool ret;
+ int mfd;
+
+ cassert(config_prof);
+#ifdef __FreeBSD__
+ mfd = prof_open_maps("/proc/curproc/map");
+#elif defined(_WIN32)
+	mfd = -1; /* Not implemented. */
+#else
+ {
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1)
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
+#endif
+ if (mfd != -1) {
+ ssize_t nread;
+
+ if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
+ nread = 0;
+ do {
+ prof_dump_buf_end += nread;
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ /* Make space in prof_dump_buf before read(). */
+ if (prof_dump_flush(propagate_err) &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
+ }
+ nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
+ PROF_DUMP_BUFSIZE - prof_dump_buf_end);
+ } while (nread > 0);
+ } else {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ if (mfd != -1)
+ close(mfd);
+ return (ret);
+}
+
+/*
+ * See prof_sample_threshold_update() comment for why the body of this function
+ * is conditionally compiled.
+ */
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
+ const char *filename)
+{
+
+#ifdef JEMALLOC_PROF
+ /*
+	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result
+	 * may differ slightly from what jeprof reports, because here we scale
+	 * the summary values, whereas jeprof scales each context individually
+	 * and reports the sums of the scaled values.
+ */
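+	/*
+	 * scale_factor below is the reciprocal of (roughly) the probability
+	 * that an allocation of the observed average size was sampled at
+	 * least once, so multiplying the sampled counts by it extrapolates
+	 * them to estimated totals.
+	 */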
+ if (cnt_all->curbytes != 0) {
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
+ filename);
+ }
+#endif
+}
+
+struct prof_gctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+{
+ prof_gctx_t *ret;
+ struct prof_gctx_dump_iter_arg_s *arg =
+ (struct prof_gctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+
+ if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
+ gctxs)) {
+ ret = gctx;
+ goto label_return;
+ }
+
+ ret = NULL;
+label_return:
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ return (ret);
+}
+
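+/*
+ * Write a complete heap profile to filename: merge every tdata's tctx
+ * counts into the owning gctx's while in prof_enter(), then emit the
+ * header, one record per backtrace, and finally the process memory map.
+ */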
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
+{
+ prof_tdata_t *tdata;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ size_t tabind;
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ prof_gctx_tree_t gctxs;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (true);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ prof_enter(tsd, tdata);
+
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(&gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
+ prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
+ memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ (void *)&prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
+ prof_gctx_merge_iter_arg.leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
+ (void *)&prof_gctx_merge_iter_arg);
+
+ prof_leave(tsd, tdata);
+
+ /* Create dump file. */
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
+ goto label_open_close_error;
+
+ /* Dump profile header. */
+ if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
+ &prof_tdata_merge_iter_arg.cnt_all))
+ goto label_write_error;
+
+ /* Dump per gctx profile stats. */
+ prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
+ prof_gctx_dump_iter_arg.propagate_err = propagate_err;
+ if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
+ (void *)&prof_gctx_dump_iter_arg) != NULL)
+ goto label_write_error;
+
+ /* Dump /proc/<pid>/maps if possible. */
+ if (prof_dump_maps(propagate_err))
+ goto label_write_error;
+
+ if (prof_dump_close(propagate_err))
+ goto label_open_close_error;
+
+ prof_gctx_finish(tsd, &gctxs);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+
+ if (leakcheck) {
+ prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
+ prof_gctx_merge_iter_arg.leak_ngctx, filename);
+ }
+ return (false);
+label_write_error:
+ prof_dump_close(propagate_err);
+label_open_close_error:
+ prof_gctx_finish(tsd, &gctxs);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ return (true);
+}
+
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+static void
+prof_dump_filename(char *filename, char v, uint64_t vseq)
+{
+
+ cassert(config_prof);
+
+ if (vseq != VSEQ_INVALID) {
+ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
+ } else {
+ /* "<prefix>.<pid>.<seq>.<v>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c.heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ }
+ prof_dump_seq++;
+}
+
+static void
+prof_fdump(void)
+{
+ tsd_t *tsd;
+ char filename[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
+
+ if (!prof_booted)
+ return;
+ tsd = tsd_fetch();
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
+}
+
+void
+prof_idump(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ if (!prof_booted || tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return;
+ if (tdata->enq) {
+ tdata->enq_idump = true;
+ return;
+ }
+
+ if (opt_prof_prefix[0] != '\0') {
+ char filename[PATH_MAX + 1];
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'i', prof_dump_iseq);
+ prof_dump_iseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
+ }
+}
+
+bool
+prof_mdump(tsd_t *tsd, const char *filename)
+{
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+
+ if (!opt_prof || !prof_booted)
+ return (true);
+
+ if (filename == NULL) {
+ /* No filename specified, so automatically generate one. */
+ if (opt_prof_prefix[0] == '\0')
+ return (true);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
+ prof_dump_mseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ filename = filename_buf;
+ }
+ return (prof_dump(tsd, true, filename, false));
+}
+
+void
+prof_gdump(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ if (!prof_booted || tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return;
+ if (tdata->enq) {
+ tdata->enq_gdump = true;
+ return;
+ }
+
+ if (opt_prof_prefix[0] != '\0') {
+ char filename[DUMP_FILENAME_BUFSIZE];
+ malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'u', prof_dump_useq);
+ prof_dump_useq++;
+ malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
+ }
+}
+
+static void
+prof_bt_hash(const void *key, size_t r_hash[2])
+{
+ prof_bt_t *bt = (prof_bt_t *)key;
+
+ cassert(config_prof);
+
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
+}
+
+static bool
+prof_bt_keycomp(const void *k1, const void *k2)
+{
+ const prof_bt_t *bt1 = (prof_bt_t *)k1;
+ const prof_bt_t *bt2 = (prof_bt_t *)k2;
+
+ cassert(config_prof);
+
+ if (bt1->len != bt2->len)
+ return (false);
+ return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
+}
+
+JEMALLOC_INLINE_C uint64_t
+prof_thr_uid_alloc(tsdn_t *tsdn)
+{
+ uint64_t thr_uid;
+
+ malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
+ thr_uid = next_thr_uid;
+ next_thr_uid++;
+ malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
+
+ return (thr_uid);
+}
+
+static prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active)
+{
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ /* Initialize an empty cache for this thread. */
+ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
+ size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (tdata == NULL)
+ return (NULL);
+
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ return (NULL);
+ }
+
+ tdata->prng_state = (uint64_t)(uintptr_t)tdata;
+ prof_sample_threshold_update(tdata);
+
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
+
+ tdata->dumping = false;
+ tdata->active = active;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ return (tdata);
+}
+
+prof_tdata_t *
+prof_tdata_init(tsd_t *tsd)
+{
+
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
+ NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
+}
+
+static bool
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ if (tdata->attached && !even_if_attached)
+ return (false);
+ if (ckh_count(&tdata->bt2tctx) != 0)
+ return (false);
+ return (true);
+}
+
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
+
+ return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+}
+
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+
+ if (tdata->thread_name != NULL)
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
+ /*
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
+ */
+ if (!destroy_tdata)
+ tdata->attached = false;
+ tsd_prof_tdata_set(tsd, NULL);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, true);
+}
+
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
+ char *thread_name = (tdata->thread_name != NULL) ?
+ prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ bool active = tdata->active;
+
+ prof_tdata_detach(tsd, tdata);
+ return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active));
+}
+
+static bool
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsdn, tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tsdn, tdata, false);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tsdn, tdata->lock);
+
+ return (destroy_tdata);
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+}
+
+void
+prof_reset(tsd_t *tsd, size_t lg_sample)
+{
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, (void *)tsd);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else
+ next = NULL;
+ } while (next != NULL);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+}
+
+void
+prof_tdata_cleanup(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ if (!config_prof)
+ return;
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (tdata != NULL)
+ prof_tdata_detach(tsd, tdata);
+}
+
+bool
+prof_active_get(tsdn_t *tsdn)
+{
+ bool prof_active_current;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_current = prof_active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return (prof_active_current);
+}
+
+bool
+prof_active_set(tsdn_t *tsdn, bool active)
+{
+ bool prof_active_old;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_old = prof_active;
+ prof_active = active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return (prof_active_old);
+}
+
+const char *
+prof_thread_name_get(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return ("");
+ return (tdata->thread_name != NULL ? tdata->thread_name : "");
+}
+
+static char *
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
+{
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL)
+ return (NULL);
+
+ size = strlen(thread_name) + 1;
+ if (size == 1)
+ return ("");
+
+ ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (ret == NULL)
+ return (NULL);
+ memcpy(ret, thread_name, size);
+ return (ret);
+}
+
+int
+prof_thread_name_set(tsd_t *tsd, const char *thread_name)
+{
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (EAGAIN);
+
+ /* Validate input. */
+ if (thread_name == NULL)
+ return (EFAULT);
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c))
+ return (EFAULT);
+ }
+
+ s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
+ if (s == NULL)
+ return (EAGAIN);
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0)
+ tdata->thread_name = s;
+ return (0);
+}
+
+bool
+prof_thread_active_get(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (false);
+ return (tdata->active);
+}
+
+bool
+prof_thread_active_set(tsd_t *tsd, bool active)
+{
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (true);
+ tdata->active = active;
+ return (false);
+}
+
+bool
+prof_thread_active_init_get(tsdn_t *tsdn)
+{
+ bool active_init;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init = prof_thread_active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return (active_init);
+}
+
+bool
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
+{
+ bool active_init_old;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init_old = prof_thread_active_init;
+ prof_thread_active_init = active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return (active_init_old);
+}
+
+bool
+prof_gdump_get(tsdn_t *tsdn)
+{
+ bool prof_gdump_current;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_current = prof_gdump_val;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return (prof_gdump_current);
+}
+
+bool
+prof_gdump_set(tsdn_t *tsdn, bool gdump)
+{
+ bool prof_gdump_old;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_old = prof_gdump_val;
+ prof_gdump_val = gdump;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return (prof_gdump_old);
+}
+
+void
+prof_boot0(void)
+{
+
+ cassert(config_prof);
+
+ memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
+ sizeof(PROF_PREFIX_DEFAULT));
+}
+
+void
+prof_boot1(void)
+{
+
+ cassert(config_prof);
+
+ /*
+ * opt_prof must be in its final state before any arenas are
+ * initialized, so this function must be executed early.
+ */
+
+ if (opt_prof_leak && !opt_prof) {
+ /*
+ * Enable opt_prof, but in such a way that profiles are never
+ * automatically dumped.
+ */
+ opt_prof = true;
+ opt_prof_gdump = false;
+ } else if (opt_prof) {
+ if (opt_lg_prof_interval >= 0) {
+ prof_interval = (((uint64_t)1U) <<
+ opt_lg_prof_interval);
+ }
+ }
+}
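
prof_interval is the number of allocated bytes between interval-triggered dumps, derived from the base-2 option. A minimal sketch of the arithmetic, with a hypothetical option value:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Mirrors prof_interval = ((uint64_t)1U) << opt_lg_prof_interval. */
	unsigned lg_prof_interval = 30;	/* hypothetical option value */
	uint64_t interval = ((uint64_t)1U) << lg_prof_interval;

	/* Prints 1073741824: a dump roughly every 1 GiB allocated. */
	printf("%llu\n", (unsigned long long)interval);
	return (0);
}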
+
+bool
+prof_boot2(tsd_t *tsd)
+{
+
+ cassert(config_prof);
+
+ if (opt_prof) {
+ unsigned i;
+
+ lg_prof_sample = opt_lg_prof_sample;
+
+ prof_active = opt_prof_active;
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE))
+ return (true);
+
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP))
+ return (true);
+
+ prof_thread_active_init = opt_prof_thread_active_init;
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init",
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
+ return (true);
+
+ if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp))
+ return (true);
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX))
+ return (true);
+
+ tdata_tree_new(&tdatas);
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS))
+ return (true);
+
+ next_thr_uid = 0;
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID))
+ return (true);
+
+ if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
+ WITNESS_RANK_PROF_DUMP_SEQ))
+ return (true);
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP))
+ return (true);
+
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
+ malloc_write("<jemalloc>: Error in atexit()\n");
+ if (opt_abort)
+ abort();
+ }
+
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
+ if (gctx_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
+ WITNESS_RANK_PROF_GCTX))
+ return (true);
+ }
+
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
+ if (tdata_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
+ WITNESS_RANK_PROF_TDATA))
+ return (true);
+ }
+ }
+
+#ifdef JEMALLOC_PROF_LIBGCC
+ /*
+ * Cause the backtracing machinery to allocate its internal state
+ * before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+#endif
+
+ prof_booted = true;
+
+ return (false);
+}
+
+void
+prof_prefork0(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_prefork(tsdn, &prof_dump_mtx);
+ malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
+ malloc_mutex_prefork(tsdn, &tdatas_mtx);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_prefork(tsdn, &gctx_locks[i]);
+ }
+}
+
+void
+prof_prefork1(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ malloc_mutex_prefork(tsdn, &prof_active_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ }
+}
+
+void
+prof_postfork_parent(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_postfork_parent(tsdn,
+ &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
+ }
+}
+
+void
+prof_postfork_child(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
+ }
+}
+
+/******************************************************************************/
diff --git a/memory/jemalloc/src/src/quarantine.c b/memory/jemalloc/src/src/quarantine.c
new file mode 100644
index 000000000..18903fb5c
--- /dev/null
+++ b/memory/jemalloc/src/src/quarantine.c
@@ -0,0 +1,183 @@
+#define JEMALLOC_QUARANTINE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/*
+ * Quarantine pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
+#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
+#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
+static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
+ size_t upper_bound);
+
+/******************************************************************************/
+
+static quarantine_t *
+quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
+{
+ quarantine_t *quarantine;
+ size_t size;
+
+ size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
+ sizeof(quarantine_obj_t));
+ quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
+ false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
+ if (quarantine == NULL)
+ return (NULL);
+ quarantine->curbytes = 0;
+ quarantine->curobjs = 0;
+ quarantine->first = 0;
+ quarantine->lg_maxobjs = lg_maxobjs;
+
+ return (quarantine);
+}
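
quarantine_init() sizes its allocation as a fixed header plus a power-of-two ring of trailing objects. A minimal sketch of that struct-hack sizing, with hypothetical simplified types and plain malloc in place of iallocztm():

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	void	*ptr;
	size_t	usize;
} qobj_t;			/* stand-in for quarantine_obj_t */

typedef struct {
	size_t	curbytes;
	size_t	curobjs;
	size_t	first;
	size_t	lg_maxobjs;
	qobj_t	objs[1];	/* dynamically sized ring buffer */
} q_t;				/* stand-in for quarantine_t */

static q_t *
q_init(size_t lg_maxobjs)
{
	/* Header bytes plus 2^lg_maxobjs ring slots. */
	size_t size = offsetof(q_t, objs) +
	    (((size_t)1 << lg_maxobjs) * sizeof(qobj_t));
	q_t *q = malloc(size);

	if (q == NULL)
		return (NULL);
	memset(q, 0, offsetof(q_t, objs));
	q->lg_maxobjs = lg_maxobjs;
	return (q);
}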
+
+void
+quarantine_alloc_hook_work(tsd_t *tsd)
+{
+ quarantine_t *quarantine;
+
+ if (!tsd_nominal(tsd))
+ return;
+
+ quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
+ /*
+ * Check again whether quarantine has been initialized, because
+ * quarantine_init() may have triggered recursive initialization.
+ */
+ if (tsd_quarantine_get(tsd) == NULL)
+ tsd_quarantine_set(tsd, quarantine);
+ else
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+}
+
+static quarantine_t *
+quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
+{
+ quarantine_t *ret;
+
+ ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
+ if (ret == NULL) {
+ quarantine_drain_one(tsd_tsdn(tsd), quarantine);
+ return (quarantine);
+ }
+
+ ret->curbytes = quarantine->curbytes;
+ ret->curobjs = quarantine->curobjs;
+ if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
+ quarantine->lg_maxobjs)) {
+ /* objs ring buffer data are contiguous. */
+ memcpy(ret->objs, &quarantine->objs[quarantine->first],
+ quarantine->curobjs * sizeof(quarantine_obj_t));
+ } else {
+ /* objs ring buffer data wrap around. */
+ size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
+ quarantine->first;
+ size_t ncopy_b = quarantine->curobjs - ncopy_a;
+
+ memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
+ * sizeof(quarantine_obj_t));
+ memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
+ sizeof(quarantine_obj_t));
+ }
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+
+ tsd_quarantine_set(tsd, ret);
+ return (ret);
+}
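
The grow path must preserve ring order whether the live region is contiguous or wraps around the end of the old buffer. A hypothetical standalone helper showing the two memcpy cases handled above:

#include <stddef.h>
#include <string.h>

/*
 * Copy 'count' elements of an 'elem_size'-byte ring buffer starting at index
 * 'first' in 'src' (capacity 'cap', a power of two) to the front of 'dst',
 * preserving order.
 */
static void
ring_copy_out(void *dst, const void *src, size_t cap, size_t first,
    size_t count, size_t elem_size)
{
	char *d = (char *)dst;
	const char *s = (const char *)src;

	if (first + count <= cap) {
		/* Contiguous: a single memcpy suffices. */
		memcpy(d, s + first * elem_size, count * elem_size);
	} else {
		/* Wrapped: copy the tail segment, then the head segment. */
		size_t ncopy_a = cap - first;
		size_t ncopy_b = count - ncopy_a;

		memcpy(d, s + first * elem_size, ncopy_a * elem_size);
		memcpy(d + ncopy_a * elem_size, s, ncopy_b * elem_size);
	}
}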
+
+static void
+quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
+{
+ quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
+ assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
+ idalloctm(tsdn, obj->ptr, NULL, false, true);
+ quarantine->curbytes -= obj->usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+}
+
+static void
+quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
+{
+
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
+ quarantine_drain_one(tsdn, quarantine);
+}
+
+void
+quarantine(tsd_t *tsd, void *ptr)
+{
+ quarantine_t *quarantine;
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+
+ cassert(config_fill);
+ assert(opt_quarantine);
+
+ if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
+ idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ return;
+ }
+ /*
+ * Drain one or more objects if the quarantine size limit would be
+ * exceeded by appending ptr.
+ */
+ if (quarantine->curbytes + usize > opt_quarantine) {
+ size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+ - usize : 0;
+ quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
+ }
+ /* Grow the quarantine ring buffer if it's full. */
+ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+ quarantine = quarantine_grow(tsd, quarantine);
+ /* quarantine_grow() must free a slot if it fails to grow. */
+ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+ /* Append ptr if its size doesn't exceed the quarantine size. */
+ if (quarantine->curbytes + usize <= opt_quarantine) {
+ size_t offset = (quarantine->first + quarantine->curobjs) &
+ ((ZU(1) << quarantine->lg_maxobjs) - 1);
+ quarantine_obj_t *obj = &quarantine->objs[offset];
+ obj->ptr = ptr;
+ obj->usize = usize;
+ quarantine->curbytes += usize;
+ quarantine->curobjs++;
+ if (config_fill && unlikely(opt_junk_free)) {
+ /*
+ * Only do redzone validation if Valgrind isn't in
+ * operation.
+ */
+ if ((!config_valgrind || likely(!in_valgrind))
+ && usize <= SMALL_MAXCLASS)
+ arena_quarantine_junk_small(ptr, usize);
+ else
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ }
+ } else {
+ assert(quarantine->curbytes == 0);
+ idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ }
+}
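
The drain bound keeps curbytes at or below opt_quarantine once the new object is appended. A trivial sketch of the computation, with a hypothetical helper name:

#include <stddef.h>

static size_t
quarantine_upper_bound(size_t limit, size_t usize)
{
	return ((limit >= usize) ? limit - usize : 0);
}

/* Example: quarantine_upper_bound(16384, 4096) == 12288. */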
+
+void
+quarantine_cleanup(tsd_t *tsd)
+{
+ quarantine_t *quarantine;
+
+ if (!config_fill)
+ return;
+
+ quarantine = tsd_quarantine_get(tsd);
+ if (quarantine != NULL) {
+ quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ tsd_quarantine_set(tsd, NULL);
+ }
+}
diff --git a/memory/jemalloc/src/src/rtree.c b/memory/jemalloc/src/src/rtree.c
new file mode 100644
index 000000000..f2e2997d5
--- /dev/null
+++ b/memory/jemalloc/src/src/rtree.c
@@ -0,0 +1,132 @@
+#define JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+static unsigned
+hmin(unsigned ha, unsigned hb)
+{
+
+ return (ha < hb ? ha : hb);
+}
+
+/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
+bool
+rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+ rtree_node_dalloc_t *dalloc)
+{
+ unsigned bits_in_leaf, height, i;
+
+ assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
+ RTREE_BITS_PER_LEVEL));
+ assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
+
+ bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
+ : (bits % RTREE_BITS_PER_LEVEL);
+ if (bits > bits_in_leaf) {
+ height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
+ if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
+ height++;
+ } else
+ height = 1;
+ assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
+
+ rtree->alloc = alloc;
+ rtree->dalloc = dalloc;
+ rtree->height = height;
+
+ /* Root level. */
+ rtree->levels[0].subtree = NULL;
+ rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
+ bits_in_leaf;
+ rtree->levels[0].cumbits = rtree->levels[0].bits;
+ /* Interior levels. */
+ for (i = 1; i < height-1; i++) {
+ rtree->levels[i].subtree = NULL;
+ rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
+ rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
+ RTREE_BITS_PER_LEVEL;
+ }
+ /* Leaf level. */
+ if (height > 1) {
+ rtree->levels[height-1].subtree = NULL;
+ rtree->levels[height-1].bits = bits_in_leaf;
+ rtree->levels[height-1].cumbits = bits;
+ }
+
+ /* Compute lookup table to be used by rtree_start_level(). */
+ for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
+ rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
+ 1);
+ }
+
+ return (false);
+}
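
rtree_new() splits the significant key bits into fixed-width interior levels plus a leaf level that absorbs the remainder. A standalone sketch of the height computation, with a worked example in the comment (hypothetical function name):

#include <assert.h>

/*
 * Split 'bits' key bits into levels of 'per_level' bits each, the leaf level
 * taking the remainder.  E.g. bits = 48, per_level = 4 gives bits_in_leaf = 4
 * and height = 12.
 */
static unsigned
rtree_height(unsigned bits, unsigned per_level)
{
	unsigned bits_in_leaf, height;

	bits_in_leaf = (bits % per_level) == 0 ? per_level :
	    (bits % per_level);
	if (bits > bits_in_leaf)
		height = 1 + (bits - bits_in_leaf) / per_level;
	else
		height = 1;
	assert((height - 1) * per_level + bits_in_leaf == bits);
	return (height);
}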
+
+static void
+rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
+{
+
+ if (level + 1 < rtree->height) {
+ size_t nchildren, i;
+
+ nchildren = ZU(1) << rtree->levels[level].bits;
+ for (i = 0; i < nchildren; i++) {
+ rtree_node_elm_t *child = node[i].child;
+ if (child != NULL)
+ rtree_delete_subtree(rtree, child, level + 1);
+ }
+ }
+ rtree->dalloc(node);
+}
+
+void
+rtree_delete(rtree_t *rtree)
+{
+ unsigned i;
+
+ for (i = 0; i < rtree->height; i++) {
+ rtree_node_elm_t *subtree = rtree->levels[i].subtree;
+ if (subtree != NULL)
+ rtree_delete_subtree(rtree, subtree, i);
+ }
+}
+
+static rtree_node_elm_t *
+rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
+{
+ rtree_node_elm_t *node;
+
+ if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
+ spin_t spinner;
+
+ /*
+ * Another thread is already in the process of initializing.
+ * Spin-wait until initialization is complete.
+ */
+ spin_init(&spinner);
+ do {
+ spin_adaptive(&spinner);
+ node = atomic_read_p((void **)elmp);
+ } while (node == RTREE_NODE_INITIALIZING);
+ } else {
+ node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
+ if (node == NULL)
+ return (NULL);
+ atomic_write_p((void **)elmp, node);
+ }
+
+ return (node);
+}
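
rtree_node_init() uses a claim-then-publish protocol: the first caller swings the slot from NULL to a sentinel, allocates, and publishes the node, while racing callers spin until the real pointer appears. A minimal sketch of the same idea using C11 atomics (hypothetical names; unlike the original, it clears the sentinel on allocation failure so waiters cannot spin forever):

#include <stdatomic.h>
#include <stdint.h>

#define NODE_INITIALIZING ((void *)(uintptr_t)1)

static void *
lazy_init(_Atomic(void *) *slot, void *(*alloc_fn)(void))
{
	void *expected = NULL;
	void *node;

	if (atomic_compare_exchange_strong(slot, &expected,
	    NODE_INITIALIZING)) {
		/* Won the race: allocate and publish. */
		node = alloc_fn();
		if (node == NULL) {
			atomic_store(slot, NULL);	/* allow a retry */
			return (NULL);
		}
		atomic_store(slot, node);
	} else {
		/* Another thread is initializing; wait for the result. */
		do {
			node = atomic_load(slot);
		} while (node == NODE_INITIALIZING);
	}
	return (node);
}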
+
+rtree_node_elm_t *
+rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
+{
+
+ return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
+}
+
+rtree_node_elm_t *
+rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+{
+
+ return (rtree_node_init(rtree, level+1, &elm->child));
+}
diff --git a/memory/jemalloc/src/src/spin.c b/memory/jemalloc/src/src/spin.c
new file mode 100644
index 000000000..5242d95aa
--- /dev/null
+++ b/memory/jemalloc/src/src/spin.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_SPIN_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/memory/jemalloc/src/src/stats.c b/memory/jemalloc/src/src/stats.c
new file mode 100644
index 000000000..bd8af3999
--- /dev/null
+++ b/memory/jemalloc/src/src/stats.c
@@ -0,0 +1,1153 @@
+#define JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define CTL_GET(n, v, t) do { \
+ size_t sz = sizeof(t); \
+ xmallctl(n, v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_M2_GET(n, i, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = (i); \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = (i); \
+ mib[4] = (j); \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+} while (0)
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_stats_print = false;
+
+size_t stats_cactive = 0;
+
+/******************************************************************************/
+
+static void
+stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool large, bool huge, unsigned i)
+{
+ size_t page;
+ bool config_tcache, in_gap, in_gap_prev;
+ unsigned nbins, j;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"bins\": [\n");
+ } else {
+ CTL_GET("config.tcache", &config_tcache, bool);
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs"
+ " curruns regs pgs util nfills"
+ " nflushes newruns reruns\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs"
+ " curruns regs pgs util newruns"
+ " reruns\n");
+ }
+ }
+ for (j = 0, in_gap = false; j < nbins; j++) {
+ uint64_t nruns;
+ size_t reg_size, run_size, curregs;
+ size_t curruns;
+ uint32_t nregs;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nreruns;
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+ uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nruns == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (config_tcache) {
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
+ &nfills, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
+ &nflushes, uint64_t);
+ }
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
+ size_t);
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curregs\": %zu,\n"
+ "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
+ nmalloc,
+ ndalloc,
+ curregs,
+ nrequests);
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
+ nfills,
+ nflushes);
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curruns\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ nreruns,
+ curruns,
+ (j + 1 < nbins) ? "," : "");
+ } else if (!in_gap) {
+ size_t availregs, milli;
+ char util[6]; /* "x.yyy". */
+
+ availregs = nregs * curruns;
+ milli = (availregs != 0) ? (1000 * curregs) / availregs
+ : 1000;
+ assert(milli <= 1000);
+ if (milli < 10) {
+ malloc_snprintf(util, sizeof(util),
+ "0.00%zu", milli);
+ } else if (milli < 100) {
+ malloc_snprintf(util, sizeof(util), "0.0%zu",
+ milli);
+ } else if (milli < 1000) {
+ malloc_snprintf(util, sizeof(util), "0.%zu",
+ milli);
+ } else
+ malloc_snprintf(util, sizeof(util), "1");
+
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nfills, nflushes,
+ nruns, nreruns);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nruns, nreruns);
+ }
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]%s\n", (large || huge) ? "," : "");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+ }
+}
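
The per-bin utilization column is curregs/availregs rendered in thousandths without floating point. A standalone sketch of the same formatting, with a hypothetical helper name:

#include <stddef.h>
#include <stdio.h>

static void
util_string(char *buf, size_t buflen, size_t curregs, size_t availregs)
{
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

	if (milli < 10)
		snprintf(buf, buflen, "0.00%zu", milli);
	else if (milli < 100)
		snprintf(buf, buflen, "0.0%zu", milli);
	else if (milli < 1000)
		snprintf(buf, buflen, "0.%zu", milli);
	else
		snprintf(buf, buflen, "1");
}

/* Example: curregs = 37, availregs = 64 yields "0.578". */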
+
+static void
+stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool huge, unsigned i)
+{
+ unsigned nbins, nlruns, j;
+ bool in_gap, in_gap_prev;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"lruns\": [\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "large: size ind allocated nmalloc"
+ " ndalloc nrequests curruns\n");
+ }
+ for (j = 0, in_gap = false; j < nlruns; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t run_size, curruns;
+
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nrequests == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns,
+ size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"curruns\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ curruns,
+ (j + 1 < nlruns) ? "," : "");
+ } else if (!in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ run_size, nbins + j, curruns * run_size, nmalloc,
+ ndalloc, nrequests, curruns);
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]%s\n", huge ? "," : "");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+ }
+}
+
+static void
+stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, bool json, unsigned i)
+{
+ unsigned nbins, nlruns, nhchunks, j;
+ bool in_gap, in_gap_prev;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"hchunks\": [\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: size ind allocated nmalloc"
+ " ndalloc nrequests curhchunks\n");
+ }
+ for (j = 0, in_gap = false; j < nhchunks; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t hchunk_size, curhchunks;
+
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nrequests == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j,
+ &curhchunks, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"curhchunks\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ curhchunks,
+ (j + 1 < nhchunks) ? "," : "");
+ } else if (!in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ hchunk_size, nbins + nlruns + j,
+ curhchunks * hchunk_size, nmalloc, ndalloc,
+ nrequests, curhchunks);
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]\n");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+ }
+}
+
+static void
+stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, unsigned i, bool bins, bool large, bool huge)
+{
+ unsigned nthreads;
+ const char *dss;
+ ssize_t lg_dirty_mult, decay_time;
+ size_t page, pactive, pdirty, mapped, retained;
+ size_t metadata_mapped, metadata_allocated;
+ uint64_t npurge, nmadvise, purged;
+ size_t small_allocated;
+ uint64_t small_nmalloc, small_ndalloc, small_nrequests;
+ size_t large_allocated;
+ uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+ size_t huge_allocated;
+ uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"nthreads\": %u,\n", nthreads);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
+ }
+
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dss\": \"%s\",\n", dss);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "dss allocation precedence: %s\n", dss);
+ }
+
+ CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult);
+ } else {
+ if (opt_purge == purge_mode_ratio) {
+ if (lg_dirty_mult >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: %u:1\n",
+ (1U << lg_dirty_mult));
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: N/A\n");
+ }
+ }
+ }
+
+ CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"decay_time\": %zd,\n", decay_time);
+ } else {
+ if (opt_purge == purge_mode_decay) {
+ if (decay_time >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "decay time: %zd\n", decay_time);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "decay time: N/A\n");
+ }
+ }
+ }
+
+ CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+ CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+ CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
+ CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pactive\": %zu,\n", pactive);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"purged\": %"FMTu64",\n", purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
+ ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
+ }
+
+ CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"small\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " allocated nmalloc"
+ " ndalloc nrequests\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated, small_nmalloc, small_ndalloc,
+ small_nrequests);
+ }
+
+ CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"large\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ large_allocated, large_nmalloc, large_ndalloc,
+ large_nrequests);
+ }
+
+ CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
+ CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"huge\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
+ }
+ if (!json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
+ }
+
+ CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"mapped\": %zu,\n", mapped);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "mapped: %12zu\n", mapped);
+ }
+
+ CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"retained\": %zu,\n", retained);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "retained: %12zu\n", retained);
+ }
+
+ CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
+ size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"metadata\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n",
+ metadata_mapped, metadata_allocated);
+ }
+
+ if (bins) {
+ stats_arena_bins_print(write_cb, cbopaque, json, large, huge,
+ i);
+ }
+ if (large)
+ stats_arena_lruns_print(write_cb, cbopaque, json, huge, i);
+ if (huge)
+ stats_arena_hchunks_print(write_cb, cbopaque, json, i);
+}
+
+static void
+stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool merged, bool unmerged)
+{
+ const char *cpv;
+ bool bv;
+ unsigned uv;
+ uint32_t u32v;
+ uint64_t u64v;
+ ssize_t ssv;
+ size_t sv, bsz, usz, ssz, sssz, cpsz;
+
+ bsz = sizeof(bool);
+ usz = sizeof(unsigned);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
+
+ CTL_GET("version", &cpv, const char *);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"version\": \"%s\",\n", cpv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+
+ /* config. */
+#define CONFIG_WRITE_BOOL_JSON(n, c) \
+ if (json) { \
+ CTL_GET("config."#n, &bv, bool); \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
+ (c)); \
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"config\": {\n");
+ }
+
+ CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
+
+ CTL_GET("config.debug", &bv, bool);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
+ } else {
+ malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
+ bv ? "enabled" : "disabled");
+ }
+
+ CONFIG_WRITE_BOOL_JSON(fill, ",")
+ CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"malloc_conf\": \"%s\",\n",
+ config_malloc_conf);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "config.malloc_conf: \"%s\"\n", config_malloc_conf);
+ }
+
+ CONFIG_WRITE_BOOL_JSON(munmap, ",")
+ CONFIG_WRITE_BOOL_JSON(prof, ",")
+ CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
+ CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
+ CONFIG_WRITE_BOOL_JSON(stats, ",")
+ CONFIG_WRITE_BOOL_JSON(tcache, ",")
+ CONFIG_WRITE_BOOL_JSON(tls, ",")
+ CONFIG_WRITE_BOOL_JSON(utrace, ",")
+ CONFIG_WRITE_BOOL_JSON(valgrind, ",")
+ CONFIG_WRITE_BOOL_JSON(xmalloc, "")
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+#undef CONFIG_WRITE_BOOL_JSON
+
+ /* opt. */
+#define OPT_WRITE_BOOL(n, c) \
+ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
+ "false", (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s\n", bv ? "true" : "false"); \
+ } \
+ }
+#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
+ "false", (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s ("#m": %s)\n", bv ? "true" \
+ : "false", bv2 ? "true" : "false"); \
+ } \
+ } \
+}
+#define OPT_WRITE_UNSIGNED(n, c) \
+ if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %u%s\n", uv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %u\n", uv); \
+ } \
+ }
+#define OPT_WRITE_SIZE_T(n, c) \
+ if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zu\n", sv); \
+ } \
+ }
+#define OPT_WRITE_SSIZE_T(n, c) \
+ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd\n", ssv); \
+ } \
+ }
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd ("#m": %zd)\n", \
+ ssv, ssv2); \
+ } \
+ } \
+}
+#define OPT_WRITE_CHAR_P(n, c) \
+ if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": \"%s\"\n", cpv); \
+ } \
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"opt\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Run-time option settings:\n");
+ }
+ OPT_WRITE_BOOL(abort, ",")
+ OPT_WRITE_SIZE_T(lg_chunk, ",")
+ OPT_WRITE_CHAR_P(dss, ",")
+ OPT_WRITE_UNSIGNED(narenas, ",")
+ OPT_WRITE_CHAR_P(purge, ",")
+ if (json || opt_purge == purge_mode_ratio) {
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
+ arenas.lg_dirty_mult, ",")
+ }
+ if (json || opt_purge == purge_mode_decay) {
+ OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
+ }
+ OPT_WRITE_CHAR_P(junk, ",")
+ OPT_WRITE_SIZE_T(quarantine, ",")
+ OPT_WRITE_BOOL(redzone, ",")
+ OPT_WRITE_BOOL(zero, ",")
+ OPT_WRITE_BOOL(utrace, ",")
+ OPT_WRITE_BOOL(xmalloc, ",")
+ OPT_WRITE_BOOL(tcache, ",")
+ OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
+ OPT_WRITE_BOOL(prof, ",")
+ OPT_WRITE_CHAR_P(prof_prefix, ",")
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
+ ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
+ OPT_WRITE_BOOL(prof_accum, ",")
+ OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
+ OPT_WRITE_BOOL(prof_gdump, ",")
+ OPT_WRITE_BOOL(prof_final, ",")
+ OPT_WRITE_BOOL(prof_leak, ",")
+ /*
+ * stats_print is always emitted, so as long as stats_print comes last
+ * it's safe to unconditionally omit the comma here (rather than having
+ * to conditionally omit it elsewhere depending on configuration).
+ */
+ OPT_WRITE_BOOL(stats_print, "")
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+
+#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_BOOL_MUTABLE
+#undef OPT_WRITE_SIZE_T
+#undef OPT_WRITE_SSIZE_T
+#undef OPT_WRITE_CHAR_P
+
+ /* arenas. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"arenas\": {\n");
+ }
+
+ CTL_GET("arenas.narenas", &uv, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"narenas\": %u,\n", uv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
+
+ CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
+ } else if (opt_purge == purge_mode_ratio) {
+ if (ssv >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Min active:dirty page ratio per arena: "
+ "%u:1\n", (1U << ssv));
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Min active:dirty page ratio per arena: "
+ "N/A\n");
+ }
+ }
+ CTL_GET("arenas.decay_time", &ssv, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"decay_time\": %zd,\n", ssv);
+ } else if (opt_purge == purge_mode_decay) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Unused dirty page decay time: %zd%s\n",
+ ssv, (ssv < 0) ? " (no decay)" : "");
+ }
+
+ CTL_GET("arenas.quantum", &sv, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"quantum\": %zu,\n", sv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+
+ CTL_GET("arenas.page", &sv, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"page\": %zu,\n", sv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+
+ if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"tcache_max\": %zu,\n", sv);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Maximum thread-cached size class: %zu\n", sv);
+ }
+ }
+
+ if (json) {
+ unsigned nbins, nlruns, nhchunks, i;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nbins\": %u,\n", nbins);
+
+ CTL_GET("arenas.nhbins", &uv, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nhbins\": %u,\n", uv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"bin\": [\n");
+ for (i = 0; i < nbins; i++) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu,\n", sv);
+
+ CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
+
+ CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"run_size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t],\n");
+
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nlruns\": %u,\n", nlruns);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lrun\": [\n");
+ for (i = 0; i < nlruns; i++) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t],\n");
+
+ CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nhchunks\": %u,\n", nhchunks);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"hchunk\": [\n");
+ for (i = 0; i < nhchunks; i++) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t]\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+
+ /* prof. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"prof\": {\n");
+
+ CTL_GET("prof.thread_active_init", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
+ "false");
+
+ CTL_GET("prof.active", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"active\": %s,\n", bv ? "true" : "false");
+
+ CTL_GET("prof.gdump", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
+
+ CTL_GET("prof.interval", &u64v, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"interval\": %"FMTu64",\n", u64v);
+
+ CTL_GET("prof.lg_sample", &ssv, ssize_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lg_sample\": %zd\n", ssv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
+ "");
+ }
+}
+
+static void
+stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
+{
+ size_t *cactive;
+ size_t allocated, active, metadata, resident, mapped, retained;
+
+ CTL_GET("stats.cactive", &cactive, size_t *);
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
+ CTL_GET("stats.retained", &retained, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"stats\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"allocated\": %zu,\n", allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"active\": %zu,\n", active);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"metadata\": %zu,\n", metadata);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"resident\": %zu,\n", resident);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"mapped\": %zu,\n", mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"retained\": %zu\n", retained);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (merged || unmerged) ? "," : "");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Allocated: %zu, active: %zu, metadata: %zu,"
+ " resident: %zu, mapped: %zu, retained: %zu\n",
+ allocated, active, metadata, resident, mapped, retained);
+ malloc_cprintf(write_cb, cbopaque,
+ "Current active ceiling: %zu\n",
+ atomic_read_z(cactive));
+ }
+
+ if (merged || unmerged) {
+ unsigned narenas;
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"stats.arenas\": {\n");
+ }
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz;
+ unsigned i, j, ninitialized;
+
+ isz = sizeof(bool) * narenas;
+ xmallctl("arenas.initialized", (void *)initialized,
+ &isz, NULL, 0);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ if (initialized[i])
+ ninitialized++;
+ }
+
+ /* Merged stats. */
+ if (merged && (ninitialized > 1 || !unmerged)) {
+ /* Print merged arena stats. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"merged\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "\nMerged arenas stats:\n");
+ }
+ stats_arena_print(write_cb, cbopaque, json,
+ narenas, bins, large, huge);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t}%s\n", (ninitialized > 1) ?
+ "," : "");
+ }
+ }
+
+ /* Unmerged stats. */
+ for (i = j = 0; i < narenas; i++) {
+ if (initialized[i]) {
+ if (json) {
+ j++;
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\t\t\t\"%u\": {\n", i);
+ } else {
+ malloc_cprintf(write_cb,
+ cbopaque, "\narenas[%u]:\n",
+ i);
+ }
+ stats_arena_print(write_cb, cbopaque,
+ json, i, bins, large, huge);
+ if (json) {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\t\t\t}%s\n", (j <
+ ninitialized) ? "," : "");
+ }
+ }
+ }
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}\n");
+ }
+ }
+}
+
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+ bool json = false;
+ bool general = true;
+ bool merged = true;
+ bool unmerged = true;
+ bool bins = true;
+ bool large = true;
+ bool huge = true;
+
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+	 */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
+
+ if (opts != NULL) {
+ unsigned i;
+
+ for (i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+ case 'J':
+ json = true;
+ break;
+ case 'g':
+ general = false;
+ break;
+ case 'm':
+ merged = false;
+ break;
+ case 'a':
+ unmerged = false;
+ break;
+ case 'b':
+ bins = false;
+ break;
+ case 'l':
+ large = false;
+ break;
+ case 'h':
+ huge = false;
+ break;
+ default:;
+ }
+ }
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "{\n"
+ "\t\"jemalloc\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "___ Begin jemalloc statistics ___\n");
+ }
+
+ if (general)
+ stats_general_print(write_cb, cbopaque, json, merged, unmerged);
+ if (config_stats) {
+ stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
+ bins, large, huge);
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t}\n"
+ "}\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "--- End jemalloc statistics ---\n");
+ }
+}
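
The option characters parsed above are the same ones accepted through the public malloc_stats_print() entry point. A hypothetical usage sketch, assuming the default (unprefixed) public API:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/*
	 * 'J' selects JSON output, 'g' omits the general section, 'b' omits
	 * the per-bin tables; merged and per-arena stats are still emitted.
	 * A NULL write_cb sends the output to the default (stderr) writer.
	 */
	malloc_stats_print(NULL, NULL, "Jgb");
	return (0);
}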
diff --git a/memory/jemalloc/src/src/tcache.c b/memory/jemalloc/src/src/tcache.c
new file mode 100644
index 000000000..f97aa420c
--- /dev/null
+++ b/memory/jemalloc/src/src/tcache.c
@@ -0,0 +1,555 @@
+#define JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_tcache = true;
+ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+
+tcache_bin_info_t *tcache_bin_info;
+static unsigned stack_nelms; /* Total stack elms per tcache. */
+
+unsigned nhbins;
+size_t tcache_maxclass;
+
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
+
+/******************************************************************************/
+
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr)
+{
+
+ return (arena_salloc(tsdn, ptr, false));
+}
+
+void
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
+{
+ szind_t binind = tcache->next_gc_bin;
+ tcache_bin_t *tbin = &tcache->tbins[binind];
+ tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+
+ if (tbin->low_water > 0) {
+ /*
+ * Flush (ceiling) 3/4 of the objects below the low water mark.
+ */
+ if (binind < NBINS) {
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
+ } else {
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
+ }
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that the
+ * fill count is always at least 1.
+ */
+ if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
+ tbin->lg_fill_div++;
+ } else if (tbin->low_water < 0) {
+ /*
+ * Increase fill count by 2X. Make sure lg_fill_div stays
+ * greater than 0.
+ */
+ if (tbin->lg_fill_div > 1)
+ tbin->lg_fill_div--;
+ }
+ tbin->low_water = tbin->ncached;
+
+ tcache->next_gc_bin++;
+ if (tcache->next_gc_bin == nhbins)
+ tcache->next_gc_bin = 0;
+}
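
The GC pass keeps ncached - low_water + (low_water >> 2) objects, i.e. it flushes roughly three quarters of the objects that sat unused below the low-water mark since the last pass. A small worked example of the arithmetic:

#include <stdio.h>

int
main(void)
{
	unsigned ncached = 20, low_water = 8;
	unsigned rem = ncached - low_water + (low_water >> 2);	/* 14 kept */
	unsigned nflush = ncached - rem;			/* 6 flushed */

	printf("keep %u, flush %u\n", rem, nflush);
	return (0);
}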
+
+void *
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
+{
+ void *ret;
+
+ arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
+ tcache->prof_accumbytes : 0);
+ if (config_prof)
+ tcache->prof_accumbytes = 0;
+ ret = tcache_alloc_easy(tbin, tcache_success);
+
+ return (ret);
+}
+
+void
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem)
+{
+ arena_t *arena;
+ void *ptr;
+ unsigned i, nflush, ndeferred;
+ bool merged_stats = false;
+
+ assert(binind < NBINS);
+ assert(rem <= tbin->ncached);
+
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ /* Lock the arena bin associated with the first object. */
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ *(tbin->avail - 1));
+ arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+ arena_bin_t *bin = &bin_arena->bins[binind];
+
+ if (config_prof && bin_arena == arena) {
+ if (arena_prof_accum(tsd_tsdn(tsd), arena,
+ tcache->prof_accumbytes))
+ prof_idump(tsd_tsdn(tsd));
+ tcache->prof_accumbytes = 0;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (config_stats && bin_arena == arena) {
+ assert(!merged_stats);
+ merged_stats = true;
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+ ndeferred = 0;
+ for (i = 0; i < nflush; i++) {
+ ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (extent_node_arena_get(&chunk->node) == bin_arena) {
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_bits_t *bitselm =
+ arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+ bin_arena, chunk, ptr, bitselm);
+ } else {
+ /*
+ * This object was allocated via a different
+ * arena bin than the one that is currently
+ * locked. Stash the object, so that it can be
+ * handled in a future pass.
+ */
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ ndeferred++;
+ }
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ }
+ if (config_stats && !merged_stats) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_bin_t *bin = &arena->bins[binind];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
+ tbin->ncached = rem;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+}
+
+void
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache)
+{
+ arena_t *arena;
+ void *ptr;
+ unsigned i, nflush, ndeferred;
+ bool merged_stats = false;
+
+ assert(binind < nhbins);
+ assert(rem <= tbin->ncached);
+
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ /* Lock the arena associated with the first object. */
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ *(tbin->avail - 1));
+ arena_t *locked_arena = extent_node_arena_get(&chunk->node);
+ UNUSED bool idump;
+
+ if (config_prof)
+ idump = false;
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+ if ((config_prof || config_stats) && locked_arena == arena) {
+ if (config_prof) {
+ idump = arena_prof_accum_locked(arena,
+ tcache->prof_accumbytes);
+ tcache->prof_accumbytes = 0;
+ }
+ if (config_stats) {
+ merged_stats = true;
+ arena->stats.nrequests_large +=
+ tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+ }
+ ndeferred = 0;
+ for (i = 0; i < nflush; i++) {
+ ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (extent_node_arena_get(&chunk->node) ==
+ locked_arena) {
+ arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
+ locked_arena, chunk, ptr);
+ } else {
+ /*
+ * This object was allocated via a different
+ * arena than the one that is currently locked.
+ * Stash the object, so that it can be handled
+ * in a future pass.
+ */
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ ndeferred++;
+ }
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
+ if (config_prof && idump)
+ prof_idump(tsd_tsdn(tsd));
+ arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+ ndeferred);
+ }
+ if (config_stats && !merged_stats) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ }
+
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
+ tbin->ncached = rem;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+}
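Both flush routines share the same multi-pass structure: each pass locks the arena (or arena bin) that owns the first remaining object, frees every object with that owner, and stashes the rest at the front of the stack for a later pass. A self-contained sketch of that deferral pattern, using invented integer owner indices in place of real arena pointers:

#include <stdio.h>

#define NOBJ 6

int
main(void)
{
	int owner[NOBJ] = {0, 1, 0, 2, 1, 0};	/* owning arena per object */
	unsigned nflush = NOBJ, ndeferred, i;

	while (nflush > 0) {
		int locked = owner[0];	/* "lock" the first object's owner */

		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			if (owner[i] == locked)
				printf("free object owned by arena %d\n", locked);
			else
				owner[ndeferred++] = owner[i];	/* defer */
		}
		nflush = ndeferred;	/* next pass handles deferred objects */
	}
	return (0);
}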
+
+static void
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+{
+
+ if (config_stats) {
+ /* Link into list of extant tcaches. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+}
+
+static void
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+{
+
+ if (config_stats) {
+ /* Unlink from list of extant tcaches. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_debug) {
+ bool in_ql = false;
+ tcache_t *iter;
+ ql_foreach(iter, &arena->tcache_ql, link) {
+ if (iter == tcache) {
+ in_ql = true;
+ break;
+ }
+ }
+ assert(in_ql);
+ }
+ ql_remove(&arena->tcache_ql, tcache, link);
+ tcache_stats_merge(tsdn, tcache, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+}
+
+void
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
+ arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tsdn, tcache, oldarena);
+ tcache_arena_associate(tsdn, tcache, newarena);
+}
+
+tcache_t *
+tcache_get_hard(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ if (!tcache_enabled_get()) {
+ if (tsd_nominal(tsd))
+ tcache_enabled_set(false); /* Memoize. */
+ return (NULL);
+ }
+ arena = arena_choose(tsd, NULL);
+ if (unlikely(arena == NULL))
+ return (NULL);
+ return (tcache_create(tsd_tsdn(tsd), arena));
+}
+
+tcache_t *
+tcache_create(tsdn_t *tsdn, arena_t *arena)
+{
+ tcache_t *tcache;
+ size_t size, stack_offset;
+ unsigned i;
+
+ size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ /* Naturally align the pointer stacks. */
+ size = PTR_CEILING(size);
+ stack_offset = size;
+ size += stack_nelms * sizeof(void *);
+ /* Avoid false cacheline sharing. */
+ size = sa2u(size, CACHELINE);
+
+ tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
+ arena_get(TSDN_NULL, 0, true));
+ if (tcache == NULL)
+ return (NULL);
+
+ tcache_arena_associate(tsdn, tcache, arena);
+
+ ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ for (i = 0; i < nhbins; i++) {
+ tcache->tbins[i].lg_fill_div = 1;
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ /*
+ * avail points past the available space. Allocations will
+ * access the slots toward higher addresses (for the benefit of
+ * prefetch).
+ */
+ tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
+ (uintptr_t)stack_offset);
+ }
+
+ return (tcache);
+}
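The single allocation above packs the tcache header, the bin headers, and every bin's pointer stack into one cacheline-aligned block; each bin's avail pointer is set one past its own stack, so slots are addressed as avail[-1], avail[-2], and so on. A hypothetical sketch of that offset computation (the header size and per-bin capacities are invented, not jemalloc's):

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	unsigned ncached_max[3] = {8, 4, 2};	/* invented capacities */
	size_t header = 64;		/* stand-in for the tcache/bin headers */
	size_t stack_offset = header;
	unsigned i;

	for (i = 0; i < 3; i++) {
		stack_offset += ncached_max[i] * sizeof(void *);
		/* avail = base + stack_offset; slots live at
		 * avail[-1] .. avail[-ncached_max]. */
		printf("bin %u: avail at offset %zu\n", i, stack_offset);
	}
	return (0);
}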
+
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache)
+{
+ arena_t *arena;
+ unsigned i;
+
+ arena = arena_choose(tsd, NULL);
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
+
+ for (i = 0; i < NBINS; i++) {
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+ }
+
+ for (; i < nhbins; i++) {
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[i - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ }
+ }
+
+ if (config_prof && tcache->prof_accumbytes > 0 &&
+ arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
+ prof_idump(tsd_tsdn(tsd));
+
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+}
+
+void
+tcache_cleanup(tsd_t *tsd)
+{
+ tcache_t *tcache;
+
+ if (!config_tcache)
+ return;
+
+ if ((tcache = tsd_tcache_get(tsd)) != NULL) {
+ tcache_destroy(tsd, tcache);
+ tsd_tcache_set(tsd, NULL);
+ }
+}
+
+void
+tcache_enabled_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+{
+ unsigned i;
+
+ cassert(config_stats);
+
+ malloc_mutex_assert_owner(tsdn, &arena->lock);
+
+ /* Merge and reset tcache stats. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ tbin->tstats.nrequests = 0;
+ }
+
+ for (; i < nhbins; i++) {
+ malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ lstats->nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+}
+
+bool
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
+{
+ arena_t *arena;
+ tcache_t *tcache;
+ tcaches_t *elm;
+
+ if (tcaches == NULL) {
+ tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
+ (MALLOCX_TCACHE_MAX+1));
+ if (tcaches == NULL)
+ return (true);
+ }
+
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+ return (true);
+ arena = arena_ichoose(tsd, NULL);
+ if (unlikely(arena == NULL))
+ return (true);
+ tcache = tcache_create(tsd_tsdn(tsd), arena);
+ if (tcache == NULL)
+ return (true);
+
+ if (tcaches_avail != NULL) {
+ elm = tcaches_avail;
+ tcaches_avail = tcaches_avail->next;
+ elm->tcache = tcache;
+ *r_ind = (unsigned)(elm - tcaches);
+ } else {
+ elm = &tcaches[tcaches_past];
+ elm->tcache = tcache;
+ *r_ind = tcaches_past;
+ tcaches_past++;
+ }
+
+ return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+ if (elm->tcache == NULL)
+ return;
+ tcache_destroy(tsd, elm->tcache);
+ elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+ tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+ tcaches_t *elm = &tcaches[ind];
+ tcaches_elm_flush(tsd, elm);
+ elm->next = tcaches_avail;
+ tcaches_avail = elm;
+}
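The explicit-tcache bookkeeping above hands out slots from two sources: a singly linked free list of previously destroyed slots (tcaches_avail) and, failing that, the next never-used index (tcaches_past). A self-contained sketch of that recycling scheme, with invented types and a small fixed slot count:

#include <stdio.h>

#define MAX_SLOTS 4

typedef struct elm_s { struct elm_s *next; } elm_t;

static elm_t slots[MAX_SLOTS];
static elm_t *avail;	/* free list of destroyed slots */
static unsigned past;	/* first never-used slot */

static unsigned
slot_create(void)
{
	elm_t *e;

	if (avail != NULL) {
		e = avail;
		avail = avail->next;
	} else
		e = &slots[past++];
	return ((unsigned)(e - slots));
}

static void
slot_destroy(unsigned ind)
{
	slots[ind].next = avail;
	avail = &slots[ind];
}

int
main(void)
{
	unsigned a = slot_create(), b = slot_create();

	slot_destroy(a);
	/* Prints "a=0 b=1 reused=0": the destroyed slot is handed out again. */
	printf("a=%u b=%u reused=%u\n", a, b, slot_create());
	return (0);
}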
+
+bool
+tcache_boot(tsdn_t *tsdn)
+{
+ unsigned i;
+
+ /*
+ * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
+ * known.
+ */
+ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ tcache_maxclass = SMALL_MAXCLASS;
+ else if ((1U << opt_lg_tcache_max) > large_maxclass)
+ tcache_maxclass = large_maxclass;
+ else
+ tcache_maxclass = (1U << opt_lg_tcache_max);
+
+ nhbins = size2index(tcache_maxclass) + 1;
+
+ /* Initialize tcache_bin_info. */
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
+ sizeof(tcache_bin_info_t));
+ if (tcache_bin_info == NULL)
+ return (true);
+ stack_nelms = 0;
+ for (i = 0; i < NBINS; i++) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MIN;
+ } else if ((arena_bin_info[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
+ tcache_bin_info[i].ncached_max =
+ (arena_bin_info[i].nregs << 1);
+ } else {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MAX;
+ }
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+ for (; i < nhbins; i++) {
+ tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+
+ return (false);
+}
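Put differently, tcache_boot() lets each small bin cache up to twice the number of regions per run, clamped to a fixed [min, max] range, while every large bin gets a fixed count. A hedged sketch of the small-bin policy; the constants are illustrative, not jemalloc's build-time TCACHE_NSLOTS_* values:

#include <stdio.h>

#define NSLOTS_SMALL_MIN 20	/* illustrative only */
#define NSLOTS_SMALL_MAX 200	/* illustrative only */

static unsigned
ncached_max_for(unsigned nregs)
{
	unsigned want = nregs << 1;

	if (want <= NSLOTS_SMALL_MIN)
		return (NSLOTS_SMALL_MIN);
	if (want <= NSLOTS_SMALL_MAX)
		return (want);
	return (NSLOTS_SMALL_MAX);
}

int
main(void)
{
	unsigned nregs[] = {4, 64, 512};
	unsigned i;

	for (i = 0; i < 3; i++)
		printf("nregs=%u -> ncached_max=%u\n", nregs[i],
		    ncached_max_for(nregs[i]));
	return (0);
}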
diff --git a/memory/jemalloc/src/src/ticker.c b/memory/jemalloc/src/src/ticker.c
new file mode 100644
index 000000000..db0902404
--- /dev/null
+++ b/memory/jemalloc/src/src/ticker.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_TICKER_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/memory/jemalloc/src/src/tsd.c b/memory/jemalloc/src/src/tsd.c
new file mode 100644
index 000000000..ec69a51c3
--- /dev/null
+++ b/memory/jemalloc/src/src/tsd.c
@@ -0,0 +1,197 @@
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
+malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
+
+/******************************************************************************/
+
+void *
+malloc_tsd_malloc(size_t size)
+{
+
+ return (a0malloc(CACHELINE_CEILING(size)));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper)
+{
+
+ a0dalloc(wrapper);
+}
+
+void
+malloc_tsd_no_cleanup(void *arg)
+{
+
+ not_reached();
+}
+
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+#ifndef _WIN32
+JEMALLOC_EXPORT
+#endif
+void
+_malloc_thread_cleanup(void)
+{
+ bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
+ unsigned i;
+
+ for (i = 0; i < ncleanups; i++)
+ pending[i] = true;
+
+ do {
+ again = false;
+ for (i = 0; i < ncleanups; i++) {
+ if (pending[i]) {
+ pending[i] = cleanups[i]();
+ if (pending[i])
+ again = true;
+ }
+ }
+ } while (again);
+}
+#endif
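The Windows/FreeBSD path above keeps re-running the registered cleanups until none reports itself as still pending, which handles cleanups that recreate state another cleanup then has to tear down again. A minimal standalone sketch of that loop, with two invented cleanup functions:

#include <stdbool.h>
#include <stdio.h>

static int runs;

static bool cleanup_a(void) { puts("cleanup_a"); return (false); }
/* Reports itself pending once, forcing a second round. */
static bool cleanup_b(void) { puts("cleanup_b"); return (++runs < 2); }

int
main(void)
{
	bool (*cleanups[2])(void) = {cleanup_a, cleanup_b};
	bool pending[2] = {true, true}, again;
	int i;

	do {
		again = false;
		for (i = 0; i < 2; i++) {
			if (pending[i]) {
				pending[i] = cleanups[i]();
				if (pending[i])
					again = true;
			}
		}
	} while (again);
	return (0);
}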
+
+void
+malloc_tsd_cleanup_register(bool (*f)(void))
+{
+
+ assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
+ cleanups[ncleanups] = f;
+ ncleanups++;
+}
+
+void
+tsd_cleanup(void *arg)
+{
+ tsd_t *tsd = (tsd_t *)arg;
+
+ switch (tsd->state) {
+ case tsd_state_uninitialized:
+ /* Do nothing. */
+ break;
+ case tsd_state_nominal:
+#define O(n, t) \
+ n##_cleanup(tsd);
+MALLOC_TSD
+#undef O
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ case tsd_state_purgatory:
+ /*
+ * The previous time this destructor was called, we set the
+ * state to tsd_state_purgatory so that other destructors
+ * wouldn't cause re-creation of the tsd. This time, do
+ * nothing, and do not request another callback.
+ */
+ break;
+ case tsd_state_reincarnated:
+ /*
+ * Another destructor deallocated memory after this destructor
+ * was called. Reset state to tsd_state_purgatory and request
+ * another callback.
+ */
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ default:
+ not_reached();
+ }
+}
+
+tsd_t *
+malloc_tsd_boot0(void)
+{
+ tsd_t *tsd;
+
+ ncleanups = 0;
+ if (tsd_boot0())
+ return (NULL);
+ tsd = tsd_fetch();
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ return (tsd);
+}
+
+void
+malloc_tsd_boot1(void)
+{
+
+ tsd_boot1();
+ *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
+}
+
+#ifdef _WIN32
+static BOOL WINAPI
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+
+ switch (fdwReason) {
+#ifdef JEMALLOC_LAZY_LOCK
+ case DLL_THREAD_ATTACH:
+ isthreaded = true;
+ break;
+#endif
+ case DLL_THREAD_DETACH:
+ _malloc_thread_cleanup();
+ break;
+ default:
+ break;
+ }
+ return (true);
+}
+
+#ifdef _MSC_VER
+# ifdef _M_IX86
+# pragma comment(linker, "/INCLUDE:__tls_used")
+# pragma comment(linker, "/INCLUDE:_tls_callback")
+# else
+# pragma comment(linker, "/INCLUDE:_tls_used")
+# pragma comment(linker, "/INCLUDE:tls_callback")
+# endif
+# pragma section(".CRT$XLY",long,read)
+#endif
+JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
+BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
+ DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
+#endif
+
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+ pthread_t self = pthread_self();
+ tsd_init_block_t *iter;
+
+ /* Check whether this thread has already inserted into the list. */
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
+ ql_foreach(iter, &head->blocks, link) {
+ if (iter->thread == self) {
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return (iter->data);
+ }
+ }
+ /* Insert block into list. */
+ ql_elm_new(block, link);
+ block->thread = self;
+ ql_tail_insert(&head->blocks, block, link);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return (NULL);
+}
+
+void
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
+ ql_remove(&head->blocks, block, link);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+}
+#endif
diff --git a/memory/jemalloc/src/src/util.c b/memory/jemalloc/src/src/util.c
new file mode 100644
index 000000000..79052674f
--- /dev/null
+++ b/memory/jemalloc/src/src/util.c
@@ -0,0 +1,666 @@
+/*
+ * Define simple versions of assertion macros that won't recurse in case
+ * of assertion failures in malloc_*printf().
+ */
+#define assert(e) do { \
+ if (config_debug && !(e)) { \
+ malloc_write("<jemalloc>: Failed assertion\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Unreachable code reached\n"); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Not implemented\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define JEMALLOC_UTIL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void wrtmessage(void *cbopaque, const char *s);
+#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+ size_t *slen_p);
+#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
+static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+ size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+static void
+wrtmessage(void *cbopaque, const char *s)
+{
+
+#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_write)
+ /*
+ * Use syscall(2) rather than write(2) when possible in order to avoid
+ * the possibility of memory allocation within libc. This is necessary
+ * on FreeBSD; most operating systems do not have this problem though.
+ *
+ * syscall() returns long or int, depending on platform, so capture the
+ * unused result in the widest plausible type to avoid compiler
+ * warnings.
+ */
+ UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+#else
+ UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
+#endif
+}
+
+JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
+
+/*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
+ */
+void
+malloc_write(const char *s)
+{
+
+ if (je_malloc_message != NULL)
+ je_malloc_message(NULL, s);
+ else
+ wrtmessage(NULL, s);
+}
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(int err, char *buf, size_t buflen)
+{
+
+#ifdef _WIN32
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
+ (LPSTR)buf, (DWORD)buflen, NULL);
+ return (0);
+#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
+ char *b = strerror_r(err, buf, buflen);
+ if (b != buf) {
+ strncpy(buf, b, buflen);
+ buf[buflen-1] = '\0';
+ }
+ return (0);
+#else
+ return (strerror_r(err, buf, buflen));
+#endif
+}
+
+uintmax_t
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
+{
+ uintmax_t ret, digit;
+ unsigned b;
+ bool neg;
+ const char *p, *ns;
+
+ p = nptr;
+ if (base < 0 || base == 1 || base > 36) {
+ ns = p;
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+ b = base;
+
+ /* Swallow leading whitespace and get sign, if any. */
+ neg = false;
+ while (true) {
+ switch (*p) {
+ case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+ p++;
+ break;
+ case '-':
+ neg = true;
+ /* Fall through. */
+ case '+':
+ p++;
+ /* Fall through. */
+ default:
+ goto label_prefix;
+ }
+ }
+
+ /* Get prefix, if any. */
+ label_prefix:
+ /*
+ * Note where the first non-whitespace/sign character is so that it is
+ * possible to tell whether any digits are consumed (e.g., " 0" vs.
+ * " -x").
+ */
+ ns = p;
+ if (*p == '0') {
+ switch (p[1]) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7':
+ if (b == 0)
+ b = 8;
+ if (b == 8)
+ p++;
+ break;
+ case 'X': case 'x':
+ switch (p[2]) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ if (b == 0)
+ b = 16;
+ if (b == 16)
+ p += 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ p++;
+ ret = 0;
+ goto label_return;
+ }
+ }
+ if (b == 0)
+ b = 10;
+
+ /* Convert. */
+ ret = 0;
+ while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+ || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+ || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+ uintmax_t pret = ret;
+ ret *= b;
+ ret += digit;
+ if (ret < pret) {
+ /* Overflow. */
+ set_errno(ERANGE);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+ p++;
+ }
+ if (neg)
+ ret = -ret;
+
+ if (p == ns) {
+ /* No conversion performed. */
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+
+label_return:
+ if (endptr != NULL) {
+ if (p == ns) {
+ /* No characters were converted. */
+ *endptr = (char *)nptr;
+ } else
+ *endptr = (char *)p;
+ }
+ return (ret);
+}
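malloc_strtoumax() mirrors strtoumax(3): with base 0 it auto-detects the "0x" and "0" prefixes, reports overflow via ERANGE, and stores the address of the first unconsumed character through endptr. The standard library function is used in this illustrative sketch, since it takes the same arguments:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	char *end;
	uintmax_t v = strtoumax("  0x1f zebra", &end, 0);

	/* Prints: value=31 rest=" zebra" */
	printf("value=%ju rest=\"%s\"\n", v, end);
	return (0);
}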
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
+{
+ unsigned i;
+
+ i = U2S_BUFSIZE - 1;
+ s[i] = '\0';
+ switch (base) {
+ case 10:
+ do {
+ i--;
+ s[i] = "0123456789"[x % (uint64_t)10];
+ x /= (uint64_t)10;
+ } while (x > 0);
+ break;
+ case 16: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEF"
+ : "0123456789abcdef";
+
+ do {
+ i--;
+ s[i] = digits[x & 0xf];
+ x >>= 4;
+ } while (x > 0);
+ break;
+ } default: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ : "0123456789abcdefghijklmnopqrstuvwxyz";
+
+ assert(base >= 2 && base <= 36);
+ do {
+ i--;
+ s[i] = digits[x % (uint64_t)base];
+ x /= (uint64_t)base;
+ } while (x > 0);
+ }}
+
+ *slen_p = U2S_BUFSIZE - 1 - i;
+ return (&s[i]);
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p)
+{
+ bool neg;
+
+ if ((neg = (x < 0)))
+ x = -x;
+ s = u2s(x, 10, false, s, slen_p);
+ if (neg)
+ sign = '-';
+ switch (sign) {
+ case '-':
+ if (!neg)
+ break;
+ /* Fall through. */
+ case ' ':
+ case '+':
+ s--;
+ (*slen_p)++;
+ *s = sign;
+ break;
+ default: not_reached();
+ }
+ return (s);
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 8, false, s, slen_p);
+ if (alt_form && *s != '0') {
+ s--;
+ (*slen_p)++;
+ *s = '0';
+ }
+ return (s);
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 16, uppercase, s, slen_p);
+ if (alt_form) {
+ s -= 2;
+ (*slen_p) += 2;
+ memcpy(s, uppercase ? "0X" : "0x", 2);
+ }
+ return (s);
+}
+
+size_t
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ size_t i;
+ const char *f;
+
+#define APPEND_C(c) do { \
+ if (i < size) \
+ str[i] = (c); \
+ i++; \
+} while (0)
+#define APPEND_S(s, slen) do { \
+ if (i < size) { \
+ size_t cpylen = (slen <= size - i) ? slen : size - i; \
+ memcpy(&str[i], s, cpylen); \
+ } \
+ i += slen; \
+} while (0)
+#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
+ /* Left padding. */ \
+ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
+ (size_t)width - slen : 0); \
+ if (!left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+ /* Value. */ \
+ APPEND_S(s, slen); \
+ /* Right padding. */ \
+ if (left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+} while (0)
+#define GET_ARG_NUMERIC(val, len) do { \
+ switch (len) { \
+ case '?': \
+ val = va_arg(ap, int); \
+ break; \
+ case '?' | 0x80: \
+ val = va_arg(ap, unsigned int); \
+ break; \
+ case 'l': \
+ val = va_arg(ap, long); \
+ break; \
+ case 'l' | 0x80: \
+ val = va_arg(ap, unsigned long); \
+ break; \
+ case 'q': \
+ val = va_arg(ap, long long); \
+ break; \
+ case 'q' | 0x80: \
+ val = va_arg(ap, unsigned long long); \
+ break; \
+ case 'j': \
+ val = va_arg(ap, intmax_t); \
+ break; \
+ case 'j' | 0x80: \
+ val = va_arg(ap, uintmax_t); \
+ break; \
+ case 't': \
+ val = va_arg(ap, ptrdiff_t); \
+ break; \
+ case 'z': \
+ val = va_arg(ap, ssize_t); \
+ break; \
+ case 'z' | 0x80: \
+ val = va_arg(ap, size_t); \
+ break; \
+ case 'p': /* Synthetic; used for %p. */ \
+ val = va_arg(ap, uintptr_t); \
+ break; \
+ default: \
+ not_reached(); \
+ val = 0; \
+ } \
+} while (0)
+
+ i = 0;
+ f = format;
+ while (true) {
+ switch (*f) {
+ case '\0': goto label_out;
+ case '%': {
+ bool alt_form = false;
+ bool left_justify = false;
+ bool plus_space = false;
+ bool plus_plus = false;
+ int prec = -1;
+ int width = -1;
+ unsigned char len = '?';
+ char *s;
+ size_t slen;
+
+ f++;
+ /* Flags. */
+ while (true) {
+ switch (*f) {
+ case '#':
+ assert(!alt_form);
+ alt_form = true;
+ break;
+ case '-':
+ assert(!left_justify);
+ left_justify = true;
+ break;
+ case ' ':
+ assert(!plus_space);
+ plus_space = true;
+ break;
+ case '+':
+ assert(!plus_plus);
+ plus_plus = true;
+ break;
+ default: goto label_width;
+ }
+ f++;
+ }
+ /* Width. */
+ label_width:
+ switch (*f) {
+ case '*':
+ width = va_arg(ap, int);
+ f++;
+ if (width < 0) {
+ left_justify = true;
+ width = -width;
+ }
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uwidth;
+ set_errno(0);
+ uwidth = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uwidth != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ width = (int)uwidth;
+ break;
+ } default:
+ break;
+ }
+ /* Width/precision separator. */
+ if (*f == '.')
+ f++;
+ else
+ goto label_length;
+ /* Precision. */
+ switch (*f) {
+ case '*':
+ prec = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uprec;
+ set_errno(0);
+ uprec = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uprec != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ prec = (int)uprec;
+ break;
+ }
+ default: break;
+ }
+ /* Length. */
+ label_length:
+ switch (*f) {
+ case 'l':
+ f++;
+ if (*f == 'l') {
+ len = 'q';
+ f++;
+ } else
+ len = 'l';
+ break;
+ case 'q': case 'j': case 't': case 'z':
+ len = *f;
+ f++;
+ break;
+ default: break;
+ }
+ /* Conversion specifier. */
+ switch (*f) {
+ case '%':
+ /* %% */
+ APPEND_C(*f);
+ f++;
+ break;
+ case 'd': case 'i': {
+ intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[D2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = d2s(val, (plus_plus ? '+' : (plus_space ?
+ ' ' : '-')), buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'o': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[O2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = o2s(val, alt_form, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'u': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[U2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = u2s(val, 10, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'x': case 'X': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = x2s(val, alt_form, *f == 'X', buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'c': {
+ unsigned char val;
+ char buf[2];
+
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ val = va_arg(ap, int);
+ buf[0] = val;
+ buf[1] = '\0';
+ APPEND_PADDED_S(buf, 1, width, left_justify);
+ f++;
+ break;
+ } case 's':
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ s = va_arg(ap, char *);
+ slen = (prec < 0) ? strlen(s) : (size_t)prec;
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ case 'p': {
+ uintmax_t val;
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, 'p');
+ s = x2s(val, true, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } default: not_reached();
+ }
+ break;
+ } default: {
+ APPEND_C(*f);
+ f++;
+ break;
+ }}
+ }
+ label_out:
+ if (i < size)
+ str[i] = '\0';
+ else
+ str[size - 1] = '\0';
+
+#undef APPEND_C
+#undef APPEND_S
+#undef APPEND_PADDED_S
+#undef GET_ARG_NUMERIC
+ return (i);
+}
+
+JEMALLOC_FORMAT_PRINTF(3, 4)
+size_t
+malloc_snprintf(char *str, size_t size, const char *format, ...)
+{
+ size_t ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = malloc_vsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return (ret);
+}
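malloc_vsnprintf() implements a self-contained subset of printf: the d, i, o, u, x, X, c, s and p conversions, width and precision (including '*'), the '#', '-', ' ' and '+' flags, and the l, ll, j, t and z length modifiers, returning the would-be length like C99 snprintf. A usage sketch with the standard snprintf as a stand-in, since the syntax exercised here is common to both:

#include <stdio.h>

int
main(void)
{
	char buf[32];
	int n = snprintf(buf, sizeof(buf), "%-8s %#zx %3d", "arena",
	    (size_t)0x1f, 7);

	/* Prints: "arena    0x1f   7" (17 chars) */
	printf("\"%s\" (%d chars)\n", buf, n);
	return (0);
}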
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap)
+{
+ char buf[MALLOC_PRINTF_BUFSIZE];
+
+ if (write_cb == NULL) {
+ /*
+ * The caller did not provide an alternate write_cb callback
+ * function, so use the default one. malloc_write() is an
+ * inline function, so use malloc_message() directly here.
+ */
+ write_cb = (je_malloc_message != NULL) ? je_malloc_message :
+ wrtmessage;
+ cbopaque = NULL;
+ }
+
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_FORMAT_PRINTF(3, 4)
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(write_cb, cbopaque, format, ap);
+ va_end(ap);
+}
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+malloc_printf(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+}
+
+/*
+ * Restore normal assertion macros, in order to make it possible to compile all
+ * C files as a single concatenation.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#include "jemalloc/internal/assert.h"
diff --git a/memory/jemalloc/src/src/valgrind.c b/memory/jemalloc/src/src/valgrind.c
new file mode 100644
index 000000000..8e7ef3a2e
--- /dev/null
+++ b/memory/jemalloc/src/src/valgrind.c
@@ -0,0 +1,34 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+#ifndef JEMALLOC_VALGRIND
+# error "This source file is for Valgrind integration."
+#endif
+
+#include <valgrind/memcheck.h>
+
+void
+valgrind_make_mem_noaccess(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
+}
+
+void
+valgrind_make_mem_undefined(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
+}
+
+void
+valgrind_make_mem_defined(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
+}
+
+void
+valgrind_freelike_block(void *ptr, size_t usize)
+{
+
+ VALGRIND_FREELIKE_BLOCK(ptr, usize);
+}
diff --git a/memory/jemalloc/src/src/witness.c b/memory/jemalloc/src/src/witness.c
new file mode 100644
index 000000000..23753f246
--- /dev/null
+++ b/memory/jemalloc/src/src/witness.c
@@ -0,0 +1,136 @@
+#define JEMALLOC_WITNESS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+void
+witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp)
+{
+
+ witness->name = name;
+ witness->rank = rank;
+ witness->comp = comp;
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
+#endif
+void
+witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
+{
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Lock rank order reversal:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf(" %s(%u)\n", witness->name, witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define witness_lock_error JEMALLOC_N(witness_lock_error)
+witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
+#endif
+void
+witness_owner_error(const witness_t *witness)
+{
+
+ malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define witness_owner_error JEMALLOC_N(witness_owner_error)
+witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
+#endif
+void
+witness_not_owner_error(const witness_t *witness)
+{
+
+ malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+witness_not_owner_error_t *witness_not_owner_error =
+ JEMALLOC_N(n_witness_not_owner_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
+#endif
+void
+witness_lockless_error(const witness_list_t *witnesses)
+{
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Should not own any locks:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf("\n");
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
+witness_lockless_error_t *witness_lockless_error =
+ JEMALLOC_N(n_witness_lockless_error);
+#endif
+
+void
+witnesses_cleanup(tsd_t *tsd)
+{
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ /* Do nothing. */
+}
+
+void
+witness_fork_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+witness_prefork(tsd_t *tsd)
+{
+
+ tsd_witness_fork_set(tsd, true);
+}
+
+void
+witness_postfork_parent(tsd_t *tsd)
+{
+
+ tsd_witness_fork_set(tsd, false);
+}
+
+void
+witness_postfork_child(tsd_t *tsd)
+{
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ witness_list_t *witnesses;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_new(witnesses);
+#endif
+ tsd_witness_fork_set(tsd, false);
+}
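The witness machinery is jemalloc's debug-build lock-order checker: every mutex carries a rank, and acquiring a mutex whose rank is not above every rank already held triggers the rank-order-reversal abort printed by witness_lock_error(). A rough standalone sketch of the idea; the type and the single held_max variable are inventions, not jemalloc's representation:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
	const char	*name;
	unsigned	rank;
} witness_sketch_t;

static unsigned held_max;	/* highest rank currently held */

static void
acquire(const witness_sketch_t *w)
{
	if (w->rank <= held_max) {
		fprintf(stderr, "lock rank order reversal: %s(%u)\n",
		    w->name, w->rank);
		abort();
	}
	held_max = w->rank;
	printf("acquired %s(%u)\n", w->name, w->rank);
}

int
main(void)
{
	witness_sketch_t arena = {"arena", 3}, bin = {"bin", 4};

	acquire(&arena);
	acquire(&bin);
	/* acquire(&arena) here would abort: rank 3 is not above rank 4. */
	return (0);
}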
diff --git a/memory/jemalloc/src/src/zone.c b/memory/jemalloc/src/src/zone.c
new file mode 100644
index 000000000..0571920e4
--- /dev/null
+++ b/memory/jemalloc/src/src/zone.c
@@ -0,0 +1,330 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+#ifndef JEMALLOC_ZONE
+# error "This source file is for zones on Darwin (OS X)."
+#endif
+
+/*
+ * The malloc_default_purgeable_zone() function is only available on >= 10.6.
+ * We need to check whether it is present at runtime, thus the weak_import.
+ */
+extern malloc_zone_t *malloc_default_purgeable_zone(void)
+JEMALLOC_ATTR(weak_import);
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_zone_t *default_zone, *purgeable_zone;
+static malloc_zone_t jemalloc_zone;
+static struct malloc_introspection_t jemalloc_zone_introspect;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t zone_size(malloc_zone_t *zone, void *ptr);
+static void *zone_malloc(malloc_zone_t *zone, size_t size);
+static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
+static void *zone_valloc(malloc_zone_t *zone, size_t size);
+static void zone_free(malloc_zone_t *zone, void *ptr);
+static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
+#if (JEMALLOC_ZONE_VERSION >= 5)
+static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ size_t size);
+static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
+ size_t size);
+#endif
+static void *zone_destroy(malloc_zone_t *zone);
+static size_t zone_good_size(malloc_zone_t *zone, size_t size);
+static void zone_force_lock(malloc_zone_t *zone);
+static void zone_force_unlock(malloc_zone_t *zone);
+
+/******************************************************************************/
+/*
+ * Functions.
+ */
+
+static size_t
+zone_size(malloc_zone_t *zone, void *ptr)
+{
+
+ /*
+ * There appear to be places within Darwin (such as setenv(3)) that
+ * cause calls to this function with pointers that *no* zone owns. If
+ * we knew that all pointers were owned by *some* zone, we could split
+ * our zone into two parts, and use one as the default allocator and
+ * the other as the default deallocator/reallocator. Since that will
+ * not work in practice, we must check all pointers to assure that they
+ * reside within a mapped chunk before determining size.
+ */
+ return (ivsalloc(tsdn_fetch(), ptr, config_prof));
+}
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+
+ return (je_malloc(size));
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+
+ return (je_calloc(num, size));
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, PAGE, size);
+
+ return (ret);
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr)
+{
+
+ if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+
+ if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
+ return (je_realloc(ptr, size));
+
+ return (realloc(ptr, size));
+}
+
+#if (JEMALLOC_ZONE_VERSION >= 5)
+static void *
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, alignment, size);
+
+ return (ret);
+}
+#endif
+
+#if (JEMALLOC_ZONE_VERSION >= 6)
+static void
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
+{
+ size_t alloc_size;
+
+ alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
+ if (alloc_size != 0) {
+ assert(alloc_size == size);
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+#endif
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+
+ /* This function should never be called. */
+ not_reached();
+ return (NULL);
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+
+ if (size == 0)
+ size = 1;
+ return (s2u(size));
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+
+ if (isthreaded)
+ jemalloc_prefork();
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+
+ /*
+ * Call jemalloc_postfork_child() rather than
+ * jemalloc_postfork_parent(), because this function is executed by both
+ * parent and child. The parent can tolerate having state
+ * reinitialized, but the child cannot unlock mutexes that were locked
+ * by the parent.
+ */
+ if (isthreaded)
+ jemalloc_postfork_child();
+}
+
+static void
+zone_init(void)
+{
+
+ jemalloc_zone.size = (void *)zone_size;
+ jemalloc_zone.malloc = (void *)zone_malloc;
+ jemalloc_zone.calloc = (void *)zone_calloc;
+ jemalloc_zone.valloc = (void *)zone_valloc;
+ jemalloc_zone.free = (void *)zone_free;
+ jemalloc_zone.realloc = (void *)zone_realloc;
+ jemalloc_zone.destroy = (void *)zone_destroy;
+ jemalloc_zone.zone_name = "jemalloc_zone";
+ jemalloc_zone.batch_malloc = NULL;
+ jemalloc_zone.batch_free = NULL;
+ jemalloc_zone.introspect = &jemalloc_zone_introspect;
+ jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
+#if (JEMALLOC_ZONE_VERSION >= 5)
+ jemalloc_zone.memalign = zone_memalign;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ jemalloc_zone.free_definite_size = zone_free_definite_size;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 8)
+ jemalloc_zone.pressure_relief = NULL;
+#endif
+
+ jemalloc_zone_introspect.enumerator = NULL;
+ jemalloc_zone_introspect.good_size = (void *)zone_good_size;
+ jemalloc_zone_introspect.check = NULL;
+ jemalloc_zone_introspect.print = NULL;
+ jemalloc_zone_introspect.log = NULL;
+ jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
+ jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
+ jemalloc_zone_introspect.statistics = NULL;
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ jemalloc_zone_introspect.zone_locked = NULL;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 7)
+ jemalloc_zone_introspect.enable_discharge_checking = NULL;
+ jemalloc_zone_introspect.disable_discharge_checking = NULL;
+ jemalloc_zone_introspect.discharge = NULL;
+# ifdef __BLOCKS__
+ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
+# else
+ jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
+# endif
+#endif
+}
+
+static malloc_zone_t *
+zone_default_get(void)
+{
+ malloc_zone_t **zones = NULL;
+ unsigned int num_zones = 0;
+
+ /*
+ * On OSX 10.12, malloc_default_zone returns a special zone that is not
+ * present in the list of registered zones. That zone uses a "lite zone"
+ * if one is present (apparently enabled when malloc stack logging is
+ * enabled), or the first registered zone otherwise. In practice this
+ * means unless malloc stack logging is enabled, the first registered
+ * zone is the default. So get the list of zones to get the first one,
+ * instead of relying on malloc_default_zone.
+ */
+ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
+ (vm_address_t**)&zones, &num_zones)) {
+ /*
+ * Reset the value in case the failure happened after it was
+ * set.
+ */
+ num_zones = 0;
+ }
+
+ if (num_zones)
+ return (zones[0]);
+
+ return (malloc_default_zone());
+}
+
+/* As written, this function can only promote jemalloc_zone. */
+static void
+zone_promote(void)
+{
+ malloc_zone_t *zone;
+
+ do {
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it
+ * at the location of the specified zone. Unregistering the
+ * default zone thus makes the last registered one the default.
+ * On OSX < 10.6, unregistering shifts all registered zones.
+ * The first registered zone then becomes the default.
+ */
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
+
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before
+ * the default zone makes some things crash because it thinks it
+ * owns the default zone allocated pointers. We thus
+ * unregister/re-register it in order to ensure it's always
+ * after the default zone. On OSX < 10.6, there is no purgeable
+ * zone, so this does nothing. On OSX >= 10.6, unregistering
+ * replaces the purgeable zone with the last registered zone
+ * above, i.e. the default zone. Registering it again then puts
+ * it at the end, obviously after the default zone.
+ */
+ if (purgeable_zone != NULL) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
+
+ zone = zone_default_get();
+ } while (zone != &jemalloc_zone);
+}
+
+JEMALLOC_ATTR(constructor)
+void
+zone_register(void)
+{
+
+ /*
+ * If something else replaced the system default zone allocator, don't
+ * register jemalloc's.
+ */
+ default_zone = zone_default_get();
+ if (!default_zone->zone_name || strcmp(default_zone->zone_name,
+ "DefaultMallocZone") != 0)
+ return;
+
+ /*
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone() is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
+ * to check for the existence of malloc_default_purgeable_zone() at
+ * run time.
+ */
+ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
+ malloc_default_purgeable_zone();
+
+ /* Register the custom zone. At this point it won't be the default. */
+ zone_init();
+ malloc_zone_register(&jemalloc_zone);
+
+ /* Promote the custom zone to be default. */
+ zone_promote();
+}
diff --git a/memory/jemalloc/src/test/include/test/SFMT-alti.h b/memory/jemalloc/src/test/include/test/SFMT-alti.h
new file mode 100644
index 000000000..0005df6b4
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-alti.h
@@ -0,0 +1,186 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-alti.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ * pseudorandom number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ */
+
+#ifndef SFMT_ALTI_H
+#define SFMT_ALTI_H
+
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE
+vector unsigned int vec_recursion(vector unsigned int a,
+ vector unsigned int b,
+ vector unsigned int c,
+ vector unsigned int d) {
+
+ const vector unsigned int sl1 = ALTI_SL1;
+ const vector unsigned int sr1 = ALTI_SR1;
+#ifdef ONLY64
+ const vector unsigned int mask = ALTI_MSK64;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM64;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM64;
+#else
+ const vector unsigned int mask = ALTI_MSK;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM;
+#endif
+ vector unsigned int v, w, x, y, z;
+ x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+ v = a;
+ y = vec_sr(b, sr1);
+ z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+ w = vec_sl(d, sl1);
+ z = vec_xor(z, w);
+ y = vec_and(y, mask);
+ v = vec_xor(v, x);
+ z = vec_xor(z, y);
+ z = vec_xor(z, v);
+ return z;
+}
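Reading the two permutes as whole-128-bit byte shifts, the routine above computes the standard SFMT recursion. Schematically (an interpretation of the code, with SL1, SL2, SR1, SR2 and MSK taken from the SFMT-params headers):

    r = a ^ (a <<128 8*SL2) ^ ((b >>32 SR1) & MSK) ^ (c >>128 8*SR2) ^ (d <<32 SL1)

where <<128/>>128 shift the whole 128-bit word by the given number of bits and <<32/>>32 act on each 32-bit lane independently.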
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j].s = array[j + size - N].s;
+ }
+ for (; i < size; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ ctx->sfmt[j++].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#ifndef ONLY64
+#if defined(__APPLE__)
+#define ALTI_SWAP (vector unsigned char) \
+ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
+#else
+#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
+#endif
+/**
+ * This function swaps the high and low 32 bits of the 64-bit integers in a
+ * user-specified array.
+ *
+ * @param array a 128-bit array to be swapped.
+ * @param size size of the 128-bit array.
+ */
+JEMALLOC_INLINE void swap(w128_t *array, int size) {
+ int i;
+ const vector unsigned char perm = ALTI_SWAP;
+
+ for (i = 0; i < size; i++) {
+ array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+ }
+}
+#endif
+
+#endif
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params.h b/memory/jemalloc/src/test/include/test/SFMT-params.h
new file mode 100644
index 000000000..ade662220
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params.h
@@ -0,0 +1,132 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS_H
+#define SFMT_PARAMS_H
+
+#if !defined(MEXP)
+#ifdef __GNUC__
+ #warning "MEXP is not defined. I assume MEXP is 19937."
+#endif
+ #define MEXP 19937
+#endif
+/*-----------------
+ BASIC DEFINITIONS
+ -----------------*/
+/** Mersenne Exponent. The period of the sequence
+ * is a multiple of 2^MEXP-1.
+ * #define MEXP 19937 */
+/** SFMT generator has an internal state array of 128-bit integers,
+ * and N is its size. */
+#define N (MEXP / 128 + 1)
+/** N32 is the size of internal state array when regarded as an array
+ * of 32-bit integers.*/
+#define N32 (N * 4)
+/** N64 is the size of internal state array when regarded as an array
+ * of 64-bit integers.*/
+#define N64 (N * 2)
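With the default MEXP of 19937 these sizes work out to N = 19937/128 + 1 = 156 128-bit state words, so N32 = 624 and N64 = 312; a trivial check:

#include <stdio.h>

#define MEXP 19937

int
main(void)
{
	int n = MEXP / 128 + 1;

	/* Prints: N=156 N32=624 N64=312 */
	printf("N=%d N32=%d N64=%d\n", n, n * 4, n * 2);
	return (0);
}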
+
+/*----------------------
+ the parameters of SFMT
+ following definitions are in paramsXXXX.h file.
+ ----------------------*/
+/** the pick up position of the array.
+#define POS1 122
+*/
+
+/** the parameter of shift left as four 32-bit registers.
+#define SL1 18
+ */
+
+/** the parameter of shift left as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SL2 1
+*/
+
+/** the parameter of shift right as four 32-bit registers.
+#define SR1 11
+*/
+
+/** the parameter of shift right as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SR2 1
+*/
+
+/** A bitmask, used in the recursion. These parameters are introduced
+ * to break symmetry of SIMD.
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+*/
+
+/** These definitions are part of a 128-bit period certification vector.
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xc98e126aU
+*/
+
+#if MEXP == 607
+ #include "test/SFMT-params607.h"
+#elif MEXP == 1279
+ #include "test/SFMT-params1279.h"
+#elif MEXP == 2281
+ #include "test/SFMT-params2281.h"
+#elif MEXP == 4253
+ #include "test/SFMT-params4253.h"
+#elif MEXP == 11213
+ #include "test/SFMT-params11213.h"
+#elif MEXP == 19937
+ #include "test/SFMT-params19937.h"
+#elif MEXP == 44497
+ #include "test/SFMT-params44497.h"
+#elif MEXP == 86243
+ #include "test/SFMT-params86243.h"
+#elif MEXP == 132049
+ #include "test/SFMT-params132049.h"
+#elif MEXP == 216091
+ #include "test/SFMT-params216091.h"
+#else
+#ifdef __GNUC__
+ #error "MEXP is not valid."
+ #undef MEXP
+#else
+ #undef MEXP
+#endif
+
+#endif
+
+#endif /* SFMT_PARAMS_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params11213.h b/memory/jemalloc/src/test/include/test/SFMT-params11213.h
new file mode 100644
index 000000000..2994bd21d
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params11213.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS11213_H
+#define SFMT_PARAMS11213_H
+
+#define POS1 68
+#define SL1 14
+#define SL2 3
+#define SR1 7
+#define SR2 3
+#define MSK1 0xeffff7fbU
+#define MSK2 0xffffffefU
+#define MSK3 0xdfdfbfffU
+#define MSK4 0x7fffdbfdU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xe8148000U
+#define PARITY4 0xd0c7afa3U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
+
+#endif /* SFMT_PARAMS11213_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params1279.h b/memory/jemalloc/src/test/include/test/SFMT-params1279.h
new file mode 100644
index 000000000..d7959f980
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params1279.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS1279_H
+#define SFMT_PARAMS1279_H
+
+#define POS1 7
+#define SL1 14
+#define SL2 3
+#define SR1 5
+#define SR2 1
+#define MSK1 0xf7fefffdU
+#define MSK2 0x7fefcfffU
+#define MSK3 0xaff3ef3fU
+#define MSK4 0xb5ffff7fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x20000000U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
+
+#endif /* SFMT_PARAMS1279_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params132049.h b/memory/jemalloc/src/test/include/test/SFMT-params132049.h
new file mode 100644
index 000000000..a1dcec392
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params132049.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS132049_H
+#define SFMT_PARAMS132049_H
+
+#define POS1 110
+#define SL1 19
+#define SL2 1
+#define SR1 21
+#define SR2 1
+#define MSK1 0xffffbb5fU
+#define MSK2 0xfb6ebf95U
+#define MSK3 0xfffefffaU
+#define MSK4 0xcff77fffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xcb520000U
+#define PARITY4 0xc7e91c7dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
+
+#endif /* SFMT_PARAMS132049_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params19937.h b/memory/jemalloc/src/test/include/test/SFMT-params19937.h
new file mode 100644
index 000000000..fb92b4c9b
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params19937.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS19937_H
+#define SFMT_PARAMS19937_H
+
+#define POS1 122
+#define SL1 18
+#define SL2 1
+#define SR1 11
+#define SR2 1
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x13c9e684U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
+
+#endif /* SFMT_PARAMS19937_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params216091.h b/memory/jemalloc/src/test/include/test/SFMT-params216091.h
new file mode 100644
index 000000000..125ce2820
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params216091.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS216091_H
+#define SFMT_PARAMS216091_H
+
+#define POS1 627
+#define SL1 11
+#define SL2 3
+#define SR1 10
+#define SR2 1
+#define MSK1 0xbff7bff7U
+#define MSK2 0xbfffffffU
+#define MSK3 0xbffffa7fU
+#define MSK4 0xffddfbfbU
+#define PARITY1 0xf8000001U
+#define PARITY2 0x89e80709U
+#define PARITY3 0x3bd2b64bU
+#define PARITY4 0x0c64b1e4U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
+
+#endif /* SFMT_PARAMS216091_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params2281.h b/memory/jemalloc/src/test/include/test/SFMT-params2281.h
new file mode 100644
index 000000000..0ef85c407
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params2281.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS2281_H
+#define SFMT_PARAMS2281_H
+
+#define POS1 12
+#define SL1 19
+#define SL2 1
+#define SR1 5
+#define SR2 1
+#define MSK1 0xbff7ffbfU
+#define MSK2 0xfdfffffeU
+#define MSK3 0xf7ffef7fU
+#define MSK4 0xf2f7cbbfU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x41dfa600U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
+
+#endif /* SFMT_PARAMS2281_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params4253.h b/memory/jemalloc/src/test/include/test/SFMT-params4253.h
new file mode 100644
index 000000000..9f07bc67e
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params4253.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS4253_H
+#define SFMT_PARAMS4253_H
+
+#define POS1 17
+#define SL1 20
+#define SL2 1
+#define SR1 7
+#define SR2 1
+#define MSK1 0x9f7bffffU
+#define MSK2 0x9fffff5fU
+#define MSK3 0x3efffffbU
+#define MSK4 0xfffff7bbU
+#define PARITY1 0xa8000001U
+#define PARITY2 0xaf5390a3U
+#define PARITY3 0xb740b3f8U
+#define PARITY4 0x6c11486dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
+
+#endif /* SFMT_PARAMS4253_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params44497.h b/memory/jemalloc/src/test/include/test/SFMT-params44497.h
new file mode 100644
index 000000000..85598fed5
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params44497.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS44497_H
+#define SFMT_PARAMS44497_H
+
+#define POS1 330
+#define SL1 5
+#define SL2 3
+#define SR1 9
+#define SR2 3
+#define MSK1 0xeffffffbU
+#define MSK2 0xdfbebfffU
+#define MSK3 0xbfbf7befU
+#define MSK4 0x9ffd7bffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xa3ac4000U
+#define PARITY4 0xecc1327aU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
+
+#endif /* SFMT_PARAMS44497_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params607.h b/memory/jemalloc/src/test/include/test/SFMT-params607.h
new file mode 100644
index 000000000..bc76485f8
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params607.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS607_H
+#define SFMT_PARAMS607_H
+
+#define POS1 2
+#define SL1 15
+#define SL2 3
+#define SR1 13
+#define SR2 3
+#define MSK1 0xfdff37ffU
+#define MSK2 0xef7f3f7dU
+#define MSK3 0xff777b7dU
+#define MSK4 0x7ff7fb2fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x5986f054U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
+
+#endif /* SFMT_PARAMS607_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-params86243.h b/memory/jemalloc/src/test/include/test/SFMT-params86243.h
new file mode 100644
index 000000000..5e4d783c5
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-params86243.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS86243_H
+#define SFMT_PARAMS86243_H
+
+#define POS1 366
+#define SL1 6
+#define SL2 7
+#define SR1 19
+#define SR2 1
+#define MSK1 0xfdbffbffU
+#define MSK2 0xbff7ff3fU
+#define MSK3 0xfd77efffU
+#define MSK4 0xbf9ff3ffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xe9528d85U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
+ #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
+
+#endif /* SFMT_PARAMS86243_H */
diff --git a/memory/jemalloc/src/test/include/test/SFMT-sse2.h b/memory/jemalloc/src/test/include/test/SFMT-sse2.h
new file mode 100644
index 000000000..0314a163d
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT-sse2.h
@@ -0,0 +1,157 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-sse2.h
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * @note We assume LITTLE ENDIAN in this file
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+
+#ifndef SFMT_SSE2_H
+#define SFMT_SSE2_H
+
+/**
+ * This function represents the recursion formula.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @param mask 128-bit mask
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
+ __m128i c, __m128i d, __m128i mask) {
+ __m128i v, x, y, z;
+
+ x = _mm_load_si128(a);
+ y = _mm_srli_epi32(*b, SR1);
+ z = _mm_srli_si128(c, SR2);
+ v = _mm_slli_epi32(d, SL1);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, v);
+ x = _mm_slli_si128(x, SL2);
+ y = _mm_and_si128(y, mask);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, y);
+ return z;
+}
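+
+/* (Editor's note, illustrative) In scalar terms the routine above computes
+ *   r = a ^ (a <<128 (SL2*8)) ^ ((b >> SR1) & mask) ^ (c >>128 (SR2*8)) ^ (d << SL1)
+ * where <<128 / >>128 shift the whole 128-bit word by bytes and >> / << act
+ * independently on each 32-bit lane. */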
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ r = _mm_load_si128(&array[j + size - N].si);
+ _mm_store_si128(&ctx->sfmt[j].si, r);
+ }
+ for (; i < size; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ _mm_store_si128(&ctx->sfmt[j++].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#endif
diff --git a/memory/jemalloc/src/test/include/test/SFMT.h b/memory/jemalloc/src/test/include/test/SFMT.h
new file mode 100644
index 000000000..09c1607dd
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/SFMT.h
@@ -0,0 +1,171 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
+ * number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ *
+ * @note We assume that your system has inttypes.h. If your system
+ * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
+ * and you have to define PRIu64 and PRIx64 in this file as follows:
+ * @verbatim
+ typedef unsigned int uint32_t;
+ typedef unsigned long long uint64_t;
+ #define PRIu64 "llu"
+ #define PRIx64 "llx"
+@endverbatim
+ * uint32_t must be an unsigned integer type of exactly 32 bits (no more, no
+ * less), and uint64_t must be an unsigned integer type of exactly 64 bits.
+ * PRIu64 and PRIx64 are the printf format specifiers used to print 64-bit
+ * unsigned integers in decimal and in hexadecimal format, respectively.
+ */
+
+#ifndef SFMT_H
+#define SFMT_H
+
+typedef struct sfmt_s sfmt_t;
+
+uint32_t gen_rand32(sfmt_t *ctx);
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
+uint64_t gen_rand64(sfmt_t *ctx);
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
+sfmt_t *init_gen_rand(uint32_t seed);
+sfmt_t *init_by_array(uint32_t *init_key, int key_length);
+void fini_gen_rand(sfmt_t *ctx);
+const char *get_idstring(void);
+int get_min_array_size32(void);
+int get_min_array_size64(void);
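+
+/* (Editor's sketch, not upstream documentation) Typical use from a test:
+ *
+ *   sfmt_t *ctx = init_gen_rand(12345);   seed with an arbitrary 32-bit value
+ *   uint32_t r = gen_rand32(ctx);         one 32-bit pseudorandom draw
+ *   uint64_t q = gen_rand64(ctx);         one 64-bit pseudorandom draw
+ *   fini_gen_rand(ctx);                   release the generator state
+ */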
+
+#ifndef JEMALLOC_ENABLE_INLINE
+double to_real1(uint32_t v);
+double genrand_real1(sfmt_t *ctx);
+double to_real2(uint32_t v);
+double genrand_real2(sfmt_t *ctx);
+double to_real3(uint32_t v);
+double genrand_real3(sfmt_t *ctx);
+double to_res53(uint64_t v);
+double to_res53_mix(uint32_t x, uint32_t y);
+double genrand_res53(sfmt_t *ctx);
+double genrand_res53_mix(sfmt_t *ctx);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
+/* These real versions are due to Isaku Wada */
+/** generates a random number on [0,1]-real-interval */
+JEMALLOC_INLINE double to_real1(uint32_t v)
+{
+ return v * (1.0/4294967295.0);
+ /* divided by 2^32-1 */
+}
+
+/** generates a random number on [0,1]-real-interval */
+JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
+{
+ return to_real1(gen_rand32(ctx));
+}
+
+/** generates a random number on [0,1)-real-interval */
+JEMALLOC_INLINE double to_real2(uint32_t v)
+{
+ return v * (1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on [0,1)-real-interval */
+JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
+{
+ return to_real2(gen_rand32(ctx));
+}
+
+/** generates a random number on (0,1)-real-interval */
+JEMALLOC_INLINE double to_real3(uint32_t v)
+{
+ return (((double)v) + 0.5)*(1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on (0,1)-real-interval */
+JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
+{
+ return to_real3(gen_rand32(ctx));
+}
+/** These real versions are due to Isaku Wada */
+
+/** generates a random number on [0,1) with 53-bit resolution*/
+JEMALLOC_INLINE double to_res53(uint64_t v)
+{
+ return v * (1.0/18446744073709551616.0L);
+}
+
+/** generates a random number on [0,1) with 53-bit resolution from two
+ * 32-bit integers */
+JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y)
+{
+ return to_res53(x | ((uint64_t)y << 32));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ */
+JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx)
+{
+ return to_res53(gen_rand64(ctx));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+    using two 32-bit integers.
+ */
+JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx)
+{
+ uint32_t x, y;
+
+ x = gen_rand32(ctx);
+ y = gen_rand32(ctx);
+ return to_res53_mix(x, y);
+}
+#endif
+#endif
diff --git a/memory/jemalloc/src/test/include/test/btalloc.h b/memory/jemalloc/src/test/include/test/btalloc.h
new file mode 100644
index 000000000..c3f9d4df7
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/btalloc.h
@@ -0,0 +1,31 @@
+/* btalloc() provides a mechanism for allocating via permuted backtraces. */
+void *btalloc(size_t size, unsigned bits);
+
+#define btalloc_n_proto(n) \
+void *btalloc_##n(size_t size, unsigned bits);
+btalloc_n_proto(0)
+btalloc_n_proto(1)
+
+#define btalloc_n_gen(n) \
+void * \
+btalloc_##n(size_t size, unsigned bits) \
+{ \
+ void *p; \
+ \
+ if (bits == 0) \
+ p = mallocx(size, 0); \
+ else { \
+ switch (bits & 0x1U) { \
+ case 0: \
+ p = (btalloc_0(size, bits >> 1)); \
+ break; \
+ case 1: \
+ p = (btalloc_1(size, bits >> 1)); \
+ break; \
+ default: not_reached(); \
+ } \
+ } \
+ /* Intentionally sabotage tail call optimization. */ \
+ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
+ return (p); \
+}
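+
+/* (Editor's note, illustrative) Each low-order bit of `bits` selects the next
+ * frame (0 recurses through btalloc_0(), 1 through btalloc_1()) until bits
+ * reaches 0 and mallocx() is called.  E.g. bits == 0x5 allocates via the chain
+ * btalloc_1 -> btalloc_0 -> btalloc_1, so distinct bit patterns produce
+ * distinct backtraces for heap profiling tests. */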
diff --git a/memory/jemalloc/src/test/include/test/jemalloc_test.h.in b/memory/jemalloc/src/test/include/test/jemalloc_test.h.in
new file mode 100644
index 000000000..1f36e4695
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/jemalloc_test.h.in
@@ -0,0 +1,163 @@
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <math.h>
+#include <string.h>
+#ifdef _WIN32
+# include "msvc_compat/strings.h"
+#endif
+
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+#else
+# include <pthread.h>
+#endif
+
+#include "test/jemalloc_test_defs.h"
+
+#ifdef JEMALLOC_OSSPIN
+# include <libkern/OSAtomic.h>
+#endif
+
+#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
+# include <altivec.h>
+#endif
+#ifdef HAVE_SSE2
+# include <emmintrin.h>
+#endif
+
+/******************************************************************************/
+/*
+ * For unit tests, expose all public and private interfaces.
+ */
+#ifdef JEMALLOC_UNIT_TEST
+# define JEMALLOC_JET
+# define JEMALLOC_MANGLE
+# include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/*
+ * For integration tests, expose the public jemalloc interfaces, but only
+ * expose the minimum necessary internal utility code (to avoid re-implementing
+ * essentially identical code within the test infrastructure).
+ */
+#elif defined(JEMALLOC_INTEGRATION_TEST)
+# define JEMALLOC_MANGLE
+# include "jemalloc/jemalloc@install_suffix@.h"
+# include "jemalloc/internal/jemalloc_internal_defs.h"
+# include "jemalloc/internal/jemalloc_internal_macros.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "jemalloc/internal/private_namespace.h"
+
+# define JEMALLOC_H_TYPES
+# define JEMALLOC_H_STRUCTS
+# define JEMALLOC_H_EXTERNS
+# define JEMALLOC_H_INLINES
+# include "jemalloc/internal/nstime.h"
+# include "jemalloc/internal/util.h"
+# include "jemalloc/internal/qr.h"
+# include "jemalloc/internal/ql.h"
+# undef JEMALLOC_H_TYPES
+# undef JEMALLOC_H_STRUCTS
+# undef JEMALLOC_H_EXTERNS
+# undef JEMALLOC_H_INLINES
+
+/******************************************************************************/
+/*
+ * For stress tests, expose the public jemalloc interfaces with name mangling
+ * so that they can be tested as e.g. malloc() and free(). Also expose the
+ * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
+ * a separate allocator for their internal data structures.
+ */
+#elif defined(JEMALLOC_STRESS_TEST)
+# include "jemalloc/jemalloc@install_suffix@.h"
+
+# include "jemalloc/jemalloc_protos_jet.h"
+
+# define JEMALLOC_JET
+# include "jemalloc/internal/jemalloc_internal.h"
+# include "jemalloc/internal/public_unnamespace.h"
+# undef JEMALLOC_JET
+
+# include "jemalloc/jemalloc_rename.h"
+# define JEMALLOC_MANGLE
+# ifdef JEMALLOC_STRESS_TESTLIB
+# include "jemalloc/jemalloc_mangle_jet.h"
+# else
+# include "jemalloc/jemalloc_mangle.h"
+# endif
+
+/******************************************************************************/
+/*
+ * This header does dangerous things, the effects of which only test code
+ * should be subject to.
+ */
+#else
+# error "This header cannot be included outside a testing context"
+#endif
+
+/******************************************************************************/
+/*
+ * Common test utilities.
+ */
+#include "test/btalloc.h"
+#include "test/math.h"
+#include "test/mtx.h"
+#include "test/mq.h"
+#include "test/test.h"
+#include "test/timer.h"
+#include "test/thd.h"
+#define MEXP 19937
+#include "test/SFMT.h"
+
+/******************************************************************************/
+/*
+ * Define always-enabled assertion macros, so that test assertions execute even
+ * if assertions are disabled in the library code.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#undef assert_not_implemented
+
+#define assert(e) do { \
+ if (!(e)) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define not_implemented() do { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define assert_not_implemented(e) do { \
+ if (!(e)) \
+ not_implemented(); \
+} while (0)
diff --git a/memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in b/memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in
new file mode 100644
index 000000000..5cc8532a3
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in
@@ -0,0 +1,9 @@
+#include "jemalloc/internal/jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+/*
+ * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its
+ * dependencies are notoriously unportable in practice.
+ */
+#undef HAVE_SSE2
+#undef HAVE_ALTIVEC
diff --git a/memory/jemalloc/src/test/include/test/math.h b/memory/jemalloc/src/test/include/test/math.h
new file mode 100644
index 000000000..b057b29a1
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/math.h
@@ -0,0 +1,311 @@
+#ifndef JEMALLOC_ENABLE_INLINE
+double ln_gamma(double x);
+double i_gamma(double x, double p, double ln_gamma_p);
+double pt_norm(double p);
+double pt_chi2(double p, double df, double ln_gamma_df_2);
+double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_))
+/*
+ * Compute the natural log of Gamma(x), accurate to 10 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
+ * [S14]. Communications of the ACM 9(9):684.
+ */
+JEMALLOC_INLINE double
+ln_gamma(double x)
+{
+ double f, z;
+
+ assert(x > 0.0);
+
+ if (x < 7.0) {
+ f = 1.0;
+ z = x;
+ while (z < 7.0) {
+ f *= z;
+ z += 1.0;
+ }
+ x = z;
+ f = -log(f);
+ } else
+ f = 0.0;
+
+ z = 1.0 / (x * x);
+
+ return (f + (x-0.5) * log(x) - x + 0.918938533204673 +
+ (((-0.000595238095238 * z + 0.000793650793651) * z -
+ 0.002777777777778) * z + 0.083333333333333) / x);
+}
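+
+/* (Editor's note) Sanity values: ln_gamma(1.0) == 0.0 (since Gamma(1) = 0! = 1)
+ * and ln_gamma(5.0) == log(24) ~= 3.178, because Gamma(n) = (n-1)!. */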
+
+/*
+ * Compute the incomplete Gamma ratio for [0..x], where p is the shape
+ * parameter, and ln_gamma_p is ln_gamma(p).
+ *
+ * This implementation is based on:
+ *
+ * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
+ * Applied Statistics 19:285-287.
+ */
+JEMALLOC_INLINE double
+i_gamma(double x, double p, double ln_gamma_p)
+{
+ double acu, factor, oflo, gin, term, rn, a, b, an, dif;
+ double pn[6];
+ unsigned i;
+
+ assert(p > 0.0);
+ assert(x >= 0.0);
+
+ if (x == 0.0)
+ return (0.0);
+
+ acu = 1.0e-10;
+ oflo = 1.0e30;
+ gin = 0.0;
+ factor = exp(p * log(x) - x - ln_gamma_p);
+
+ if (x <= 1.0 || x < p) {
+ /* Calculation by series expansion. */
+ gin = 1.0;
+ term = 1.0;
+ rn = p;
+
+ while (true) {
+ rn += 1.0;
+ term *= x / rn;
+ gin += term;
+ if (term <= acu) {
+ gin *= factor / p;
+ return (gin);
+ }
+ }
+ } else {
+ /* Calculation by continued fraction. */
+ a = 1.0 - p;
+ b = a + x + 1.0;
+ term = 0.0;
+ pn[0] = 1.0;
+ pn[1] = x;
+ pn[2] = x + 1.0;
+ pn[3] = x * b;
+ gin = pn[2] / pn[3];
+
+ while (true) {
+ a += 1.0;
+ b += 2.0;
+ term += 1.0;
+ an = a * term;
+ for (i = 0; i < 2; i++)
+ pn[i+4] = b * pn[i+2] - an * pn[i];
+ if (pn[5] != 0.0) {
+ rn = pn[4] / pn[5];
+ dif = fabs(gin - rn);
+ if (dif <= acu && dif <= acu * rn) {
+ gin = 1.0 - factor * gin;
+ return (gin);
+ }
+ gin = rn;
+ }
+ for (i = 0; i < 4; i++)
+ pn[i] = pn[i+2];
+
+ if (fabs(pn[4]) >= oflo) {
+ for (i = 0; i < 4; i++)
+ pn[i] /= oflo;
+ }
+ }
+ }
+}
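+
+/* (Editor's note) For shape p == 1 the ratio reduces to 1 - exp(-x), so e.g.
+ * i_gamma(1.0, 1.0, 0.0) ~= 0.632, with ln_gamma(1.0) == 0.0 passed through. */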
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the normal distribution,
+ * compute the limit on the definite integral from [-inf..z] that satisfies p,
+ * accurate to 16 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
+ * distribution. Applied Statistics 37(3):477-484.
+ */
+JEMALLOC_INLINE double
+pt_norm(double p)
+{
+ double q, r, ret;
+
+ assert(p > 0.0 && p < 1.0);
+
+ q = p - 0.5;
+ if (fabs(q) <= 0.425) {
+ /* p close to 1/2. */
+ r = 0.180625 - q * q;
+ return (q * (((((((2.5090809287301226727e3 * r +
+ 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
+ r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
+ * r + 3.3871328727963666080e0) /
+ (((((((5.2264952788528545610e3 * r +
+ 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
+ r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
+ * r + 1.0));
+ } else {
+ if (q < 0.0)
+ r = p;
+ else
+ r = 1.0 - p;
+ assert(r > 0.0);
+
+ r = sqrt(-log(r));
+ if (r <= 5.0) {
+ /* p neither close to 1/2 nor 0 or 1. */
+ r -= 1.6;
+ ret = ((((((((7.74545014278341407640e-4 * r +
+ 2.27238449892691845833e-2) * r +
+ 2.41780725177450611770e-1) * r +
+ 1.27045825245236838258e0) * r +
+ 3.64784832476320460504e0) * r +
+ 5.76949722146069140550e0) * r +
+ 4.63033784615654529590e0) * r +
+ 1.42343711074968357734e0) /
+ (((((((1.05075007164441684324e-9 * r +
+ 5.47593808499534494600e-4) * r +
+ 1.51986665636164571966e-2)
+ * r + 1.48103976427480074590e-1) * r +
+ 6.89767334985100004550e-1) * r +
+ 1.67638483018380384940e0) * r +
+ 2.05319162663775882187e0) * r + 1.0));
+ } else {
+ /* p near 0 or 1. */
+ r -= 5.0;
+ ret = ((((((((2.01033439929228813265e-7 * r +
+ 2.71155556874348757815e-5) * r +
+ 1.24266094738807843860e-3) * r +
+ 2.65321895265761230930e-2) * r +
+ 2.96560571828504891230e-1) * r +
+ 1.78482653991729133580e0) * r +
+ 5.46378491116411436990e0) * r +
+ 6.65790464350110377720e0) /
+ (((((((2.04426310338993978564e-15 * r +
+ 1.42151175831644588870e-7) * r +
+ 1.84631831751005468180e-5) * r +
+ 7.86869131145613259100e-4) * r +
+ 1.48753612908506148525e-2) * r +
+ 1.36929880922735805310e-1) * r +
+ 5.99832206555887937690e-1)
+ * r + 1.0));
+ }
+ if (q < 0.0)
+ ret = -ret;
+ return (ret);
+ }
+}
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
+ * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
+ * the upper limit on the definite integral from [0..z] that satisfies p,
+ * accurate to 12 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
+ * the Chi^2 distribution. Applied Statistics 24(3):385-388.
+ *
+ * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
+ * points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
+ */
+JEMALLOC_INLINE double
+pt_chi2(double p, double df, double ln_gamma_df_2)
+{
+ double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
+ unsigned i;
+
+ assert(p >= 0.0 && p < 1.0);
+ assert(df > 0.0);
+
+ e = 5.0e-7;
+ aa = 0.6931471805;
+
+ xx = 0.5 * df;
+ c = xx - 1.0;
+
+ if (df < -1.24 * log(p)) {
+ /* Starting approximation for small Chi^2. */
+ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
+ if (ch - e < 0.0)
+ return (ch);
+ } else {
+ if (df > 0.32) {
+ x = pt_norm(p);
+ /*
+ * Starting approximation using Wilson and Hilferty
+ * estimate.
+ */
+ p1 = 0.222222 / df;
+ ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
+ /* Starting approximation for p tending to 1. */
+ if (ch > 2.2 * df + 6.0) {
+ ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
+ ln_gamma_df_2);
+ }
+ } else {
+ ch = 0.4;
+ a = log(1.0 - p);
+ while (true) {
+ q = ch;
+ p1 = 1.0 + ch * (4.67 + ch);
+ p2 = ch * (6.73 + ch * (6.66 + ch));
+ t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
+ * (13.32 + 3.0 * ch)) / p2;
+ ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
+ c * aa) * p2 / p1) / t;
+ if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ /* Calculation of seven-term Taylor series. */
+ q = ch;
+ p1 = 0.5 * ch;
+ if (p1 < 0.0)
+ return (-1.0);
+ p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
+ t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
+ b = t / ch;
+ a = 0.5 * t - b * c;
+ s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
+ 60.0 * a))))) / 420.0;
+ s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
+ a)))) / 2520.0;
+ s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
+ s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
+ (889.0 + 1740.0 * a))) / 5040.0;
+ s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
+ s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
+ ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
+ - b * (s4 - b * (s5 - b * s6))))));
+ if (fabs(q / ch - 1.0) <= e)
+ break;
+ }
+
+ return (ch);
+}
+
+/*
+ * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
+ * compute the upper limit on the definite integral from [0..z] that satisfies
+ * p.
+ */
+JEMALLOC_INLINE double
+pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
+{
+
+ return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
+}
+#endif
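
A minimal sketch of how the helpers above compose, assuming this header is included from a test with JEMALLOC_ENABLE_INLINE (or MATH_C_) defined; chi2_critical_example() is a hypothetical caller, not part of the imported sources. Note that pt_gamma() above reduces a Gamma quantile to the same pt_chi2() call, scaled by scale/2.

/* Illustrative sketch; chi2_critical_example() is a hypothetical caller. */
double
chi2_critical_example(void)
{
	double df = 9.0;	/* Degrees of freedom. */
	double p = 0.95;	/* Lower tail area. */

	/* pt_chi2() expects the caller to precompute ln_gamma(df/2). */
	return (pt_chi2(p, df, ln_gamma(df / 2.0)));
}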
diff --git a/memory/jemalloc/src/test/include/test/mq.h b/memory/jemalloc/src/test/include/test/mq.h
new file mode 100644
index 000000000..7c4df4931
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/mq.h
@@ -0,0 +1,109 @@
+void mq_nanosleep(unsigned ns);
+
+/*
+ * Simple templated message queue implementation that relies only on mutexes for
+ * synchronization (which reduces portability issues). Given the following
+ * setup:
+ *
+ * typedef struct mq_msg_s mq_msg_t;
+ * struct mq_msg_s {
+ * mq_msg(mq_msg_t) link;
+ * [message data]
+ * };
+ * mq_gen(, mq_, mq_t, mq_msg_t, link)
+ *
+ * The API is as follows:
+ *
+ * bool mq_init(mq_t *mq);
+ * void mq_fini(mq_t *mq);
+ * unsigned mq_count(mq_t *mq);
+ * mq_msg_t *mq_tryget(mq_t *mq);
+ * mq_msg_t *mq_get(mq_t *mq);
+ * void mq_put(mq_t *mq, mq_msg_t *msg);
+ *
+ * The message queue linkage embedded in each message is to be treated as
+ * externally opaque (no need to initialize or clean up externally). mq_fini()
+ * does not perform any cleanup of messages, since it knows nothing of their
+ * payloads.
+ */
+#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type)
+
+#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \
+typedef struct { \
+ mtx_t lock; \
+ ql_head(a_mq_msg_type) msgs; \
+ unsigned count; \
+} a_mq_type; \
+a_attr bool \
+a_prefix##init(a_mq_type *mq) { \
+ \
+ if (mtx_init(&mq->lock)) \
+ return (true); \
+ ql_new(&mq->msgs); \
+ mq->count = 0; \
+ return (false); \
+} \
+a_attr void \
+a_prefix##fini(a_mq_type *mq) \
+{ \
+ \
+ mtx_fini(&mq->lock); \
+} \
+a_attr unsigned \
+a_prefix##count(a_mq_type *mq) \
+{ \
+ unsigned count; \
+ \
+ mtx_lock(&mq->lock); \
+ count = mq->count; \
+ mtx_unlock(&mq->lock); \
+ return (count); \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##tryget(a_mq_type *mq) \
+{ \
+ a_mq_msg_type *msg; \
+ \
+ mtx_lock(&mq->lock); \
+ msg = ql_first(&mq->msgs); \
+ if (msg != NULL) { \
+ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \
+ mq->count--; \
+ } \
+ mtx_unlock(&mq->lock); \
+ return (msg); \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##get(a_mq_type *mq) \
+{ \
+ a_mq_msg_type *msg; \
+ unsigned ns; \
+ \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) \
+ return (msg); \
+ \
+ ns = 1; \
+ while (true) { \
+ mq_nanosleep(ns); \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) \
+ return (msg); \
+ if (ns < 1000*1000*1000) { \
+ /* Double sleep time, up to max 1 second. */ \
+ ns <<= 1; \
+ if (ns > 1000*1000*1000) \
+ ns = 1000*1000*1000; \
+ } \
+ } \
+} \
+a_attr void \
+a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \
+{ \
+ \
+ mtx_lock(&mq->lock); \
+ ql_elm_new(msg, a_field); \
+ ql_tail_insert(&mq->msgs, msg, a_field); \
+ mq->count++; \
+ mtx_unlock(&mq->lock); \
+}
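
A minimal usage sketch for the generator above, following the setup in the header comment (mq_msg_t, mq_t, and the mq_ prefix are the names from that comment; the payload field and mq_example() are hypothetical). The assert_* helpers are the ones declared in test/test.h elsewhere in this patch.

typedef struct mq_msg_s mq_msg_t;
struct mq_msg_s {
	mq_msg(mq_msg_t)	link;
	int			payload;	/* [message data] */
};
mq_gen(static, mq_, mq_t, mq_msg_t, link)

/* Illustrative sketch; mq_example() is a hypothetical caller. */
static void
mq_example(void)
{
	mq_t mq;
	mq_msg_t msg, *m;

	assert_false(mq_init(&mq), "Unexpected mq_init() failure");
	msg.payload = 42;
	mq_put(&mq, &msg);
	assert_u_eq(mq_count(&mq), 1, "Unexpected queue length");
	m = mq_get(&mq);	/* Blocks (with backoff) until a message arrives. */
	assert_ptr_eq(m, &msg, "Unexpected message");
	mq_fini(&mq);
}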
diff --git a/memory/jemalloc/src/test/include/test/mtx.h b/memory/jemalloc/src/test/include/test/mtx.h
new file mode 100644
index 000000000..58afbc3d1
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/mtx.h
@@ -0,0 +1,23 @@
+/*
+ * mtx is a slightly simplified version of malloc_mutex. This code duplication
+ * is unfortunate, but there are allocator bootstrapping considerations that
+ * would leak into the test infrastructure if malloc_mutex were used directly
+ * in tests.
+ */
+
+typedef struct {
+#ifdef _WIN32
+ CRITICAL_SECTION lock;
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock lock;
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLock lock;
+#else
+ pthread_mutex_t lock;
+#endif
+} mtx_t;
+
+bool mtx_init(mtx_t *mtx);
+void mtx_fini(mtx_t *mtx);
+void mtx_lock(mtx_t *mtx);
+void mtx_unlock(mtx_t *mtx);
diff --git a/memory/jemalloc/src/test/include/test/test.h b/memory/jemalloc/src/test/include/test/test.h
new file mode 100644
index 000000000..c8112eb8b
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/test.h
@@ -0,0 +1,333 @@
+#define ASSERT_BUFSIZE 256
+
+#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
+ t a_ = (a); \
+ t b_ = (b); \
+ if (!(a_ cmp b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) "#cmp" (%s) --> " \
+ "%"pri" "#neg_cmp" %"pri": ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_, b_); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
+ ==, "p", __VA_ARGS__)
+#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
+ ==, "p", __VA_ARGS__)
+
+#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
+#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
+#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
+#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
+#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
+#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
+
+#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
+#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
+#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
+#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
+#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
+#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
+
+#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
+#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
+#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
+#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
+#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
+#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
+
+#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
+#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
+#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
+#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
+#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
+#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
+
+#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
+ !=, "ld", __VA_ARGS__)
+#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
+ ==, "ld", __VA_ARGS__)
+#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
+ >=, "ld", __VA_ARGS__)
+#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
+ >, "ld", __VA_ARGS__)
+#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
+ <, "ld", __VA_ARGS__)
+#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
+ <=, "ld", __VA_ARGS__)
+
+#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
+ a, b, ==, !=, "lu", __VA_ARGS__)
+#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
+ a, b, !=, ==, "lu", __VA_ARGS__)
+#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <, >=, "lu", __VA_ARGS__)
+#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <=, >, "lu", __VA_ARGS__)
+#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >=, <, "lu", __VA_ARGS__)
+#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >, <=, "lu", __VA_ARGS__)
+
+#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
+ !=, "qd", __VA_ARGS__)
+#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
+ ==, "qd", __VA_ARGS__)
+#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
+ >=, "qd", __VA_ARGS__)
+#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
+ >, "qd", __VA_ARGS__)
+#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
+ <, "qd", __VA_ARGS__)
+#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
+ <=, "qd", __VA_ARGS__)
+
+#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, ==, !=, "qu", __VA_ARGS__)
+#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, !=, ==, "qu", __VA_ARGS__)
+#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <, >=, "qu", __VA_ARGS__)
+#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <=, >, "qu", __VA_ARGS__)
+#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >=, <, "qu", __VA_ARGS__)
+#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >, <=, "qu", __VA_ARGS__)
+
+#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
+ !=, "jd", __VA_ARGS__)
+#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
+ ==, "jd", __VA_ARGS__)
+#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
+ >=, "jd", __VA_ARGS__)
+#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
+ >, "jd", __VA_ARGS__)
+#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
+ <, "jd", __VA_ARGS__)
+#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
+ <=, "jd", __VA_ARGS__)
+
+#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
+ !=, "ju", __VA_ARGS__)
+#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
+ ==, "ju", __VA_ARGS__)
+#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
+ >=, "ju", __VA_ARGS__)
+#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
+ >, "ju", __VA_ARGS__)
+#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
+ <, "ju", __VA_ARGS__)
+#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
+ <=, "ju", __VA_ARGS__)
+
+#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
+ !=, "zd", __VA_ARGS__)
+#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
+ ==, "zd", __VA_ARGS__)
+#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
+ >=, "zd", __VA_ARGS__)
+#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
+ >, "zd", __VA_ARGS__)
+#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
+ <, "zd", __VA_ARGS__)
+#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
+ <=, "zd", __VA_ARGS__)
+
+#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
+ !=, "zu", __VA_ARGS__)
+#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
+ ==, "zu", __VA_ARGS__)
+#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
+ >=, "zu", __VA_ARGS__)
+#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
+ >, "zu", __VA_ARGS__)
+#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
+ <, "zu", __VA_ARGS__)
+#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
+ <=, "zu", __VA_ARGS__)
+
+#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
+ !=, FMTd32, __VA_ARGS__)
+#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
+ ==, FMTd32, __VA_ARGS__)
+#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
+ >=, FMTd32, __VA_ARGS__)
+#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
+ >, FMTd32, __VA_ARGS__)
+#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
+ <, FMTd32, __VA_ARGS__)
+#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
+ <=, FMTd32, __VA_ARGS__)
+
+#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
+ !=, FMTu32, __VA_ARGS__)
+#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
+ ==, FMTu32, __VA_ARGS__)
+#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
+ >=, FMTu32, __VA_ARGS__)
+#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
+ >, FMTu32, __VA_ARGS__)
+#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
+ <, FMTu32, __VA_ARGS__)
+#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
+ <=, FMTu32, __VA_ARGS__)
+
+#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
+ !=, FMTd64, __VA_ARGS__)
+#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
+ ==, FMTd64, __VA_ARGS__)
+#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
+ >=, FMTd64, __VA_ARGS__)
+#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
+ >, FMTd64, __VA_ARGS__)
+#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
+ <, FMTd64, __VA_ARGS__)
+#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
+ <=, FMTd64, __VA_ARGS__)
+
+#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
+ !=, FMTu64, __VA_ARGS__)
+#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
+ ==, FMTu64, __VA_ARGS__)
+#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
+ >=, FMTu64, __VA_ARGS__)
+#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
+ >, FMTu64, __VA_ARGS__)
+#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
+ <, FMTu64, __VA_ARGS__)
+#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
+ <=, FMTu64, __VA_ARGS__)
+
+#define assert_b_eq(a, b, ...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ == b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) == (%s) --> %s != %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_b_ne(a, b, ...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ != b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) != (%s) --> %s == %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
+#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
+
+#define assert_str_eq(a, b, ...) do { \
+ if (strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) same as (%s) --> " \
+ "\"%s\" differs from \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_str_ne(a, b, ...) do { \
+ if (!strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) differs from (%s) --> " \
+ "\"%s\" same as \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define assert_not_reached(...) do { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Unreachable code reached: ", \
+ __func__, __FILE__, __LINE__); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+} while (0)
+
+/*
+ * If this enum changes, corresponding changes in test/test.sh.in are also
+ * necessary.
+ */
+typedef enum {
+ test_status_pass = 0,
+ test_status_skip = 1,
+ test_status_fail = 2,
+
+ test_status_count = 3
+} test_status_t;
+
+typedef void (test_t)(void);
+
+#define TEST_BEGIN(f) \
+static void \
+f(void) \
+{ \
+ p_test_init(#f);
+
+#define TEST_END \
+ goto label_test_end; \
+label_test_end: \
+ p_test_fini(); \
+}
+
+#define test(...) \
+ p_test(__VA_ARGS__, NULL)
+
+#define test_no_malloc_init(...) \
+ p_test_no_malloc_init(__VA_ARGS__, NULL)
+
+#define test_skip_if(e) do { \
+ if (e) { \
+ test_skip("%s:%s:%d: Test skipped: (%s)", \
+ __func__, __FILE__, __LINE__, #e); \
+ goto label_test_end; \
+ } \
+} while (0)
+
+void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+
+/* For private use by macros. */
+test_status_t p_test(test_t *t, ...);
+test_status_t p_test_no_malloc_init(test_t *t, ...);
+void p_test_init(const char *name);
+void p_test_fini(void);
+void p_test_fail(const char *prefix, const char *message);
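
The integration tests added below all follow the same skeleton built on these macros; as a minimal sketch (test_example is a hypothetical name), a test translation unit looks like:

#include "test/jemalloc_test.h"

/* Illustrative sketch; test_example is a hypothetical test. */
TEST_BEGIN(test_example)
{
	void *p;

	p = malloc(1);
	assert_ptr_not_null(p, "Unexpected malloc() error");
	free(p);
}
TEST_END

int
main(void)
{

	return (test(
	    test_example));
}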
diff --git a/memory/jemalloc/src/test/include/test/thd.h b/memory/jemalloc/src/test/include/test/thd.h
new file mode 100644
index 000000000..47a51262e
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/thd.h
@@ -0,0 +1,9 @@
+/* Abstraction layer for threading in tests. */
+#ifdef _WIN32
+typedef HANDLE thd_t;
+#else
+typedef pthread_t thd_t;
+#endif
+
+void thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
+void thd_join(thd_t thd, void **ret);
diff --git a/memory/jemalloc/src/test/include/test/timer.h b/memory/jemalloc/src/test/include/test/timer.h
new file mode 100644
index 000000000..ace6191b8
--- /dev/null
+++ b/memory/jemalloc/src/test/include/test/timer.h
@@ -0,0 +1,11 @@
+/* Simple timer, for use in benchmark reporting. */
+
+typedef struct {
+ nstime_t t0;
+ nstime_t t1;
+} timedelta_t;
+
+void timer_start(timedelta_t *timer);
+void timer_stop(timedelta_t *timer);
+uint64_t timer_usec(const timedelta_t *timer);
+void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen);
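
A sketch of the intended benchmark usage, assuming the FMTu64 format macro and malloc_printf() from the jemalloc internals are available to tests (as they are elsewhere in this patch); time_malloc_example() is a hypothetical name.

/* Illustrative sketch; time_malloc_example() is a hypothetical benchmark. */
static void
time_malloc_example(void)
{
	timedelta_t timer;
	unsigned i;

	timer_start(&timer);
	for (i = 0; i < 1000; i++)
		free(malloc(1));
	timer_stop(&timer);
	malloc_printf("1000 malloc/free pairs: %"FMTu64" us\n",
	    timer_usec(&timer));
}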
diff --git a/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c b/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c
new file mode 100644
index 000000000..30c203ae6
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c
@@ -0,0 +1,69 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ unsigned thread_ind = (unsigned)(uintptr_t)arg;
+ unsigned arena_ind;
+ void *p;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Error in arenas.extend");
+
+ if (thread_ind % 4 != 3) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ const char *dss_precs[] = {"disabled", "primary", "secondary"};
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Error in mallctlnametomib()");
+ mib[1] = arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
+ }
+
+ p = mallocx(1, MALLOCX_ARENA(arena_ind));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_MALLOCX_ARENA)
+{
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)(uintptr_t)i);
+ }
+
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_MALLOCX_ARENA));
+}
diff --git a/memory/jemalloc/src/test/integration/aligned_alloc.c b/memory/jemalloc/src/test/integration/aligned_alloc.c
new file mode 100644
index 000000000..58438421d
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/aligned_alloc.c
@@ -0,0 +1,139 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void)
+{
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors)
+{
+ size_t alignment;
+ void *p;
+
+ alignment = 0;
+ set_errno(0);
+ p = aligned_alloc(alignment, 1);
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu", alignment);
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ set_errno(0);
+ p = aligned_alloc(alignment + 1, 1);
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors)
+{
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+	    "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ ps[i] = aligned_alloc(alignment, size);
+ if (ps[i] == NULL) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += malloc_usable_size(ps[i]);
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size));
+}
diff --git a/memory/jemalloc/src/test/integration/allocated.c b/memory/jemalloc/src/test/integration/allocated.c
new file mode 100644
index 000000000..3630e80ce
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/allocated.c
@@ -0,0 +1,125 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ int err;
+ void *p;
+ uint64_t a0, a1, d0, d1;
+ uint64_t *ap0, *ap1, *dp0, *dp1;
+ size_t sz, usize;
+
+ sz = sizeof(a0);
+ if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(ap0);
+ if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*ap0, a0,
+ "\"thread.allocatedp\" should provide a pointer to internal "
+ "storage");
+
+ sz = sizeof(d0);
+ if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(dp0);
+ if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*dp0, d0,
+ "\"thread.deallocatedp\" should provide a pointer to internal "
+ "storage");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() error");
+
+ sz = sizeof(a1);
+ mallctl("thread.allocated", &a1, &sz, NULL, 0);
+ sz = sizeof(ap1);
+ mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
+ assert_u64_eq(*ap1, a1,
+ "Dereferenced \"thread.allocatedp\" value should equal "
+ "\"thread.allocated\" value");
+ assert_ptr_eq(ap0, ap1,
+ "Pointer returned by \"thread.allocatedp\" should not change");
+
+ usize = malloc_usable_size(p);
+ assert_u64_le(a0 + usize, a1,
+ "Allocated memory counter should increase by at least the amount "
+ "explicitly allocated");
+
+ free(p);
+
+ sz = sizeof(d1);
+ mallctl("thread.deallocated", &d1, &sz, NULL, 0);
+ sz = sizeof(dp1);
+ mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
+ assert_u64_eq(*dp1, d1,
+ "Dereferenced \"thread.deallocatedp\" value should equal "
+ "\"thread.deallocated\" value");
+ assert_ptr_eq(dp0, dp1,
+ "Pointer returned by \"thread.deallocatedp\" should not change");
+
+ assert_u64_le(d0 + usize, d1,
+ "Deallocated memory counter should increase by at least the amount "
+ "explicitly deallocated");
+
+ return (NULL);
+label_ENOENT:
+ assert_false(config_stats,
+ "ENOENT should only be returned if stats are disabled");
+ test_skip("\"thread.allocated\" mallctl not available");
+ return (NULL);
+}
+
+TEST_BEGIN(test_main_thread)
+{
+
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Run tests multiple times to check for bad interactions. */
+ return (test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread));
+}
diff --git a/memory/jemalloc/src/test/integration/chunk.c b/memory/jemalloc/src/test/integration/chunk.c
new file mode 100644
index 000000000..ff9bf967a
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/chunk.c
@@ -0,0 +1,293 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+static chunk_hooks_t orig_hooks;
+static chunk_hooks_t old_hooks;
+
+static bool do_dalloc = true;
+static bool do_decommit;
+
+static bool did_alloc;
+static bool did_dalloc;
+static bool did_commit;
+static bool did_decommit;
+static bool did_purge;
+static bool did_split;
+static bool did_merge;
+
+#if 0
+# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
+#else
+# define TRACE_HOOK(fmt, ...)
+#endif
+
+void *
+chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, "
+ "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment,
+ *zero ? "true" : "false", *commit ? "true" : "false", arena_ind);
+ did_alloc = true;
+ return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
+ arena_ind));
+}
+
+bool
+chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n",
+ __func__, chunk, size, committed ? "true" : "false", arena_ind);
+ did_dalloc = true;
+ if (!do_dalloc)
+ return (true);
+ return (old_hooks.dalloc(chunk, size, committed, arena_ind));
+}
+
+bool
+chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+ bool err;
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ err = old_hooks.commit(chunk, size, offset, length, arena_ind);
+ did_commit = !err;
+ return (err);
+}
+
+bool
+chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+ bool err;
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ if (!do_decommit)
+ return (true);
+ err = old_hooks.decommit(chunk, size, offset, length, arena_ind);
+ did_decommit = !err;
+ return (err);
+}
+
+bool
+chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ did_purge = true;
+ return (old_hooks.purge(chunk, size, offset, length, arena_ind));
+}
+
+bool
+chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a,
+ size_b, committed ? "true" : "false", arena_ind);
+ did_split = true;
+ return (old_hooks.split(chunk, size, size_a, size_b, committed,
+ arena_ind));
+}
+
+bool
+chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b,
+ size_b, committed ? "true" : "false", arena_ind);
+ did_merge = true;
+ return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b,
+ committed, arena_ind));
+}
+
+TEST_BEGIN(test_chunk)
+{
+ void *p;
+ size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
+ unsigned arena_ind;
+ int flags;
+ size_t hooks_mib[3], purge_mib[3];
+ size_t hooks_miblen, purge_miblen;
+ chunk_hooks_t new_hooks = {
+ chunk_alloc,
+ chunk_dalloc,
+ chunk_commit,
+ chunk_decommit,
+ chunk_purge,
+ chunk_split,
+ chunk_merge
+ };
+ bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Install custom chunk hooks. */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = (size_t)arena_ind;
+ old_size = sizeof(chunk_hooks_t);
+ new_size = sizeof(chunk_hooks_t);
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
+ &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
+ orig_hooks = old_hooks;
+ assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
+ assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
+ "Unexpected dalloc error");
+ assert_ptr_ne(old_hooks.commit, chunk_commit,
+ "Unexpected commit error");
+ assert_ptr_ne(old_hooks.decommit, chunk_decommit,
+ "Unexpected decommit error");
+ assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error");
+ assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
+ assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");
+
+ /* Get large size classes. */
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.0.size failure");
+ assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.1.size failure");
+
+ /* Get huge size classes. */
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.0.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.1.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.2.size failure");
+
+ /* Test dalloc/decommit/purge cascade. */
+ purge_miblen = sizeof(purge_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ purge_mib[1] = (size_t)arena_ind;
+ do_dalloc = false;
+ do_decommit = false;
+ p = mallocx(huge0 * 2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_dalloc = false;
+ did_decommit = false;
+ did_purge = false;
+ did_split = false;
+ xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0);
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_a) {
+ assert_true(did_dalloc, "Expected dalloc");
+ assert_false(did_decommit, "Unexpected decommit");
+ assert_true(did_purge, "Expected purge");
+ }
+ assert_true(did_split, "Expected split");
+ dallocx(p, flags);
+ do_dalloc = true;
+
+ /* Test decommit/commit and observe split/merge. */
+ do_dalloc = false;
+ do_decommit = true;
+ p = mallocx(huge0 * 2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_decommit = false;
+ did_commit = false;
+ did_split = false;
+ did_merge = false;
+ xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0);
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_b)
+ assert_true(did_split, "Expected split");
+ xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2);
+ assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+ if (xallocx_success_b && xallocx_success_c)
+ assert_true(did_merge, "Expected merge");
+ dallocx(p, flags);
+ do_dalloc = true;
+ do_decommit = false;
+
+ /* Test purge for partial-chunk huge allocations. */
+ if (huge0 * 2 > huge2) {
+ /*
+ * There are at least four size classes per doubling, so a
+ * successful xallocx() from size=huge2 to size=huge1 is
+ * guaranteed to leave trailing purgeable memory.
+ */
+ p = mallocx(huge2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_purge = false;
+ assert_zu_eq(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() failure");
+ assert_true(did_purge, "Expected purge");
+ dallocx(p, flags);
+ }
+
+ /* Test decommit for large allocations. */
+ do_decommit = true;
+ p = mallocx(large1, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ did_decommit = false;
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() failure");
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ did_commit = false;
+ assert_zu_eq(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() failure");
+ assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+ dallocx(p, flags);
+ do_decommit = false;
+
+ /* Make sure non-huge allocation succeeds. */
+ p = mallocx(42, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, flags);
+
+ /* Restore chunk hooks. */
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
+ &old_hooks, new_size), 0, "Unexpected chunk_hooks error");
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
+ NULL, 0), 0, "Unexpected chunk_hooks error");
+ assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
+ "Unexpected alloc error");
+ assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
+ "Unexpected dalloc error");
+ assert_ptr_eq(old_hooks.commit, orig_hooks.commit,
+ "Unexpected commit error");
+ assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit,
+ "Unexpected decommit error");
+ assert_ptr_eq(old_hooks.purge, orig_hooks.purge,
+ "Unexpected purge error");
+ assert_ptr_eq(old_hooks.split, orig_hooks.split,
+ "Unexpected split error");
+ assert_ptr_eq(old_hooks.merge, orig_hooks.merge,
+ "Unexpected merge error");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(test_chunk));
+}
diff --git a/memory/jemalloc/src/test/integration/mallocx.c b/memory/jemalloc/src/test/integration/mallocx.c
new file mode 100644
index 000000000..43b76ebac
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/mallocx.c
@@ -0,0 +1,234 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void)
+{
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_overflow)
+{
+ size_t hugemax;
+
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ assert_ptr_null(mallocx(hugemax+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);
+
+ assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ assert_ptr_null(mallocx(SIZE_T_MAX, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+ assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+TEST_BEGIN(test_oom)
+{
+ size_t hugemax;
+ bool oom;
+ void *ptrs[3];
+ unsigned i;
+
+ /*
+ * It should be impossible to allocate three objects that each consume
+ * nearly half the virtual address space.
+ */
+ hugemax = get_huge_size(get_nhuge()-1);
+ oom = false;
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ ptrs[i] = mallocx(hugemax, 0);
+ if (ptrs[i] == NULL)
+ oom = true;
+ }
+ assert_true(oom,
+ "Expected OOM during series of calls to mallocx(size=%zu, 0)",
+ hugemax);
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ if (ptrs[i] != NULL)
+ dallocx(ptrs[i], 0);
+ }
+ purge();
+
+#if LG_SIZEOF_PTR == 3
+ assert_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x8000000000000000ULL)),
+ "Expected OOM for mallocx()");
+ assert_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x80000000)),
+ "Expected OOM for mallocx()");
+#else
+ assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
+ "Expected OOM for mallocx()");
+#endif
+}
+TEST_END
+
+TEST_BEGIN(test_basic)
+{
+#define MAXSZ (((size_t)1) << 23)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ size_t nsz, rsz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ rsz = sallocx(p, 0);
+ assert_zu_ge(rsz, sz, "Real size smaller than expected");
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
+ nsz);
+ rsz = sallocx(p, 0);
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+ purge();
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+ size_t nsz, rsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_ptr_not_null(ps[i],
+ "mallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ assert_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_overflow,
+ test_oom,
+ test_basic,
+ test_alignment_and_size));
+}
diff --git a/memory/jemalloc/src/test/integration/overflow.c b/memory/jemalloc/src/test/integration/overflow.c
new file mode 100644
index 000000000..303d9b2d3
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/overflow.c
@@ -0,0 +1,49 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_overflow)
+{
+ unsigned nhchunks;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+ void *p;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nhchunks - 1;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
+
+ assert_ptr_null(malloc(max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(malloc(SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ assert_ptr_null(calloc(1, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(calloc(1, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() OOM");
+ assert_ptr_null(realloc(p, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(realloc(p, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+ free(p);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_overflow));
+}
diff --git a/memory/jemalloc/src/test/integration/posix_memalign.c b/memory/jemalloc/src/test/integration/posix_memalign.c
new file mode 100644
index 000000000..e22e10200
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/posix_memalign.c
@@ -0,0 +1,133 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void)
+{
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors)
+{
+ size_t alignment;
+ void *p;
+
+ for (alignment = 0; alignment < sizeof(void *); alignment++) {
+ assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment);
+ }
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors)
+{
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ int err;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ err = posix_memalign(&ps[i],
+ alignment, size);
+ if (err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += malloc_usable_size(ps[i]);
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size));
+}
diff --git a/memory/jemalloc/src/test/integration/rallocx.c b/memory/jemalloc/src/test/integration/rallocx.c
new file mode 100644
index 000000000..66ad8660a
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/rallocx.c
@@ -0,0 +1,259 @@
+#include "test/jemalloc_test.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_grow_and_shrink)
+{
+ void *p, *q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 2500
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ szs[0] = sallocx(p, 0);
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = rallocx(p, szs[j-1]+1, 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ szs[j] = sallocx(q, 0);
+ assert_zu_ne(szs[j], szs[j-1]+1,
+ "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = rallocx(p, szs[j-1], 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ tsz = sallocx(q, 0);
+ assert_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ dallocx(p, 0);
+#undef MAXSZ
+#undef NSZS
+#undef NCYCLES
+}
+TEST_END
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+ bool ret = false;
+ const uint8_t *buf = (const uint8_t *)p;
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ uint8_t b = buf[offset+i];
+ if (b != c) {
+ test_fail("Allocation at %p (len=%zu) contains %#x "
+ "rather than %#x at offset %zu", p, len, b, c,
+ offset+i);
+ ret = true;
+ }
+ }
+
+ return (ret);
+}
+
+TEST_BEGIN(test_zero)
+{
+ void *p, *q;
+ size_t psz, qsz, i, j;
+ size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
+#define FILL_BYTE 0xaaU
+#define RANGE 2048
+
+ for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
+ size_t start_size = start_sizes[i];
+ p = mallocx(start_size, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ psz = sallocx(p, 0);
+
+ assert_false(validate_fill(p, 0, 0, psz),
+ "Expected zeroed memory");
+ memset(p, FILL_BYTE, psz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+
+ for (j = 1; j < RANGE; j++) {
+ q = rallocx(p, start_size+j, MALLOCX_ZERO);
+ assert_ptr_not_null(q, "Unexpected rallocx() error");
+ qsz = sallocx(q, 0);
+ if (q != p || qsz != psz) {
+ assert_false(validate_fill(q, FILL_BYTE, 0,
+ psz), "Expected filled memory");
+ assert_false(validate_fill(q, 0, psz, qsz-psz),
+ "Expected zeroed memory");
+ }
+ if (psz != qsz) {
+ memset((void *)((uintptr_t)q+psz), FILL_BYTE,
+ qsz-psz);
+ psz = qsz;
+ }
+ p = q;
+ }
+ assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+ dallocx(p, 0);
+ }
+#undef FILL_BYTE
+}
+TEST_END
+
+TEST_BEGIN(test_align)
+{
+ void *p, *q;
+ size_t align;
+#define MAX_ALIGN (ZU(1) << 25)
+
+ align = ZU(1);
+ p = mallocx(1, MALLOCX_ALIGN(align));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
+ q = rallocx(p, 1, MALLOCX_ALIGN(align));
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for align=%zu", align);
+ assert_ptr_null(
+ (void *)((uintptr_t)q & (align-1)),
+ "%p inadequately aligned for align=%zu",
+ q, align);
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_lg_align_and_zero)
+{
+ void *p, *q;
+ unsigned lg_align;
+ size_t sz;
+#define MAX_LG_ALIGN 25
+#define MAX_VALIDATE (ZU(1) << 22)
+
+ lg_align = 0;
+ p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
+ q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for lg_align=%u", lg_align);
+ assert_ptr_null(
+ (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
+ "%p inadequately aligned for lg_align=%u", q, lg_align);
+ sz = sallocx(q, 0);
+ if ((sz << 1) <= MAX_VALIDATE) {
+ assert_false(validate_fill(q, 0, 0, sz),
+ "Expected zeroed memory");
+ } else {
+ assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
+ "Expected zeroed memory");
+ assert_false(validate_fill(
+ (void *)((uintptr_t)q+sz-MAX_VALIDATE),
+ 0, 0, MAX_VALIDATE), "Expected zeroed memory");
+ }
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_VALIDATE
+#undef MAX_LG_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_overflow)
+{
+ size_t hugemax;
+ void *p;
+
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_ptr_null(rallocx(p, hugemax+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1);
+
+ assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+ assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_grow_and_shrink,
+ test_zero,
+ test_align,
+ test_lg_align_and_zero,
+ test_overflow));
+}
diff --git a/memory/jemalloc/src/test/integration/sdallocx.c b/memory/jemalloc/src/test/integration/sdallocx.c
new file mode 100644
index 000000000..b84817d76
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/sdallocx.c
@@ -0,0 +1,57 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
+
+TEST_BEGIN(test_basic)
+{
+ void *ptr = mallocx(64, 0);
+ sdallocx(ptr, 64, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t nsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
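+	/*
+	 * For each alignment/size combination allocate up to NITER objects
+	 * (bounded by cumulative usable size) and release them with
+	 * sdallocx(), passing back the original request size and flags.
+	 */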
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ total += nsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ sdallocx(ps[i], sz,
+ MALLOCX_ALIGN(alignment));
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_basic,
+ test_alignment_and_size));
+}
diff --git a/memory/jemalloc/src/test/integration/thread_arena.c b/memory/jemalloc/src/test/integration/thread_arena.c
new file mode 100644
index 000000000..67be53513
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/thread_arena.c
@@ -0,0 +1,79 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+void *
+thd_start(void *arg)
+{
+ unsigned main_arena_ind = *(unsigned *)arg;
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+ free(p);
+
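+	/*
+	 * Switch this thread to the main thread's arena; the same mallctl()
+	 * call also reads back the previously assigned arena index.
+	 */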
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
+ sizeof(main_arena_ind)))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+ assert_u_eq(arena_ind, main_arena_ind,
+ "Arena index should be same as for main thread");
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_thread_arena)
+{
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)&arena_ind);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ intptr_t join_ret;
+ thd_join(thds[i], (void *)&join_ret);
+ assert_zd_eq(join_ret, 0, "Unexpected thread join error");
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_thread_arena));
+}
diff --git a/memory/jemalloc/src/test/integration/thread_tcache_enabled.c b/memory/jemalloc/src/test/integration/thread_tcache_enabled.c
new file mode 100644
index 000000000..f4e89c682
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/thread_tcache_enabled.c
@@ -0,0 +1,113 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ int err;
+ size_t sz;
+ bool e0, e1;
+
+ sz = sizeof(bool);
+ if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ assert_false(config_tcache,
+ "ENOENT should only be returned if tcache is "
+ "disabled");
+ }
+ goto label_ENOENT;
+ }
+
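+	/*
+	 * Each mallctl() below writes e1 as the new setting while reading the
+	 * previous setting into e0, so the asserts check the state left by
+	 * the preceding call.
+	 */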
+ if (e0) {
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
+ 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+ }
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ return (NULL);
+label_ENOENT:
+ test_skip("\"thread.tcache.enabled\" mallctl not available");
+ return (NULL);
+}
+
+TEST_BEGIN(test_main_thread)
+{
+
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Run tests multiple times to check for bad interactions. */
+ return (test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread));
+}
diff --git a/memory/jemalloc/src/test/integration/xallocx.c b/memory/jemalloc/src/test/integration/xallocx.c
new file mode 100644
index 000000000..ad292bb56
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/xallocx.c
@@ -0,0 +1,497 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+/*
+ * Use a separate arena for xallocx() extension/contraction tests so that
+ * internal allocation (e.g. by heap profiling) can't interpose allocations
+ * where xallocx() would ordinarily be able to extend.
+ */
+static unsigned
+arena_ind(void)
+{
+ static unsigned ind = 0;
+
+ if (ind == 0) {
+ size_t sz = sizeof(ind);
+ assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure creating arena");
+ }
+
+ return (ind);
+}
+
+TEST_BEGIN(test_same_size)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz + 5, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+ return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+ return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
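+	/* Convert cmd (e.g. "arenas.bin.0.size") to a MIB, patch in ind, and read. */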
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_small_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.bin.0.size", ind));
+}
+
+static size_t
+get_large_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.lrun.0.size", ind));
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_size)
+{
+ size_t small0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test smallest supported size. */
+ assert_zu_eq(xallocx(p, 1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test largest supported size. */
+ assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test size overflow. */
+ assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow)
+{
+ size_t small0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test overflows that can be resolved by clamping extra. */
+ assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test overflow such that hugemax-size underflows. */
+ assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small)
+{
+ size_t small0, small1, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ small1 = get_small_size(1);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test size+extra overflow. */
+ assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large)
+{
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t smallmax, large0, large1, large2, huge0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ smallmax = get_small_size(get_nsmall()-1);
+ large0 = get_large_size(0);
+ large1 = get_large_size(1);
+ large2 = get_large_size(2);
+ huge0 = get_huge_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(large2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, smallmax, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge0, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size+extra overflow. */
+ assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_huge)
+{
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t largemax, huge1, huge2, huge3, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ largemax = get_large_size(get_nlarge()-1);
+ huge1 = get_huge_size(1);
+ huge2 = get_huge_size(2);
+ huge3 = get_huge_size(3);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(huge3, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, largemax, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_le(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ /* Test size+extra overflow. */
+ assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len)
+{
+ const uint8_t *pc = (const uint8_t *)p;
+ size_t i, range0;
+ uint8_t c0;
+
+ malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len);
+ range0 = 0;
+ c0 = pc[0];
+ for (i = 0; i < len; i++) {
+ if (pc[i] != c0) {
+ malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+ range0 = i;
+ c0 = pc[i];
+ }
+ }
+ malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+ const uint8_t *pc = (const uint8_t *)p;
+ bool err;
+ size_t i;
+
+ for (i = offset, err = false; i < offset+len; i++) {
+ if (pc[i] != c)
+ err = true;
+ }
+
+ if (err)
+ print_filled_extents(p, c, offset + len);
+
+ return (err);
+}
+
+static void
+test_zero(size_t szmin, size_t szmax)
+{
+ int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
+ size_t sz, nsz;
+ void *p;
+#define FILL_BYTE 0x7aU
+
+ sz = szmax;
+ p = mallocx(sz, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+ sz);
+
+ /*
+ * Fill with non-zero so that non-debug builds are more likely to detect
+ * errors.
+ */
+ memset(p, FILL_BYTE, sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ /* Shrink in place so that we can expect growing in place to succeed. */
+ sz = szmin;
+ assert_zu_eq(xallocx(p, sz, 0, flags), sz,
+ "Unexpected xallocx() error");
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
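+	/*
+	 * Grow one size class at a time, verifying that previously written
+	 * bytes survive and that each extension is zeroed.
+	 */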
+ for (sz = szmin; sz < szmax; sz = nsz) {
+ nsz = nallocx(sz+1, flags);
+ assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz,
+ "Unexpected xallocx() failure");
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+ assert_false(validate_fill(p, 0x00, sz, nsz-sz),
+ "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+ memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
+ "Memory not filled: nsz=%zu", nsz);
+ }
+
+ dallocx(p, flags);
+}
+
+TEST_BEGIN(test_zero_large)
+{
+ size_t large0, largemax;
+
+ /* Get size classes. */
+ large0 = get_large_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ test_zero(large0, largemax);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+ size_t huge0, huge1;
+
+ /* Get size classes. */
+ huge0 = get_huge_size(0);
+ huge1 = get_huge_size(1);
+
+ test_zero(huge1, huge0 * 2);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail,
+ test_size,
+ test_size_extra_overflow,
+ test_extra_small,
+ test_extra_large,
+ test_extra_huge,
+ test_zero_large,
+ test_zero_huge));
+}
diff --git a/memory/jemalloc/src/test/src/SFMT.c b/memory/jemalloc/src/test/src/SFMT.c
new file mode 100644
index 000000000..80cabe05e
--- /dev/null
+++ b/memory/jemalloc/src/test/src/SFMT.c
@@ -0,0 +1,719 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.c
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+#define SFMT_C_
+#include "test/jemalloc_test.h"
+#include "test/SFMT-params.h"
+
+#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(ONLY64) && !defined(BIG_ENDIAN64)
+ #if defined(__GNUC__)
+ #error "-DONLY64 must be specified with -DBIG_ENDIAN64"
+ #endif
+#undef ONLY64
+#endif
+/*------------------------------------------------------
+ 128-bit SIMD data type for Altivec, SSE2 or standard C
+ ------------------------------------------------------*/
+#if defined(HAVE_ALTIVEC)
+/** 128-bit data structure */
+union W128_T {
+ vector unsigned int s;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#elif defined(HAVE_SSE2)
+/** 128-bit data structure */
+union W128_T {
+ __m128i si;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#else
+
+/** 128-bit data structure */
+struct W128_T {
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef struct W128_T w128_t;
+
+#endif
+
+struct sfmt_s {
+ /** the 128-bit internal state array */
+ w128_t sfmt[N];
+ /** index counter to the 32-bit internal state array */
+ int idx;
+ /** a flag: it is 0 if and only if the internal state is not yet
+ * initialized. */
+ int initialized;
+};
+
+/*--------------------------------------
+ FILE GLOBAL VARIABLES
+ internal state, index counter and flag
+ --------------------------------------*/
+
+/** a parity check vector which certifies the period of 2^{MEXP} */
+static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
+
+/*----------------
+ STATIC FUNCTIONS
+ ----------------*/
+JEMALLOC_INLINE_C int idxof(int i);
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift);
+JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift);
+#endif
+JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
+JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
+JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
+JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
+static void period_certification(sfmt_t *ctx);
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+JEMALLOC_INLINE_C void swap(w128_t *array, int size);
+#endif
+
+#if defined(HAVE_ALTIVEC)
+ #include "test/SFMT-alti.h"
+#elif defined(HAVE_SSE2)
+ #include "test/SFMT-sse2.h"
+#endif
+
+/**
+ * This function simulates a 64-bit index of LITTLE ENDIAN
+ * on a BIG ENDIAN machine.
+ */
+#ifdef ONLY64
+JEMALLOC_INLINE_C int idxof(int i) {
+ return i ^ 1;
+}
+#else
+JEMALLOC_INLINE_C int idxof(int i) {
+ return i;
+}
+#endif
+/**
+ * This function simulates a SIMD 128-bit right shift in standard C.
+ * The 128-bit integer given in "in" is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+/**
+ * This function simulates a SIMD 128-bit left shift in standard C.
+ * The 128-bit integer given in "in" is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#ifdef ONLY64
+JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+#endif
+
+/**
+ * This function represents the recursion formula.
+ * @param r output
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#else
+JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#endif
+#endif
+
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < size - N; i++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j] = array[j + size - N];
+ }
+ for (; i < size; i++, j++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ ctx->sfmt[j] = array[i];
+ }
+}
+#endif
+
+#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
+JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
+ int i;
+ uint32_t x, y;
+
+ for (i = 0; i < size; i++) {
+ x = array[i].u[0];
+ y = array[i].u[2];
+ array[i].u[0] = array[i].u[1];
+ array[i].u[2] = array[i].u[3];
+ array[i].u[1] = x;
+ array[i].u[3] = y;
+ }
+}
+#endif
+/**
+ * This function is one of the functions used in the initialization
+ * by init_by_array.
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
+static uint32_t func1(uint32_t x) {
+ return (x ^ (x >> 27)) * (uint32_t)1664525UL;
+}
+
+/**
+ * This function is one of the functions used in the initialization
+ * by init_by_array.
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
+static uint32_t func2(uint32_t x) {
+ return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
+}
+
+/**
+ * This function certifies the period of 2^{MEXP}.
+ */
+static void period_certification(sfmt_t *ctx) {
+ int inner = 0;
+ int i, j;
+ uint32_t work;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ for (i = 0; i < 4; i++)
+ inner ^= psfmt32[idxof(i)] & parity[i];
+ for (i = 16; i > 0; i >>= 1)
+ inner ^= inner >> i;
+ inner &= 1;
+ /* check OK */
+ if (inner == 1) {
+ return;
+ }
+	/* check failed: flip one bit so that the parity condition holds */
+ for (i = 0; i < 4; i++) {
+ work = 1;
+ for (j = 0; j < 32; j++) {
+ if ((work & parity[i]) != 0) {
+ psfmt32[idxof(i)] ^= work;
+ return;
+ }
+ work = work << 1;
+ }
+ }
+}
+
+/*----------------
+ PUBLIC FUNCTIONS
+ ----------------*/
+/**
+ * This function returns the identification string.
+ * The string shows the word size, the Mersenne exponent,
+ * and all parameters of this generator.
+ */
+const char *get_idstring(void) {
+ return IDSTR;
+}
+
+/**
+ * This function returns the minimum size of the array used by the \b
+ * fill_array32() function.
+ * @return minimum size of the array used by the fill_array32() function.
+ */
+int get_min_array_size32(void) {
+ return N32;
+}
+
+/**
+ * This function returns the minimum size of the array used by the \b
+ * fill_array64() function.
+ * @return minimum size of the array used by the fill_array64() function.
+ */
+int get_min_array_size64(void) {
+ return N64;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates and returns a 32-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * @return 32-bit pseudorandom number
+ */
+uint32_t gen_rand32(sfmt_t *ctx) {
+ uint32_t r;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ assert(ctx->initialized);
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+ r = psfmt32[ctx->idx++];
+ return r;
+}
+
+/* Generate a random integer in [0..limit). */
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
+ uint32_t ret, above;
+
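+    /*
+     * Reject values in [above..2^32) so that the modulo reduction below is
+     * unbiased.
+     */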
+ above = 0xffffffffU - (0xffffffffU % limit);
+ while (1) {
+ ret = gen_rand32(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+#endif
+/**
+ * This function generates and returns a 64-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * The function gen_rand64 should not be called after gen_rand32,
+ * unless the generator is initialized again.
+ * @return 64-bit pseudorandom number
+ */
+uint64_t gen_rand64(sfmt_t *ctx) {
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ uint32_t r1, r2;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+#else
+ uint64_t r;
+ uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
+#endif
+
+ assert(ctx->initialized);
+ assert(ctx->idx % 2 == 0);
+
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ r1 = psfmt32[ctx->idx];
+ r2 = psfmt32[ctx->idx + 1];
+ ctx->idx += 2;
+ return ((uint64_t)r2 << 32) | r1;
+#else
+ r = psfmt64[ctx->idx / 2];
+ ctx->idx += 2;
+ return r;
+#endif
+}
+
+/* Generate a random integer in [0..limit). */
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
+ uint64_t ret, above;
+
+ above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
+ while (1) {
+ ret = gen_rand64(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates pseudorandom 32-bit integers in the
+ * specified array[] in one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 624 and a
+ * multiple of four. Generation by this function is much faster than
+ * repeated calls to gen_rand32().
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function cannot be
+ * used after calling a gen_rand function without reinitialization.
+ *
+ * @param array an array where pseudorandom 32-bit integers are filled
+ * by this function. The pointer to the array must be \b "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 32-bit pseudorandom integers to be
+ * generated. size must be a multiple of 4, and greater than or equal
+ * to (MEXP / 128 + 1) * 4.
+ *
+ * @note \b memalign or \b posix_memalign can be used to get aligned
+ * memory. Mac OS X doesn't have these functions, but \b malloc on OS X
+ * returns a pointer to an adequately aligned memory block.
+ */
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 4 == 0);
+ assert(size >= N32);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 4);
+ ctx->idx = N32;
+}
+#endif
+
+/**
+ * This function generates pseudorandom 64-bit integers in the
+ * specified array[] in one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 312 and a
+ * multiple of two. Generation by this function is much faster than
+ * repeated calls to gen_rand64().
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function cannot be
+ * used after calling a gen_rand function without reinitialization.
+ *
+ * @param array an array where pseudorandom 64-bit integers are filled
+ * by this function. The pointer to the array must be "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 64-bit pseudorandom integers to be
+ * generated. size must be a multiple of 2, and greater than or equal
+ * to (MEXP / 128 + 1) * 2
+ *
+ * @note \b memalign or \b posix_memalign can be used to get aligned
+ * memory. Mac OS X doesn't have these functions, but \b malloc on OS X
+ * returns a pointer to an adequately aligned memory block.
+ */
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 2 == 0);
+ assert(size >= N64);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 2);
+ ctx->idx = N32;
+
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+    swap((w128_t *)array, size / 2);
+#endif
+}
+
+/**
+ * This function initializes the internal state array with a 32-bit
+ * integer seed.
+ *
+ * @param seed a 32-bit integer used as the seed.
+ */
+sfmt_t *init_gen_rand(uint32_t seed) {
+ void *p;
+ sfmt_t *ctx;
+ int i;
+ uint32_t *psfmt32;
+
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ psfmt32[idxof(0)] = seed;
+ for (i = 1; i < N32; i++) {
+ psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
+ ^ (psfmt32[idxof(i - 1)] >> 30))
+ + i;
+ }
+ ctx->idx = N32;
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+/**
+ * This function initializes the internal state array
+ * with an array of 32-bit integers used as the seed.
+ * @param init_key the array of 32-bit integers, used as a seed.
+ * @param key_length the length of init_key.
+ */
+sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
+ void *p;
+ sfmt_t *ctx;
+ int i, j, count;
+ uint32_t r;
+ int lag;
+ int mid;
+ int size = N * 4;
+ uint32_t *psfmt32;
+
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ if (size >= 623) {
+ lag = 11;
+ } else if (size >= 68) {
+ lag = 7;
+ } else if (size >= 39) {
+ lag = 5;
+ } else {
+ lag = 3;
+ }
+ mid = (size - lag) / 2;
+
+ memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
+ if (key_length + 1 > N32) {
+ count = key_length + 1;
+ } else {
+ count = N32;
+ }
+ r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
+ ^ psfmt32[idxof(N32 - 1)]);
+ psfmt32[idxof(mid)] += r;
+ r += key_length;
+ psfmt32[idxof(mid + lag)] += r;
+ psfmt32[idxof(0)] = r;
+
+ count--;
+ for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += init_key[j] + i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ for (; j < count; j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ for (j = 0; j < N32; j++) {
+ r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ + psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] ^= r;
+ r -= i;
+ psfmt32[idxof((i + mid + lag) % N32)] ^= r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+
+ ctx->idx = N32;
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+void fini_gen_rand(sfmt_t *ctx) {
+ assert(ctx != NULL);
+
+ ctx->initialized = 0;
+ free(ctx);
+}
diff --git a/memory/jemalloc/src/test/src/btalloc.c b/memory/jemalloc/src/test/src/btalloc.c
new file mode 100644
index 000000000..9a253d978
--- /dev/null
+++ b/memory/jemalloc/src/test/src/btalloc.c
@@ -0,0 +1,8 @@
+#include "test/jemalloc_test.h"
+
+void *
+btalloc(size_t size, unsigned bits)
+{
+
+ return (btalloc_0(size, bits));
+}
diff --git a/memory/jemalloc/src/test/src/btalloc_0.c b/memory/jemalloc/src/test/src/btalloc_0.c
new file mode 100644
index 000000000..77d8904ea
--- /dev/null
+++ b/memory/jemalloc/src/test/src/btalloc_0.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+btalloc_n_gen(0)
diff --git a/memory/jemalloc/src/test/src/btalloc_1.c b/memory/jemalloc/src/test/src/btalloc_1.c
new file mode 100644
index 000000000..4c126c309
--- /dev/null
+++ b/memory/jemalloc/src/test/src/btalloc_1.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+btalloc_n_gen(1)
diff --git a/memory/jemalloc/src/test/src/math.c b/memory/jemalloc/src/test/src/math.c
new file mode 100644
index 000000000..887a36390
--- /dev/null
+++ b/memory/jemalloc/src/test/src/math.c
@@ -0,0 +1,2 @@
+#define MATH_C_
+#include "test/jemalloc_test.h"
diff --git a/memory/jemalloc/src/test/src/mq.c b/memory/jemalloc/src/test/src/mq.c
new file mode 100644
index 000000000..40b31c15c
--- /dev/null
+++ b/memory/jemalloc/src/test/src/mq.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Sleep for approximately ns nanoseconds. Neither a lower nor an upper bound
+ * on sleep time is guaranteed.
+ */
+void
+mq_nanosleep(unsigned ns)
+{
+
+ assert(ns <= 1000*1000*1000);
+
+#ifdef _WIN32
+ Sleep(ns / 1000);
+#else
+ {
+ struct timespec timeout;
+
+ if (ns < 1000*1000*1000) {
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = ns;
+ } else {
+ timeout.tv_sec = 1;
+ timeout.tv_nsec = 0;
+ }
+ nanosleep(&timeout, NULL);
+ }
+#endif
+}
diff --git a/memory/jemalloc/src/test/src/mtx.c b/memory/jemalloc/src/test/src/mtx.c
new file mode 100644
index 000000000..8a5dfdd99
--- /dev/null
+++ b/memory/jemalloc/src/test/src/mtx.c
@@ -0,0 +1,73 @@
+#include "test/jemalloc_test.h"
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
+bool
+mtx_init(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
+ return (true);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ mtx->lock = OS_UNFAIR_LOCK_INIT;
+#elif (defined(JEMALLOC_OSSPIN))
+ mtx->lock = 0;
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
+ if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#endif
+ return (false);
+}
+
+void
+mtx_fini(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+#elif (defined(JEMALLOC_OSSPIN))
+#else
+ pthread_mutex_destroy(&mtx->lock);
+#endif
+}
+
+void
+mtx_lock(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+ EnterCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_lock(&mtx->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLockLock(&mtx->lock);
+#else
+ pthread_mutex_lock(&mtx->lock);
+#endif
+}
+
+void
+mtx_unlock(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+ LeaveCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_unlock(&mtx->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLockUnlock(&mtx->lock);
+#else
+ pthread_mutex_unlock(&mtx->lock);
+#endif
+}
diff --git a/memory/jemalloc/src/test/src/test.c b/memory/jemalloc/src/test/src/test.c
new file mode 100644
index 000000000..d70cc7501
--- /dev/null
+++ b/memory/jemalloc/src/test/src/test.c
@@ -0,0 +1,133 @@
+#include "test/jemalloc_test.h"
+
+static unsigned test_count = 0;
+static test_status_t test_counts[test_status_count] = {0, 0, 0};
+static test_status_t test_status = test_status_pass;
+static const char * test_name = "";
+
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+test_skip(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_skip;
+}
+
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+test_fail(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_fail;
+}
+
+static const char *
+test_status_string(test_status_t test_status)
+{
+
+ switch (test_status) {
+ case test_status_pass: return "pass";
+ case test_status_skip: return "skip";
+ case test_status_fail: return "fail";
+ default: not_reached();
+ }
+}
+
+void
+p_test_init(const char *name)
+{
+
+ test_count++;
+ test_status = test_status_pass;
+ test_name = name;
+}
+
+void
+p_test_fini(void)
+{
+
+ test_counts[test_status]++;
+ malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
+}
+
+static test_status_t
+p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
+{
+ test_status_t ret;
+
+ if (do_malloc_init) {
+ /*
+ * Make sure initialization occurs prior to running tests.
+ * Tests are special because they may use internal facilities
+ * prior to triggering initialization as a side effect of
+ * calling into the public API.
+ */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return (test_status_fail);
+ }
+ }
+
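+	/*
+	 * Keep the numerically highest (most severe) status seen across all
+	 * tests as the overall result.
+	 */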
+ ret = test_status_pass;
+ for (; t != NULL; t = va_arg(ap, test_t *)) {
+ t();
+ if (test_status > ret)
+ ret = test_status;
+ }
+
+ malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
+ test_status_string(test_status_pass),
+ test_counts[test_status_pass], test_count,
+ test_status_string(test_status_skip),
+ test_counts[test_status_skip], test_count,
+ test_status_string(test_status_fail),
+ test_counts[test_status_fail], test_count);
+
+ return (ret);
+}
+
+test_status_t
+p_test(test_t *t, ...)
+{
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ ret = p_test_impl(true, t, ap);
+ va_end(ap);
+
+ return (ret);
+}
+
+test_status_t
+p_test_no_malloc_init(test_t *t, ...)
+{
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ ret = p_test_impl(false, t, ap);
+ va_end(ap);
+
+ return (ret);
+}
+
+void
+p_test_fail(const char *prefix, const char *message)
+{
+
+ malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
+ test_status = test_status_fail;
+}
diff --git a/memory/jemalloc/src/test/src/thd.c b/memory/jemalloc/src/test/src/thd.c
new file mode 100644
index 000000000..c9d006586
--- /dev/null
+++ b/memory/jemalloc/src/test/src/thd.c
@@ -0,0 +1,39 @@
+#include "test/jemalloc_test.h"
+
+#ifdef _WIN32
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
+{
+ LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
+ *thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
+ if (*thd == NULL)
+ test_fail("Error in CreateThread()\n");
+}
+
+void
+thd_join(thd_t thd, void **ret)
+{
+
+ if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
+ DWORD exit_code;
+ GetExitCodeThread(thd, (LPDWORD) &exit_code);
+ *ret = (void *)(uintptr_t)exit_code;
+ }
+}
+
+#else
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
+{
+
+ if (pthread_create(thd, NULL, proc, arg) != 0)
+ test_fail("Error in pthread_create()\n");
+}
+
+void
+thd_join(thd_t thd, void **ret)
+{
+
+ pthread_join(thd, ret);
+}
+#endif
diff --git a/memory/jemalloc/src/test/src/timer.c b/memory/jemalloc/src/test/src/timer.c
new file mode 100644
index 000000000..3c7e63a26
--- /dev/null
+++ b/memory/jemalloc/src/test/src/timer.c
@@ -0,0 +1,60 @@
+#include "test/jemalloc_test.h"
+
+void
+timer_start(timedelta_t *timer)
+{
+
+ nstime_init(&timer->t0, 0);
+ nstime_update(&timer->t0);
+}
+
+void
+timer_stop(timedelta_t *timer)
+{
+
+ nstime_copy(&timer->t1, &timer->t0);
+ nstime_update(&timer->t1);
+}
+
+uint64_t
+timer_usec(const timedelta_t *timer)
+{
+ nstime_t delta;
+
+ nstime_copy(&delta, &timer->t1);
+ nstime_subtract(&delta, &timer->t0);
+ return (nstime_ns(&delta) / 1000);
+}
+
+void
+timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
+{
+ uint64_t t0 = timer_usec(a);
+ uint64_t t1 = timer_usec(b);
+ uint64_t mult;
+ size_t i = 0;
+ size_t j, n;
+
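+	/*
+	 * Render t0/t1 as a decimal string one digit at a time, rounding the
+	 * final fractional digit so that the result fits in buflen characters.
+	 */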
+ /* Whole. */
+ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
+ i += n;
+ if (i >= buflen)
+ return;
+ mult = 1;
+ for (j = 0; j < n; j++)
+ mult *= 10;
+
+ /* Decimal. */
+ n = malloc_snprintf(&buf[i], buflen-i, ".");
+ i += n;
+
+ /* Fraction. */
+ while (i < buflen-1) {
+ uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
+ >= 5)) ? 1 : 0;
+ n = malloc_snprintf(&buf[i], buflen-i,
+ "%"FMTu64, (t0 * mult / t1) % 10 + round);
+ i += n;
+ mult *= 10;
+ }
+}
diff --git a/memory/jemalloc/src/test/stress/microbench.c b/memory/jemalloc/src/test/stress/microbench.c
new file mode 100644
index 000000000..7dc45f89c
--- /dev/null
+++ b/memory/jemalloc/src/test/stress/microbench.c
@@ -0,0 +1,182 @@
+#include "test/jemalloc_test.h"
+
+JEMALLOC_INLINE_C void
+time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
+ void (*func)(void))
+{
+ uint64_t i;
+
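+	/* Warm up (untimed) so that one-time costs don't skew the measurement. */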
+ for (i = 0; i < nwarmup; i++)
+ func();
+ timer_start(timer);
+ for (i = 0; i < niter; i++)
+ func();
+ timer_stop(timer);
+}
+
+void
+compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
+    void (*func_a)(void), const char *name_b, void (*func_b)(void))
+{
+ timedelta_t timer_a, timer_b;
+ char ratio_buf[6];
+ void *p;
+
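+	/*
+	 * Allocate a byte before timing and free it afterward so that
+	 * allocator initialization happens outside the timed loops.
+	 */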
+ p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+
+ time_func(&timer_a, nwarmup, niter, func_a);
+ time_func(&timer_b, nwarmup, niter, func_b);
+
+ timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
+ malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, "
+ "%s=%"FMTu64"us, ratio=1:%s\n",
+ niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b),
+ ratio_buf);
+
+ dallocx(p, 0);
+}
+
+static void
+malloc_free(void)
+{
+ /* The compiler can optimize away free(malloc(1))! */
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ free(p);
+}
+
+static void
+mallocx_free(void)
+{
+ void *p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_malloc_vs_mallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
+ malloc_free, "mallocx", mallocx_free);
+}
+TEST_END
+
+static void
+malloc_dallocx(void)
+{
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ dallocx(p, 0);
+}
+
+static void
+malloc_sdallocx(void)
+{
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ sdallocx(p, 1, 0);
+}
+
+TEST_BEGIN(test_free_vs_dallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
+ "dallocx", malloc_dallocx);
+}
+TEST_END
+
+TEST_BEGIN(test_dallocx_vs_sdallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
+ "sdallocx", malloc_sdallocx);
+}
+TEST_END
+
+static void
+malloc_mus_free(void)
+{
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ malloc_usable_size(p);
+ free(p);
+}
+
+static void
+malloc_sallocx_free(void)
+{
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (sallocx(p, 0) < 1)
+ test_fail("Unexpected sallocx() failure");
+ free(p);
+}
+
+TEST_BEGIN(test_mus_vs_sallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
+ malloc_mus_free, "sallocx", malloc_sallocx_free);
+}
+TEST_END
+
+static void
+malloc_nallocx_free(void)
+{
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (nallocx(1, 0) < 1)
+ test_fail("Unexpected nallocx() failure");
+ free(p);
+}
+
+TEST_BEGIN(test_sallocx_vs_nallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
+ malloc_sallocx_free, "nallocx", malloc_nallocx_free);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_malloc_vs_mallocx,
+ test_free_vs_dallocx,
+ test_dallocx_vs_sdallocx,
+ test_mus_vs_sallocx,
+ test_sallocx_vs_nallocx));
+}
diff --git a/memory/jemalloc/src/test/test.sh.in b/memory/jemalloc/src/test/test.sh.in
new file mode 100644
index 000000000..a39f99f6b
--- /dev/null
+++ b/memory/jemalloc/src/test/test.sh.in
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+case @abi@ in
+ macho)
+ export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib"
+ ;;
+ pecoff)
+ export PATH="${PATH}:@objroot@lib"
+ ;;
+ *)
+ ;;
+esac
+
+# Corresponds to test_status_t.
+pass_code=0
+skip_code=1
+fail_code=2
+
+pass_count=0
+skip_count=0
+fail_count=0
+for t in $@; do
+  if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count -ne 0 ] ; then
+ echo
+ fi
+ echo "=== ${t} ==="
+ ${t}@exe@ @abs_srcroot@ @abs_objroot@
+ result_code=$?
+ case ${result_code} in
+ ${pass_code})
+ pass_count=$((pass_count+1))
+ ;;
+ ${skip_code})
+ skip_count=$((skip_count+1))
+ ;;
+ ${fail_code})
+ fail_count=$((fail_count+1))
+ ;;
+ *)
+ echo "Test harness error" 1>&2
+ exit 1
+ esac
+done
+
+total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
+echo
+echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
+
+if [ ${fail_count} -eq 0 ] ; then
+ exit 0
+else
+ exit 1
+fi
diff --git a/memory/jemalloc/src/test/unit/SFMT.c b/memory/jemalloc/src/test/unit/SFMT.c
new file mode 100644
index 000000000..ba4be8702
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/SFMT.c
@@ -0,0 +1,1605 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "test/jemalloc_test.h"
+
+#define BLOCK_SIZE 10000
+#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
+#define COUNT_1 1000
+#define COUNT_2 700
+
+static const uint32_t init_gen_rand_32_expected[] = {
+ 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
+ 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U,
+ 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
+ 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
+ 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U,
+ 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U,
+ 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
+ 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U,
+ 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
+ 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U,
+ 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
+ 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
+ 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U,
+ 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
+ 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
+ 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U,
+ 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
+ 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
+ 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
+ 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
+ 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U,
+ 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U,
+ 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
+ 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U,
+ 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
+ 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U,
+ 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
+ 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
+ 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U,
+ 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U,
+ 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U,
+ 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
+ 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U,
+ 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U,
+ 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U,
+ 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
+ 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U,
+ 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
+ 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
+ 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
+ 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U,
+ 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U,
+ 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U,
+ 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
+ 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
+ 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U,
+ 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U,
+ 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U,
+ 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
+ 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U,
+ 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
+ 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U,
+ 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U,
+ 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U,
+ 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U,
+ 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U,
+ 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U,
+ 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
+ 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U,
+ 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U,
+ 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U,
+ 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U,
+ 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U,
+ 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U,
+ 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
+ 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
+ 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U,
+ 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
+ 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U,
+ 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
+ 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U,
+ 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U,
+ 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U,
+ 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U,
+ 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
+ 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U,
+ 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U,
+ 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U,
+ 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
+ 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U,
+ 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
+ 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U,
+ 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U,
+ 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
+ 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U,
+ 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U,
+ 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U,
+ 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U,
+ 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U,
+ 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U,
+ 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U,
+ 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
+ 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U,
+ 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U,
+ 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
+ 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U,
+ 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
+ 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U,
+ 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
+ 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
+ 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U,
+ 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
+ 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
+ 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U,
+ 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U,
+ 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
+ 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U,
+ 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U,
+ 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
+ 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U,
+ 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U,
+ 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
+ 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U,
+ 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
+ 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U,
+ 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U,
+ 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
+ 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U,
+ 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U,
+ 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U,
+ 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
+ 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U,
+ 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U,
+ 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
+ 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
+ 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U,
+ 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
+ 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U,
+ 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U,
+ 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U,
+ 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U,
+ 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
+ 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U,
+ 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U,
+ 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
+ 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
+ 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U,
+ 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U,
+ 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
+ 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U,
+ 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U,
+ 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U,
+ 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U,
+ 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U,
+ 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
+ 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U,
+ 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U,
+ 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U,
+ 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U,
+ 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U,
+ 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U,
+ 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
+ 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
+ 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U,
+ 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
+ 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U,
+ 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U,
+ 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
+ 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U,
+ 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U,
+ 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U,
+ 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U,
+ 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
+ 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U,
+ 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U,
+ 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
+ 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U,
+ 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U,
+ 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U,
+ 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U,
+ 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U,
+ 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U,
+ 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
+ 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
+ 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
+ 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
+ 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U,
+ 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U,
+ 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U,
+ 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U,
+ 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
+ 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U,
+ 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
+ 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U,
+ 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U,
+ 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
+ 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U,
+ 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
+ 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U,
+ 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U,
+ 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U,
+ 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U,
+ 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U,
+ 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
+ 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
+ 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U,
+ 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U,
+ 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U,
+ 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U,
+ 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U
+};
+static const uint32_t init_by_array_32_expected[] = {
+ 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U,
+ 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U,
+ 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
+ 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U,
+ 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
+ 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U,
+ 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U,
+ 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
+ 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U,
+ 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
+ 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U,
+ 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
+ 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U,
+ 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U,
+ 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U,
+ 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U,
+ 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U,
+ 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U,
+ 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U,
+ 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U,
+ 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
+ 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
+ 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
+ 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U,
+ 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
+ 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
+ 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U,
+ 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U,
+ 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U,
+ 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U,
+ 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U,
+ 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
+ 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
+ 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
+ 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U,
+ 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U,
+ 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U,
+ 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U,
+ 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U,
+ 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
+ 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
+ 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
+ 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U,
+ 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U,
+ 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U,
+ 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
+ 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U,
+ 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
+ 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
+ 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U,
+ 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U,
+ 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
+ 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
+ 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
+ 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U,
+ 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U,
+ 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U,
+ 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U,
+ 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U,
+ 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U,
+ 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
+ 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U,
+ 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U,
+ 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U,
+ 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
+ 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U,
+ 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U,
+ 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U,
+ 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U,
+ 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
+ 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U,
+ 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U,
+ 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U,
+ 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U,
+ 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U,
+ 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U,
+ 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
+ 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
+ 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
+ 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
+ 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U,
+ 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
+ 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
+ 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
+ 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
+ 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U,
+ 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U,
+ 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U,
+ 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U,
+ 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
+ 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U,
+ 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U,
+ 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U,
+ 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U,
+ 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U,
+ 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
+ 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U,
+ 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U,
+ 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U,
+ 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
+ 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U,
+ 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U,
+ 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U,
+ 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U,
+ 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U,
+ 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U,
+ 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U,
+ 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U,
+ 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U,
+ 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U,
+ 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U,
+ 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
+ 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
+ 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U,
+ 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U,
+ 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
+ 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U,
+ 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
+ 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U,
+ 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
+ 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U,
+ 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U,
+ 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
+ 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U,
+ 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U,
+ 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U,
+ 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U,
+ 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U,
+ 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U,
+ 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
+ 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U,
+ 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
+ 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
+ 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U,
+ 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U,
+ 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
+ 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U,
+ 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U,
+ 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U,
+ 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U,
+ 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
+ 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U,
+ 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
+ 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
+ 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U,
+ 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U,
+ 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U,
+ 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U,
+ 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U,
+ 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U,
+ 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
+ 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
+ 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U,
+ 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U,
+ 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
+ 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U,
+ 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U,
+ 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
+ 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
+ 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
+ 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
+ 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U,
+ 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U,
+ 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U,
+ 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
+ 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U,
+ 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U,
+ 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U,
+ 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U,
+ 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U,
+ 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
+ 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U,
+ 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U,
+ 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U,
+ 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
+ 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U,
+ 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U,
+ 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U,
+ 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U,
+ 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U,
+ 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U,
+ 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U,
+ 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
+ 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
+ 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
+ 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
+ 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U,
+ 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U,
+ 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U,
+ 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U,
+ 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U,
+ 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U,
+ 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U,
+ 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U,
+ 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U,
+ 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
+ 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
+ 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U,
+ 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
+ 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
+};
+static const uint64_t init_gen_rand_64_expected[] = {
+ KQU(16924766246869039260), KQU( 8201438687333352714),
+ KQU( 2265290287015001750), KQU(18397264611805473832),
+ KQU( 3375255223302384358), KQU( 6345559975416828796),
+ KQU(18229739242790328073), KQU( 7596792742098800905),
+ KQU( 255338647169685981), KQU( 2052747240048610300),
+ KQU(18328151576097299343), KQU(12472905421133796567),
+ KQU(11315245349717600863), KQU(16594110197775871209),
+ KQU(15708751964632456450), KQU(10452031272054632535),
+ KQU(11097646720811454386), KQU( 4556090668445745441),
+ KQU(17116187693090663106), KQU(14931526836144510645),
+ KQU( 9190752218020552591), KQU( 9625800285771901401),
+ KQU(13995141077659972832), KQU( 5194209094927829625),
+ KQU( 4156788379151063303), KQU( 8523452593770139494),
+ KQU(14082382103049296727), KQU( 2462601863986088483),
+ KQU( 3030583461592840678), KQU( 5221622077872827681),
+ KQU( 3084210671228981236), KQU(13956758381389953823),
+ KQU(13503889856213423831), KQU(15696904024189836170),
+ KQU( 4612584152877036206), KQU( 6231135538447867881),
+ KQU(10172457294158869468), KQU( 6452258628466708150),
+ KQU(14044432824917330221), KQU( 370168364480044279),
+ KQU(10102144686427193359), KQU( 667870489994776076),
+ KQU( 2732271956925885858), KQU(18027788905977284151),
+ KQU(15009842788582923859), KQU( 7136357960180199542),
+ KQU(15901736243475578127), KQU(16951293785352615701),
+ KQU(10551492125243691632), KQU(17668869969146434804),
+ KQU(13646002971174390445), KQU( 9804471050759613248),
+ KQU( 5511670439655935493), KQU(18103342091070400926),
+ KQU(17224512747665137533), KQU(15534627482992618168),
+ KQU( 1423813266186582647), KQU(15821176807932930024),
+ KQU( 30323369733607156), KQU(11599382494723479403),
+ KQU( 653856076586810062), KQU( 3176437395144899659),
+ KQU(14028076268147963917), KQU(16156398271809666195),
+ KQU( 3166955484848201676), KQU( 5746805620136919390),
+ KQU(17297845208891256593), KQU(11691653183226428483),
+ KQU(17900026146506981577), KQU(15387382115755971042),
+ KQU(16923567681040845943), KQU( 8039057517199388606),
+ KQU(11748409241468629263), KQU( 794358245539076095),
+ KQU(13438501964693401242), KQU(14036803236515618962),
+ KQU( 5252311215205424721), KQU(17806589612915509081),
+ KQU( 6802767092397596006), KQU(14212120431184557140),
+ KQU( 1072951366761385712), KQU(13098491780722836296),
+ KQU( 9466676828710797353), KQU(12673056849042830081),
+ KQU(12763726623645357580), KQU(16468961652999309493),
+ KQU(15305979875636438926), KQU(17444713151223449734),
+ KQU( 5692214267627883674), KQU(13049589139196151505),
+ KQU( 880115207831670745), KQU( 1776529075789695498),
+ KQU(16695225897801466485), KQU(10666901778795346845),
+ KQU( 6164389346722833869), KQU( 2863817793264300475),
+ KQU( 9464049921886304754), KQU( 3993566636740015468),
+ KQU( 9983749692528514136), KQU(16375286075057755211),
+ KQU(16042643417005440820), KQU(11445419662923489877),
+ KQU( 7999038846885158836), KQU( 6721913661721511535),
+ KQU( 5363052654139357320), KQU( 1817788761173584205),
+ KQU(13290974386445856444), KQU( 4650350818937984680),
+ KQU( 8219183528102484836), KQU( 1569862923500819899),
+ KQU( 4189359732136641860), KQU(14202822961683148583),
+ KQU( 4457498315309429058), KQU(13089067387019074834),
+ KQU(11075517153328927293), KQU(10277016248336668389),
+ KQU( 7070509725324401122), KQU(17808892017780289380),
+ KQU(13143367339909287349), KQU( 1377743745360085151),
+ KQU( 5749341807421286485), KQU(14832814616770931325),
+ KQU( 7688820635324359492), KQU(10960474011539770045),
+ KQU( 81970066653179790), KQU(12619476072607878022),
+ KQU( 4419566616271201744), KQU(15147917311750568503),
+ KQU( 5549739182852706345), KQU( 7308198397975204770),
+ KQU(13580425496671289278), KQU(17070764785210130301),
+ KQU( 8202832846285604405), KQU( 6873046287640887249),
+ KQU( 6927424434308206114), KQU( 6139014645937224874),
+ KQU(10290373645978487639), KQU(15904261291701523804),
+ KQU( 9628743442057826883), KQU(18383429096255546714),
+ KQU( 4977413265753686967), KQU( 7714317492425012869),
+ KQU( 9025232586309926193), KQU(14627338359776709107),
+ KQU(14759849896467790763), KQU(10931129435864423252),
+ KQU( 4588456988775014359), KQU(10699388531797056724),
+ KQU( 468652268869238792), KQU( 5755943035328078086),
+ KQU( 2102437379988580216), KQU( 9986312786506674028),
+ KQU( 2654207180040945604), KQU( 8726634790559960062),
+ KQU( 100497234871808137), KQU( 2800137176951425819),
+ KQU( 6076627612918553487), KQU( 5780186919186152796),
+ KQU( 8179183595769929098), KQU( 6009426283716221169),
+ KQU( 2796662551397449358), KQU( 1756961367041986764),
+ KQU( 6972897917355606205), KQU(14524774345368968243),
+ KQU( 2773529684745706940), KQU( 4853632376213075959),
+ KQU( 4198177923731358102), KQU( 8271224913084139776),
+ KQU( 2741753121611092226), KQU(16782366145996731181),
+ KQU(15426125238972640790), KQU(13595497100671260342),
+ KQU( 3173531022836259898), KQU( 6573264560319511662),
+ KQU(18041111951511157441), KQU( 2351433581833135952),
+ KQU( 3113255578908173487), KQU( 1739371330877858784),
+ KQU(16046126562789165480), KQU( 8072101652214192925),
+ KQU(15267091584090664910), KQU( 9309579200403648940),
+ KQU( 5218892439752408722), KQU(14492477246004337115),
+ KQU(17431037586679770619), KQU( 7385248135963250480),
+ KQU( 9580144956565560660), KQU( 4919546228040008720),
+ KQU(15261542469145035584), KQU(18233297270822253102),
+ KQU( 5453248417992302857), KQU( 9309519155931460285),
+ KQU(10342813012345291756), KQU(15676085186784762381),
+ KQU(15912092950691300645), KQU( 9371053121499003195),
+ KQU( 9897186478226866746), KQU(14061858287188196327),
+ KQU( 122575971620788119), KQU(12146750969116317754),
+ KQU( 4438317272813245201), KQU( 8332576791009527119),
+ KQU(13907785691786542057), KQU(10374194887283287467),
+ KQU( 2098798755649059566), KQU( 3416235197748288894),
+ KQU( 8688269957320773484), KQU( 7503964602397371571),
+ KQU(16724977015147478236), KQU( 9461512855439858184),
+ KQU(13259049744534534727), KQU( 3583094952542899294),
+ KQU( 8764245731305528292), KQU(13240823595462088985),
+ KQU(13716141617617910448), KQU(18114969519935960955),
+ KQU( 2297553615798302206), KQU( 4585521442944663362),
+ KQU(17776858680630198686), KQU( 4685873229192163363),
+ KQU( 152558080671135627), KQU(15424900540842670088),
+ KQU(13229630297130024108), KQU(17530268788245718717),
+ KQU(16675633913065714144), KQU( 3158912717897568068),
+ KQU(15399132185380087288), KQU( 7401418744515677872),
+ KQU(13135412922344398535), KQU( 6385314346100509511),
+ KQU(13962867001134161139), KQU(10272780155442671999),
+ KQU(12894856086597769142), KQU(13340877795287554994),
+ KQU(12913630602094607396), KQU(12543167911119793857),
+ KQU(17343570372251873096), KQU(10959487764494150545),
+ KQU( 6966737953093821128), KQU(13780699135496988601),
+ KQU( 4405070719380142046), KQU(14923788365607284982),
+ KQU( 2869487678905148380), KQU( 6416272754197188403),
+ KQU(15017380475943612591), KQU( 1995636220918429487),
+ KQU( 3402016804620122716), KQU(15800188663407057080),
+ KQU(11362369990390932882), KQU(15262183501637986147),
+ KQU(10239175385387371494), KQU( 9352042420365748334),
+ KQU( 1682457034285119875), KQU( 1724710651376289644),
+ KQU( 2038157098893817966), KQU( 9897825558324608773),
+ KQU( 1477666236519164736), KQU(16835397314511233640),
+ KQU(10370866327005346508), KQU(10157504370660621982),
+ KQU(12113904045335882069), KQU(13326444439742783008),
+ KQU(11302769043000765804), KQU(13594979923955228484),
+ KQU(11779351762613475968), KQU( 3786101619539298383),
+ KQU( 8021122969180846063), KQU(15745904401162500495),
+ KQU(10762168465993897267), KQU(13552058957896319026),
+ KQU(11200228655252462013), KQU( 5035370357337441226),
+ KQU( 7593918984545500013), KQU( 5418554918361528700),
+ KQU( 4858270799405446371), KQU( 9974659566876282544),
+ KQU(18227595922273957859), KQU( 2772778443635656220),
+ KQU(14285143053182085385), KQU( 9939700992429600469),
+ KQU(12756185904545598068), KQU( 2020783375367345262),
+ KQU( 57026775058331227), KQU( 950827867930065454),
+ KQU( 6602279670145371217), KQU( 2291171535443566929),
+ KQU( 5832380724425010313), KQU( 1220343904715982285),
+ KQU(17045542598598037633), KQU(15460481779702820971),
+ KQU(13948388779949365130), KQU(13975040175430829518),
+ KQU(17477538238425541763), KQU(11104663041851745725),
+ KQU(15860992957141157587), KQU(14529434633012950138),
+ KQU( 2504838019075394203), KQU( 7512113882611121886),
+ KQU( 4859973559980886617), KQU( 1258601555703250219),
+ KQU(15594548157514316394), KQU( 4516730171963773048),
+ KQU(11380103193905031983), KQU( 6809282239982353344),
+ KQU(18045256930420065002), KQU( 2453702683108791859),
+ KQU( 977214582986981460), KQU( 2006410402232713466),
+ KQU( 6192236267216378358), KQU( 3429468402195675253),
+ KQU(18146933153017348921), KQU(17369978576367231139),
+ KQU( 1246940717230386603), KQU(11335758870083327110),
+ KQU(14166488801730353682), KQU( 9008573127269635732),
+ KQU(10776025389820643815), KQU(15087605441903942962),
+ KQU( 1359542462712147922), KQU(13898874411226454206),
+ KQU(17911176066536804411), KQU( 9435590428600085274),
+ KQU( 294488509967864007), KQU( 8890111397567922046),
+ KQU( 7987823476034328778), KQU(13263827582440967651),
+ KQU( 7503774813106751573), KQU(14974747296185646837),
+ KQU( 8504765037032103375), KQU(17340303357444536213),
+ KQU( 7704610912964485743), KQU( 8107533670327205061),
+ KQU( 9062969835083315985), KQU(16968963142126734184),
+ KQU(12958041214190810180), KQU( 2720170147759570200),
+ KQU( 2986358963942189566), KQU(14884226322219356580),
+ KQU( 286224325144368520), KQU(11313800433154279797),
+ KQU(18366849528439673248), KQU(17899725929482368789),
+ KQU( 3730004284609106799), KQU( 1654474302052767205),
+ KQU( 5006698007047077032), KQU( 8196893913601182838),
+ KQU(15214541774425211640), KQU(17391346045606626073),
+ KQU( 8369003584076969089), KQU( 3939046733368550293),
+ KQU(10178639720308707785), KQU( 2180248669304388697),
+ KQU( 62894391300126322), KQU( 9205708961736223191),
+ KQU( 6837431058165360438), KQU( 3150743890848308214),
+ KQU(17849330658111464583), KQU(12214815643135450865),
+ KQU(13410713840519603402), KQU( 3200778126692046802),
+ KQU(13354780043041779313), KQU( 800850022756886036),
+ KQU(15660052933953067433), KQU( 6572823544154375676),
+ KQU(11030281857015819266), KQU(12682241941471433835),
+ KQU(11654136407300274693), KQU( 4517795492388641109),
+ KQU( 9757017371504524244), KQU(17833043400781889277),
+ KQU(12685085201747792227), KQU(10408057728835019573),
+ KQU( 98370418513455221), KQU( 6732663555696848598),
+ KQU(13248530959948529780), KQU( 3530441401230622826),
+ KQU(18188251992895660615), KQU( 1847918354186383756),
+ KQU( 1127392190402660921), KQU(11293734643143819463),
+ KQU( 3015506344578682982), KQU(13852645444071153329),
+ KQU( 2121359659091349142), KQU( 1294604376116677694),
+ KQU( 5616576231286352318), KQU( 7112502442954235625),
+ KQU(11676228199551561689), KQU(12925182803007305359),
+ KQU( 7852375518160493082), KQU( 1136513130539296154),
+ KQU( 5636923900916593195), KQU( 3221077517612607747),
+ KQU(17784790465798152513), KQU( 3554210049056995938),
+ KQU(17476839685878225874), KQU( 3206836372585575732),
+ KQU( 2765333945644823430), KQU(10080070903718799528),
+ KQU( 5412370818878286353), KQU( 9689685887726257728),
+ KQU( 8236117509123533998), KQU( 1951139137165040214),
+ KQU( 4492205209227980349), KQU(16541291230861602967),
+ KQU( 1424371548301437940), KQU( 9117562079669206794),
+ KQU(14374681563251691625), KQU(13873164030199921303),
+ KQU( 6680317946770936731), KQU(15586334026918276214),
+ KQU(10896213950976109802), KQU( 9506261949596413689),
+ KQU( 9903949574308040616), KQU( 6038397344557204470),
+ KQU( 174601465422373648), KQU(15946141191338238030),
+ KQU(17142225620992044937), KQU( 7552030283784477064),
+ KQU( 2947372384532947997), KQU( 510797021688197711),
+ KQU( 4962499439249363461), KQU( 23770320158385357),
+ KQU( 959774499105138124), KQU( 1468396011518788276),
+ KQU( 2015698006852312308), KQU( 4149400718489980136),
+ KQU( 5992916099522371188), KQU(10819182935265531076),
+ KQU(16189787999192351131), KQU( 342833961790261950),
+ KQU(12470830319550495336), KQU(18128495041912812501),
+ KQU( 1193600899723524337), KQU( 9056793666590079770),
+ KQU( 2154021227041669041), KQU( 4963570213951235735),
+ KQU( 4865075960209211409), KQU( 2097724599039942963),
+ KQU( 2024080278583179845), KQU(11527054549196576736),
+ KQU(10650256084182390252), KQU( 4808408648695766755),
+ KQU( 1642839215013788844), KQU(10607187948250398390),
+ KQU( 7076868166085913508), KQU( 730522571106887032),
+ KQU(12500579240208524895), KQU( 4484390097311355324),
+ KQU(15145801330700623870), KQU( 8055827661392944028),
+ KQU( 5865092976832712268), KQU(15159212508053625143),
+ KQU( 3560964582876483341), KQU( 4070052741344438280),
+ KQU( 6032585709886855634), KQU(15643262320904604873),
+ KQU( 2565119772293371111), KQU( 318314293065348260),
+ KQU(15047458749141511872), KQU( 7772788389811528730),
+ KQU( 7081187494343801976), KQU( 6465136009467253947),
+ KQU(10425940692543362069), KQU( 554608190318339115),
+ KQU(14796699860302125214), KQU( 1638153134431111443),
+ KQU(10336967447052276248), KQU( 8412308070396592958),
+ KQU( 4004557277152051226), KQU( 8143598997278774834),
+ KQU(16413323996508783221), KQU(13139418758033994949),
+ KQU( 9772709138335006667), KQU( 2818167159287157659),
+ KQU(17091740573832523669), KQU(14629199013130751608),
+ KQU(18268322711500338185), KQU( 8290963415675493063),
+ KQU( 8830864907452542588), KQU( 1614839084637494849),
+ KQU(14855358500870422231), KQU( 3472996748392519937),
+ KQU(15317151166268877716), KQU( 5825895018698400362),
+ KQU(16730208429367544129), KQU(10481156578141202800),
+ KQU( 4746166512382823750), KQU(12720876014472464998),
+ KQU( 8825177124486735972), KQU(13733447296837467838),
+ KQU( 6412293741681359625), KQU( 8313213138756135033),
+ KQU(11421481194803712517), KQU( 7997007691544174032),
+ KQU( 6812963847917605930), KQU( 9683091901227558641),
+ KQU(14703594165860324713), KQU( 1775476144519618309),
+ KQU( 2724283288516469519), KQU( 717642555185856868),
+ KQU( 8736402192215092346), KQU(11878800336431381021),
+ KQU( 4348816066017061293), KQU( 6115112756583631307),
+ KQU( 9176597239667142976), KQU(12615622714894259204),
+ KQU(10283406711301385987), KQU( 5111762509485379420),
+ KQU( 3118290051198688449), KQU( 7345123071632232145),
+ KQU( 9176423451688682359), KQU( 4843865456157868971),
+ KQU(12008036363752566088), KQU(12058837181919397720),
+ KQU( 2145073958457347366), KQU( 1526504881672818067),
+ KQU( 3488830105567134848), KQU(13208362960674805143),
+ KQU( 4077549672899572192), KQU( 7770995684693818365),
+ KQU( 1398532341546313593), KQU(12711859908703927840),
+ KQU( 1417561172594446813), KQU(17045191024194170604),
+ KQU( 4101933177604931713), KQU(14708428834203480320),
+ KQU(17447509264469407724), KQU(14314821973983434255),
+ KQU(17990472271061617265), KQU( 5087756685841673942),
+ KQU(12797820586893859939), KQU( 1778128952671092879),
+ KQU( 3535918530508665898), KQU( 9035729701042481301),
+ KQU(14808661568277079962), KQU(14587345077537747914),
+ KQU(11920080002323122708), KQU( 6426515805197278753),
+ KQU( 3295612216725984831), KQU(11040722532100876120),
+ KQU(12305952936387598754), KQU(16097391899742004253),
+ KQU( 4908537335606182208), KQU(12446674552196795504),
+ KQU(16010497855816895177), KQU( 9194378874788615551),
+ KQU( 3382957529567613384), KQU( 5154647600754974077),
+ KQU( 9801822865328396141), KQU( 9023662173919288143),
+ KQU(17623115353825147868), KQU( 8238115767443015816),
+ KQU(15811444159859002560), KQU( 9085612528904059661),
+ KQU( 6888601089398614254), KQU( 258252992894160189),
+ KQU( 6704363880792428622), KQU( 6114966032147235763),
+ KQU(11075393882690261875), KQU( 8797664238933620407),
+ KQU( 5901892006476726920), KQU( 5309780159285518958),
+ KQU(14940808387240817367), KQU(14642032021449656698),
+ KQU( 9808256672068504139), KQU( 3670135111380607658),
+ KQU(11211211097845960152), KQU( 1474304506716695808),
+ KQU(15843166204506876239), KQU( 7661051252471780561),
+ KQU(10170905502249418476), KQU( 7801416045582028589),
+ KQU( 2763981484737053050), KQU( 9491377905499253054),
+ KQU(16201395896336915095), KQU( 9256513756442782198),
+ KQU( 5411283157972456034), KQU( 5059433122288321676),
+ KQU( 4327408006721123357), KQU( 9278544078834433377),
+ KQU( 7601527110882281612), KQU(11848295896975505251),
+ KQU(12096998801094735560), KQU(14773480339823506413),
+ KQU(15586227433895802149), KQU(12786541257830242872),
+ KQU( 6904692985140503067), KQU( 5309011515263103959),
+ KQU(12105257191179371066), KQU(14654380212442225037),
+ KQU( 2556774974190695009), KQU( 4461297399927600261),
+ KQU(14888225660915118646), KQU(14915459341148291824),
+ KQU( 2738802166252327631), KQU( 6047155789239131512),
+ KQU(12920545353217010338), KQU(10697617257007840205),
+ KQU( 2751585253158203504), KQU(13252729159780047496),
+ KQU(14700326134672815469), KQU(14082527904374600529),
+ KQU(16852962273496542070), KQU(17446675504235853907),
+ KQU(15019600398527572311), KQU(12312781346344081551),
+ KQU(14524667935039810450), KQU( 5634005663377195738),
+ KQU(11375574739525000569), KQU( 2423665396433260040),
+ KQU( 5222836914796015410), KQU( 4397666386492647387),
+ KQU( 4619294441691707638), KQU( 665088602354770716),
+ KQU(13246495665281593610), KQU( 6564144270549729409),
+ KQU(10223216188145661688), KQU( 3961556907299230585),
+ KQU(11543262515492439914), KQU(16118031437285993790),
+ KQU( 7143417964520166465), KQU(13295053515909486772),
+ KQU( 40434666004899675), KQU(17127804194038347164),
+ KQU( 8599165966560586269), KQU( 8214016749011284903),
+ KQU(13725130352140465239), KQU( 5467254474431726291),
+ KQU( 7748584297438219877), KQU(16933551114829772472),
+ KQU( 2169618439506799400), KQU( 2169787627665113463),
+ KQU(17314493571267943764), KQU(18053575102911354912),
+ KQU(11928303275378476973), KQU(11593850925061715550),
+ KQU(17782269923473589362), KQU( 3280235307704747039),
+ KQU( 6145343578598685149), KQU(17080117031114086090),
+ KQU(18066839902983594755), KQU( 6517508430331020706),
+ KQU( 8092908893950411541), KQU(12558378233386153732),
+ KQU( 4476532167973132976), KQU(16081642430367025016),
+ KQU( 4233154094369139361), KQU( 8693630486693161027),
+ KQU(11244959343027742285), KQU(12273503967768513508),
+ KQU(14108978636385284876), KQU( 7242414665378826984),
+ KQU( 6561316938846562432), KQU( 8601038474994665795),
+ KQU(17532942353612365904), KQU(17940076637020912186),
+ KQU( 7340260368823171304), KQU( 7061807613916067905),
+ KQU(10561734935039519326), KQU(17990796503724650862),
+ KQU( 6208732943911827159), KQU( 359077562804090617),
+ KQU(14177751537784403113), KQU(10659599444915362902),
+ KQU(15081727220615085833), KQU(13417573895659757486),
+ KQU(15513842342017811524), KQU(11814141516204288231),
+ KQU( 1827312513875101814), KQU( 2804611699894603103),
+ KQU(17116500469975602763), KQU(12270191815211952087),
+ KQU(12256358467786024988), KQU(18435021722453971267),
+ KQU( 671330264390865618), KQU( 476504300460286050),
+ KQU(16465470901027093441), KQU( 4047724406247136402),
+ KQU( 1322305451411883346), KQU( 1388308688834322280),
+ KQU( 7303989085269758176), KQU( 9323792664765233642),
+ KQU( 4542762575316368936), KQU(17342696132794337618),
+ KQU( 4588025054768498379), KQU(13415475057390330804),
+ KQU(17880279491733405570), KQU(10610553400618620353),
+ KQU( 3180842072658960139), KQU(13002966655454270120),
+ KQU( 1665301181064982826), KQU( 7083673946791258979),
+ KQU( 190522247122496820), KQU(17388280237250677740),
+ KQU( 8430770379923642945), KQU(12987180971921668584),
+ KQU( 2311086108365390642), KQU( 2870984383579822345),
+ KQU(14014682609164653318), KQU(14467187293062251484),
+ KQU( 192186361147413298), KQU(15171951713531796524),
+ KQU( 9900305495015948728), KQU(17958004775615466344),
+ KQU(14346380954498606514), KQU(18040047357617407096),
+ KQU( 5035237584833424532), KQU(15089555460613972287),
+ KQU( 4131411873749729831), KQU( 1329013581168250330),
+ KQU(10095353333051193949), KQU(10749518561022462716),
+ KQU( 9050611429810755847), KQU(15022028840236655649),
+ KQU( 8775554279239748298), KQU(13105754025489230502),
+ KQU(15471300118574167585), KQU( 89864764002355628),
+ KQU( 8776416323420466637), KQU( 5280258630612040891),
+ KQU( 2719174488591862912), KQU( 7599309137399661994),
+ KQU(15012887256778039979), KQU(14062981725630928925),
+ KQU(12038536286991689603), KQU( 7089756544681775245),
+ KQU(10376661532744718039), KQU( 1265198725901533130),
+ KQU(13807996727081142408), KQU( 2935019626765036403),
+ KQU( 7651672460680700141), KQU( 3644093016200370795),
+ KQU( 2840982578090080674), KQU(17956262740157449201),
+ KQU(18267979450492880548), KQU(11799503659796848070),
+ KQU( 9942537025669672388), KQU(11886606816406990297),
+ KQU( 5488594946437447576), KQU( 7226714353282744302),
+ KQU( 3784851653123877043), KQU( 878018453244803041),
+ KQU(12110022586268616085), KQU( 734072179404675123),
+ KQU(11869573627998248542), KQU( 469150421297783998),
+ KQU( 260151124912803804), KQU(11639179410120968649),
+ KQU( 9318165193840846253), KQU(12795671722734758075),
+ KQU(15318410297267253933), KQU( 691524703570062620),
+ KQU( 5837129010576994601), KQU(15045963859726941052),
+ KQU( 5850056944932238169), KQU(12017434144750943807),
+ KQU( 7447139064928956574), KQU( 3101711812658245019),
+ KQU(16052940704474982954), KQU(18195745945986994042),
+ KQU( 8932252132785575659), KQU(13390817488106794834),
+ KQU(11582771836502517453), KQU( 4964411326683611686),
+ KQU( 2195093981702694011), KQU(14145229538389675669),
+ KQU(16459605532062271798), KQU( 866316924816482864),
+ KQU( 4593041209937286377), KQU( 8415491391910972138),
+ KQU( 4171236715600528969), KQU(16637569303336782889),
+ KQU( 2002011073439212680), KQU(17695124661097601411),
+ KQU( 4627687053598611702), KQU( 7895831936020190403),
+ KQU( 8455951300917267802), KQU( 2923861649108534854),
+ KQU( 8344557563927786255), KQU( 6408671940373352556),
+ KQU(12210227354536675772), KQU(14294804157294222295),
+ KQU(10103022425071085127), KQU(10092959489504123771),
+ KQU( 6554774405376736268), KQU(12629917718410641774),
+ KQU( 6260933257596067126), KQU( 2460827021439369673),
+ KQU( 2541962996717103668), KQU( 597377203127351475),
+ KQU( 5316984203117315309), KQU( 4811211393563241961),
+ KQU(13119698597255811641), KQU( 8048691512862388981),
+ KQU(10216818971194073842), KQU( 4612229970165291764),
+ KQU(10000980798419974770), KQU( 6877640812402540687),
+ KQU( 1488727563290436992), KQU( 2227774069895697318),
+ KQU(11237754507523316593), KQU(13478948605382290972),
+ KQU( 1963583846976858124), KQU( 5512309205269276457),
+ KQU( 3972770164717652347), KQU( 3841751276198975037),
+ KQU(10283343042181903117), KQU( 8564001259792872199),
+ KQU(16472187244722489221), KQU( 8953493499268945921),
+ KQU( 3518747340357279580), KQU( 4003157546223963073),
+ KQU( 3270305958289814590), KQU( 3966704458129482496),
+ KQU( 8122141865926661939), KQU(14627734748099506653),
+ KQU(13064426990862560568), KQU( 2414079187889870829),
+ KQU( 5378461209354225306), KQU(10841985740128255566),
+ KQU( 538582442885401738), KQU( 7535089183482905946),
+ KQU(16117559957598879095), KQU( 8477890721414539741),
+ KQU( 1459127491209533386), KQU(17035126360733620462),
+ KQU( 8517668552872379126), KQU(10292151468337355014),
+ KQU(17081267732745344157), KQU(13751455337946087178),
+ KQU(14026945459523832966), KQU( 6653278775061723516),
+ KQU(10619085543856390441), KQU( 2196343631481122885),
+ KQU(10045966074702826136), KQU(10082317330452718282),
+ KQU( 5920859259504831242), KQU( 9951879073426540617),
+ KQU( 7074696649151414158), KQU(15808193543879464318),
+ KQU( 7385247772746953374), KQU( 3192003544283864292),
+ KQU(18153684490917593847), KQU(12423498260668568905),
+ KQU(10957758099756378169), KQU(11488762179911016040),
+ KQU( 2099931186465333782), KQU(11180979581250294432),
+ KQU( 8098916250668367933), KQU( 3529200436790763465),
+ KQU(12988418908674681745), KQU( 6147567275954808580),
+ KQU( 3207503344604030989), KQU(10761592604898615360),
+ KQU( 229854861031893504), KQU( 8809853962667144291),
+ KQU(13957364469005693860), KQU( 7634287665224495886),
+ KQU(12353487366976556874), KQU( 1134423796317152034),
+ KQU( 2088992471334107068), KQU( 7393372127190799698),
+ KQU( 1845367839871058391), KQU( 207922563987322884),
+ KQU(11960870813159944976), KQU(12182120053317317363),
+ KQU(17307358132571709283), KQU(13871081155552824936),
+ KQU(18304446751741566262), KQU( 7178705220184302849),
+ KQU(10929605677758824425), KQU(16446976977835806844),
+ KQU(13723874412159769044), KQU( 6942854352100915216),
+ KQU( 1726308474365729390), KQU( 2150078766445323155),
+ KQU(15345558947919656626), KQU(12145453828874527201),
+ KQU( 2054448620739726849), KQU( 2740102003352628137),
+ KQU(11294462163577610655), KQU( 756164283387413743),
+ KQU(17841144758438810880), KQU(10802406021185415861),
+ KQU( 8716455530476737846), KQU( 6321788834517649606),
+ KQU(14681322910577468426), KQU(17330043563884336387),
+ KQU(12701802180050071614), KQU(14695105111079727151),
+ KQU( 5112098511654172830), KQU( 4957505496794139973),
+ KQU( 8270979451952045982), KQU(12307685939199120969),
+ KQU(12425799408953443032), KQU( 8376410143634796588),
+ KQU(16621778679680060464), KQU( 3580497854566660073),
+ KQU( 1122515747803382416), KQU( 857664980960597599),
+ KQU( 6343640119895925918), KQU(12878473260854462891),
+ KQU(10036813920765722626), KQU(14451335468363173812),
+ KQU( 5476809692401102807), KQU(16442255173514366342),
+ KQU(13060203194757167104), KQU(14354124071243177715),
+ KQU(15961249405696125227), KQU(13703893649690872584),
+ KQU( 363907326340340064), KQU( 6247455540491754842),
+ KQU(12242249332757832361), KQU( 156065475679796717),
+ KQU( 9351116235749732355), KQU( 4590350628677701405),
+ KQU( 1671195940982350389), KQU(13501398458898451905),
+ KQU( 6526341991225002255), KQU( 1689782913778157592),
+ KQU( 7439222350869010334), KQU(13975150263226478308),
+ KQU(11411961169932682710), KQU(17204271834833847277),
+ KQU( 541534742544435367), KQU( 6591191931218949684),
+ KQU( 2645454775478232486), KQU( 4322857481256485321),
+ KQU( 8477416487553065110), KQU(12902505428548435048),
+ KQU( 971445777981341415), KQU(14995104682744976712),
+ KQU( 4243341648807158063), KQU( 8695061252721927661),
+ KQU( 5028202003270177222), KQU( 2289257340915567840),
+ KQU(13870416345121866007), KQU(13994481698072092233),
+ KQU( 6912785400753196481), KQU( 2278309315841980139),
+ KQU( 4329765449648304839), KQU( 5963108095785485298),
+ KQU( 4880024847478722478), KQU(16015608779890240947),
+ KQU( 1866679034261393544), KQU( 914821179919731519),
+ KQU( 9643404035648760131), KQU( 2418114953615593915),
+ KQU( 944756836073702374), KQU(15186388048737296834),
+ KQU( 7723355336128442206), KQU( 7500747479679599691),
+ KQU(18013961306453293634), KQU( 2315274808095756456),
+ KQU(13655308255424029566), KQU(17203800273561677098),
+ KQU( 1382158694422087756), KQU( 5090390250309588976),
+ KQU( 517170818384213989), KQU( 1612709252627729621),
+ KQU( 1330118955572449606), KQU( 300922478056709885),
+ KQU(18115693291289091987), KQU(13491407109725238321),
+ KQU(15293714633593827320), KQU( 5151539373053314504),
+ KQU( 5951523243743139207), KQU(14459112015249527975),
+ KQU( 5456113959000700739), KQU( 3877918438464873016),
+ KQU(12534071654260163555), KQU(15871678376893555041),
+ KQU(11005484805712025549), KQU(16353066973143374252),
+ KQU( 4358331472063256685), KQU( 8268349332210859288),
+ KQU(12485161590939658075), KQU(13955993592854471343),
+ KQU( 5911446886848367039), KQU(14925834086813706974),
+ KQU( 6590362597857994805), KQU( 1280544923533661875),
+ KQU( 1637756018947988164), KQU( 4734090064512686329),
+ KQU(16693705263131485912), KQU( 6834882340494360958),
+ KQU( 8120732176159658505), KQU( 2244371958905329346),
+ KQU(10447499707729734021), KQU( 7318742361446942194),
+ KQU( 8032857516355555296), KQU(14023605983059313116),
+ KQU( 1032336061815461376), KQU( 9840995337876562612),
+ KQU( 9869256223029203587), KQU(12227975697177267636),
+ KQU(12728115115844186033), KQU( 7752058479783205470),
+ KQU( 729733219713393087), KQU(12954017801239007622)
+};
+static const uint64_t init_by_array_64_expected[] = {
+ KQU( 2100341266307895239), KQU( 8344256300489757943),
+ KQU(15687933285484243894), KQU( 8268620370277076319),
+ KQU(12371852309826545459), KQU( 8800491541730110238),
+ KQU(18113268950100835773), KQU( 2886823658884438119),
+ KQU( 3293667307248180724), KQU( 9307928143300172731),
+ KQU( 7688082017574293629), KQU( 900986224735166665),
+ KQU( 9977972710722265039), KQU( 6008205004994830552),
+ KQU( 546909104521689292), KQU( 7428471521869107594),
+ KQU(14777563419314721179), KQU(16116143076567350053),
+ KQU( 5322685342003142329), KQU( 4200427048445863473),
+ KQU( 4693092150132559146), KQU(13671425863759338582),
+ KQU( 6747117460737639916), KQU( 4732666080236551150),
+ KQU( 5912839950611941263), KQU( 3903717554504704909),
+ KQU( 2615667650256786818), KQU(10844129913887006352),
+ KQU(13786467861810997820), KQU(14267853002994021570),
+ KQU(13767807302847237439), KQU(16407963253707224617),
+ KQU( 4802498363698583497), KQU( 2523802839317209764),
+ KQU( 3822579397797475589), KQU( 8950320572212130610),
+ KQU( 3745623504978342534), KQU(16092609066068482806),
+ KQU( 9817016950274642398), KQU(10591660660323829098),
+ KQU(11751606650792815920), KQU( 5122873818577122211),
+ KQU(17209553764913936624), KQU( 6249057709284380343),
+ KQU(15088791264695071830), KQU(15344673071709851930),
+ KQU( 4345751415293646084), KQU( 2542865750703067928),
+ KQU(13520525127852368784), KQU(18294188662880997241),
+ KQU( 3871781938044881523), KQU( 2873487268122812184),
+ KQU(15099676759482679005), KQU(15442599127239350490),
+ KQU( 6311893274367710888), KQU( 3286118760484672933),
+ KQU( 4146067961333542189), KQU(13303942567897208770),
+ KQU( 8196013722255630418), KQU( 4437815439340979989),
+ KQU(15433791533450605135), KQU( 4254828956815687049),
+ KQU( 1310903207708286015), KQU(10529182764462398549),
+ KQU(14900231311660638810), KQU( 9727017277104609793),
+ KQU( 1821308310948199033), KQU(11628861435066772084),
+ KQU( 9469019138491546924), KQU( 3145812670532604988),
+ KQU( 9938468915045491919), KQU( 1562447430672662142),
+ KQU(13963995266697989134), KQU( 3356884357625028695),
+ KQU( 4499850304584309747), KQU( 8456825817023658122),
+ KQU(10859039922814285279), KQU( 8099512337972526555),
+ KQU( 348006375109672149), KQU(11919893998241688603),
+ KQU( 1104199577402948826), KQU(16689191854356060289),
+ KQU(10992552041730168078), KQU( 7243733172705465836),
+ KQU( 5668075606180319560), KQU(18182847037333286970),
+ KQU( 4290215357664631322), KQU( 4061414220791828613),
+ KQU(13006291061652989604), KQU( 7140491178917128798),
+ KQU(12703446217663283481), KQU( 5500220597564558267),
+ KQU(10330551509971296358), KQU(15958554768648714492),
+ KQU( 5174555954515360045), KQU( 1731318837687577735),
+ KQU( 3557700801048354857), KQU(13764012341928616198),
+ KQU(13115166194379119043), KQU( 7989321021560255519),
+ KQU( 2103584280905877040), KQU( 9230788662155228488),
+ KQU(16396629323325547654), KQU( 657926409811318051),
+ KQU(15046700264391400727), KQU( 5120132858771880830),
+ KQU( 7934160097989028561), KQU( 6963121488531976245),
+ KQU(17412329602621742089), KQU(15144843053931774092),
+ KQU(17204176651763054532), KQU(13166595387554065870),
+ KQU( 8590377810513960213), KQU( 5834365135373991938),
+ KQU( 7640913007182226243), KQU( 3479394703859418425),
+ KQU(16402784452644521040), KQU( 4993979809687083980),
+ KQU(13254522168097688865), KQU(15643659095244365219),
+ KQU( 5881437660538424982), KQU(11174892200618987379),
+ KQU( 254409966159711077), KQU(17158413043140549909),
+ KQU( 3638048789290376272), KQU( 1376816930299489190),
+ KQU( 4622462095217761923), KQU(15086407973010263515),
+ KQU(13253971772784692238), KQU( 5270549043541649236),
+ KQU(11182714186805411604), KQU(12283846437495577140),
+ KQU( 5297647149908953219), KQU(10047451738316836654),
+ KQU( 4938228100367874746), KQU(12328523025304077923),
+ KQU( 3601049438595312361), KQU( 9313624118352733770),
+ KQU(13322966086117661798), KQU(16660005705644029394),
+ KQU(11337677526988872373), KQU(13869299102574417795),
+ KQU(15642043183045645437), KQU( 3021755569085880019),
+ KQU( 4979741767761188161), KQU(13679979092079279587),
+ KQU( 3344685842861071743), KQU(13947960059899588104),
+ KQU( 305806934293368007), KQU( 5749173929201650029),
+ KQU(11123724852118844098), KQU(15128987688788879802),
+ KQU(15251651211024665009), KQU( 7689925933816577776),
+ KQU(16732804392695859449), KQU(17087345401014078468),
+ KQU(14315108589159048871), KQU( 4820700266619778917),
+ KQU(16709637539357958441), KQU( 4936227875177351374),
+ KQU( 2137907697912987247), KQU(11628565601408395420),
+ KQU( 2333250549241556786), KQU( 5711200379577778637),
+ KQU( 5170680131529031729), KQU(12620392043061335164),
+ KQU( 95363390101096078), KQU( 5487981914081709462),
+ KQU( 1763109823981838620), KQU( 3395861271473224396),
+ KQU( 1300496844282213595), KQU( 6894316212820232902),
+ KQU(10673859651135576674), KQU( 5911839658857903252),
+ KQU(17407110743387299102), KQU( 8257427154623140385),
+ KQU(11389003026741800267), KQU( 4070043211095013717),
+ KQU(11663806997145259025), KQU(15265598950648798210),
+ KQU( 630585789434030934), KQU( 3524446529213587334),
+ KQU( 7186424168495184211), KQU(10806585451386379021),
+ KQU(11120017753500499273), KQU( 1586837651387701301),
+ KQU(17530454400954415544), KQU( 9991670045077880430),
+ KQU( 7550997268990730180), KQU( 8640249196597379304),
+ KQU( 3522203892786893823), KQU(10401116549878854788),
+ KQU(13690285544733124852), KQU( 8295785675455774586),
+ KQU(15535716172155117603), KQU( 3112108583723722511),
+ KQU(17633179955339271113), KQU(18154208056063759375),
+ KQU( 1866409236285815666), KQU(13326075895396412882),
+ KQU( 8756261842948020025), KQU( 6281852999868439131),
+ KQU(15087653361275292858), KQU(10333923911152949397),
+ KQU( 5265567645757408500), KQU(12728041843210352184),
+ KQU( 6347959327507828759), KQU( 154112802625564758),
+ KQU(18235228308679780218), KQU( 3253805274673352418),
+ KQU( 4849171610689031197), KQU(17948529398340432518),
+ KQU(13803510475637409167), KQU(13506570190409883095),
+ KQU(15870801273282960805), KQU( 8451286481299170773),
+ KQU( 9562190620034457541), KQU( 8518905387449138364),
+ KQU(12681306401363385655), KQU( 3788073690559762558),
+ KQU( 5256820289573487769), KQU( 2752021372314875467),
+ KQU( 6354035166862520716), KQU( 4328956378309739069),
+ KQU( 449087441228269600), KQU( 5533508742653090868),
+ KQU( 1260389420404746988), KQU(18175394473289055097),
+ KQU( 1535467109660399420), KQU( 8818894282874061442),
+ KQU(12140873243824811213), KQU(15031386653823014946),
+ KQU( 1286028221456149232), KQU( 6329608889367858784),
+ KQU( 9419654354945132725), KQU( 6094576547061672379),
+ KQU(17706217251847450255), KQU( 1733495073065878126),
+ KQU(16918923754607552663), KQU( 8881949849954945044),
+ KQU(12938977706896313891), KQU(14043628638299793407),
+ KQU(18393874581723718233), KQU( 6886318534846892044),
+ KQU(14577870878038334081), KQU(13541558383439414119),
+ KQU(13570472158807588273), KQU(18300760537910283361),
+ KQU( 818368572800609205), KQU( 1417000585112573219),
+ KQU(12337533143867683655), KQU(12433180994702314480),
+ KQU( 778190005829189083), KQU(13667356216206524711),
+ KQU( 9866149895295225230), KQU(11043240490417111999),
+ KQU( 1123933826541378598), KQU( 6469631933605123610),
+ KQU(14508554074431980040), KQU(13918931242962026714),
+ KQU( 2870785929342348285), KQU(14786362626740736974),
+ KQU(13176680060902695786), KQU( 9591778613541679456),
+ KQU( 9097662885117436706), KQU( 749262234240924947),
+ KQU( 1944844067793307093), KQU( 4339214904577487742),
+ KQU( 8009584152961946551), KQU(16073159501225501777),
+ KQU( 3335870590499306217), KQU(17088312653151202847),
+ KQU( 3108893142681931848), KQU(16636841767202792021),
+ KQU(10423316431118400637), KQU( 8008357368674443506),
+ KQU(11340015231914677875), KQU(17687896501594936090),
+ KQU(15173627921763199958), KQU( 542569482243721959),
+ KQU(15071714982769812975), KQU( 4466624872151386956),
+ KQU( 1901780715602332461), KQU( 9822227742154351098),
+ KQU( 1479332892928648780), KQU( 6981611948382474400),
+ KQU( 7620824924456077376), KQU(14095973329429406782),
+ KQU( 7902744005696185404), KQU(15830577219375036920),
+ KQU(10287076667317764416), KQU(12334872764071724025),
+ KQU( 4419302088133544331), KQU(14455842851266090520),
+ KQU(12488077416504654222), KQU( 7953892017701886766),
+ KQU( 6331484925529519007), KQU( 4902145853785030022),
+ KQU(17010159216096443073), KQU(11945354668653886087),
+ KQU(15112022728645230829), KQU(17363484484522986742),
+ KQU( 4423497825896692887), KQU( 8155489510809067471),
+ KQU( 258966605622576285), KQU( 5462958075742020534),
+ KQU( 6763710214913276228), KQU( 2368935183451109054),
+ KQU(14209506165246453811), KQU( 2646257040978514881),
+ KQU( 3776001911922207672), KQU( 1419304601390147631),
+ KQU(14987366598022458284), KQU( 3977770701065815721),
+ KQU( 730820417451838898), KQU( 3982991703612885327),
+ KQU( 2803544519671388477), KQU(17067667221114424649),
+ KQU( 2922555119737867166), KQU( 1989477584121460932),
+ KQU(15020387605892337354), KQU( 9293277796427533547),
+ KQU(10722181424063557247), KQU(16704542332047511651),
+ KQU( 5008286236142089514), KQU(16174732308747382540),
+ KQU(17597019485798338402), KQU(13081745199110622093),
+ KQU( 8850305883842258115), KQU(12723629125624589005),
+ KQU( 8140566453402805978), KQU(15356684607680935061),
+ KQU(14222190387342648650), KQU(11134610460665975178),
+ KQU( 1259799058620984266), KQU(13281656268025610041),
+ KQU( 298262561068153992), KQU(12277871700239212922),
+ KQU(13911297774719779438), KQU(16556727962761474934),
+ KQU(17903010316654728010), KQU( 9682617699648434744),
+ KQU(14757681836838592850), KQU( 1327242446558524473),
+ KQU(11126645098780572792), KQU( 1883602329313221774),
+ KQU( 2543897783922776873), KQU(15029168513767772842),
+ KQU(12710270651039129878), KQU(16118202956069604504),
+ KQU(15010759372168680524), KQU( 2296827082251923948),
+ KQU(10793729742623518101), KQU(13829764151845413046),
+ KQU(17769301223184451213), KQU( 3118268169210783372),
+ KQU(17626204544105123127), KQU( 7416718488974352644),
+ KQU(10450751996212925994), KQU( 9352529519128770586),
+ KQU( 259347569641110140), KQU( 8048588892269692697),
+ KQU( 1774414152306494058), KQU(10669548347214355622),
+ KQU(13061992253816795081), KQU(18432677803063861659),
+ KQU( 8879191055593984333), KQU(12433753195199268041),
+ KQU(14919392415439730602), KQU( 6612848378595332963),
+ KQU( 6320986812036143628), KQU(10465592420226092859),
+ KQU( 4196009278962570808), KQU( 3747816564473572224),
+ KQU(17941203486133732898), KQU( 2350310037040505198),
+ KQU( 5811779859134370113), KQU(10492109599506195126),
+ KQU( 7699650690179541274), KQU( 1954338494306022961),
+ KQU(14095816969027231152), KQU( 5841346919964852061),
+ KQU(14945969510148214735), KQU( 3680200305887550992),
+ KQU( 6218047466131695792), KQU( 8242165745175775096),
+ KQU(11021371934053307357), KQU( 1265099502753169797),
+ KQU( 4644347436111321718), KQU( 3609296916782832859),
+ KQU( 8109807992218521571), KQU(18387884215648662020),
+ KQU(14656324896296392902), KQU(17386819091238216751),
+ KQU(17788300878582317152), KQU( 7919446259742399591),
+ KQU( 4466613134576358004), KQU(12928181023667938509),
+ KQU(13147446154454932030), KQU(16552129038252734620),
+ KQU( 8395299403738822450), KQU(11313817655275361164),
+ KQU( 434258809499511718), KQU( 2074882104954788676),
+ KQU( 7929892178759395518), KQU( 9006461629105745388),
+ KQU( 5176475650000323086), KQU(11128357033468341069),
+ KQU(12026158851559118955), KQU(14699716249471156500),
+ KQU( 448982497120206757), KQU( 4156475356685519900),
+ KQU( 6063816103417215727), KQU(10073289387954971479),
+ KQU( 8174466846138590962), KQU( 2675777452363449006),
+ KQU( 9090685420572474281), KQU( 6659652652765562060),
+ KQU(12923120304018106621), KQU(11117480560334526775),
+ KQU( 937910473424587511), KQU( 1838692113502346645),
+ KQU(11133914074648726180), KQU( 7922600945143884053),
+ KQU(13435287702700959550), KQU( 5287964921251123332),
+ KQU(11354875374575318947), KQU(17955724760748238133),
+ KQU(13728617396297106512), KQU( 4107449660118101255),
+ KQU( 1210269794886589623), KQU(11408687205733456282),
+ KQU( 4538354710392677887), KQU(13566803319341319267),
+ KQU(17870798107734050771), KQU( 3354318982568089135),
+ KQU( 9034450839405133651), KQU(13087431795753424314),
+ KQU( 950333102820688239), KQU( 1968360654535604116),
+ KQU(16840551645563314995), KQU( 8867501803892924995),
+ KQU(11395388644490626845), KQU( 1529815836300732204),
+ KQU(13330848522996608842), KQU( 1813432878817504265),
+ KQU( 2336867432693429560), KQU(15192805445973385902),
+ KQU( 2528593071076407877), KQU( 128459777936689248),
+ KQU( 9976345382867214866), KQU( 6208885766767996043),
+ KQU(14982349522273141706), KQU( 3099654362410737822),
+ KQU(13776700761947297661), KQU( 8806185470684925550),
+ KQU( 8151717890410585321), KQU( 640860591588072925),
+ KQU(14592096303937307465), KQU( 9056472419613564846),
+ KQU(14861544647742266352), KQU(12703771500398470216),
+ KQU( 3142372800384138465), KQU( 6201105606917248196),
+ KQU(18337516409359270184), KQU(15042268695665115339),
+ KQU(15188246541383283846), KQU(12800028693090114519),
+ KQU( 5992859621101493472), KQU(18278043971816803521),
+ KQU( 9002773075219424560), KQU( 7325707116943598353),
+ KQU( 7930571931248040822), KQU( 5645275869617023448),
+ KQU( 7266107455295958487), KQU( 4363664528273524411),
+ KQU(14313875763787479809), KQU(17059695613553486802),
+ KQU( 9247761425889940932), KQU(13704726459237593128),
+ KQU( 2701312427328909832), KQU(17235532008287243115),
+ KQU(14093147761491729538), KQU( 6247352273768386516),
+ KQU( 8268710048153268415), KQU( 7985295214477182083),
+ KQU(15624495190888896807), KQU( 3772753430045262788),
+ KQU( 9133991620474991698), KQU( 5665791943316256028),
+ KQU( 7551996832462193473), KQU(13163729206798953877),
+ KQU( 9263532074153846374), KQU( 1015460703698618353),
+ KQU(17929874696989519390), KQU(18257884721466153847),
+ KQU(16271867543011222991), KQU( 3905971519021791941),
+ KQU(16814488397137052085), KQU( 1321197685504621613),
+ KQU( 2870359191894002181), KQU(14317282970323395450),
+ KQU(13663920845511074366), KQU( 2052463995796539594),
+ KQU(14126345686431444337), KQU( 1727572121947022534),
+ KQU(17793552254485594241), KQU( 6738857418849205750),
+ KQU( 1282987123157442952), KQU(16655480021581159251),
+ KQU( 6784587032080183866), KQU(14726758805359965162),
+ KQU( 7577995933961987349), KQU(12539609320311114036),
+ KQU(10789773033385439494), KQU( 8517001497411158227),
+ KQU(10075543932136339710), KQU(14838152340938811081),
+ KQU( 9560840631794044194), KQU(17445736541454117475),
+ KQU(10633026464336393186), KQU(15705729708242246293),
+ KQU( 1117517596891411098), KQU( 4305657943415886942),
+ KQU( 4948856840533979263), KQU(16071681989041789593),
+ KQU(13723031429272486527), KQU( 7639567622306509462),
+ KQU(12670424537483090390), KQU( 9715223453097197134),
+ KQU( 5457173389992686394), KQU( 289857129276135145),
+ KQU(17048610270521972512), KQU( 692768013309835485),
+ KQU(14823232360546632057), KQU(18218002361317895936),
+ KQU( 3281724260212650204), KQU(16453957266549513795),
+ KQU( 8592711109774511881), KQU( 929825123473369579),
+ KQU(15966784769764367791), KQU( 9627344291450607588),
+ KQU(10849555504977813287), KQU( 9234566913936339275),
+ KQU( 6413807690366911210), KQU(10862389016184219267),
+ KQU(13842504799335374048), KQU( 1531994113376881174),
+ KQU( 2081314867544364459), KQU(16430628791616959932),
+ KQU( 8314714038654394368), KQU( 9155473892098431813),
+ KQU(12577843786670475704), KQU( 4399161106452401017),
+ KQU( 1668083091682623186), KQU( 1741383777203714216),
+ KQU( 2162597285417794374), KQU(15841980159165218736),
+ KQU( 1971354603551467079), KQU( 1206714764913205968),
+ KQU( 4790860439591272330), KQU(14699375615594055799),
+ KQU( 8374423871657449988), KQU(10950685736472937738),
+ KQU( 697344331343267176), KQU(10084998763118059810),
+ KQU(12897369539795983124), KQU(12351260292144383605),
+ KQU( 1268810970176811234), KQU( 7406287800414582768),
+ KQU( 516169557043807831), KQU( 5077568278710520380),
+ KQU( 3828791738309039304), KQU( 7721974069946943610),
+ KQU( 3534670260981096460), KQU( 4865792189600584891),
+ KQU(16892578493734337298), KQU( 9161499464278042590),
+ KQU(11976149624067055931), KQU(13219479887277343990),
+ KQU(14161556738111500680), KQU(14670715255011223056),
+ KQU( 4671205678403576558), KQU(12633022931454259781),
+ KQU(14821376219869187646), KQU( 751181776484317028),
+ KQU( 2192211308839047070), KQU(11787306362361245189),
+ KQU(10672375120744095707), KQU( 4601972328345244467),
+ KQU(15457217788831125879), KQU( 8464345256775460809),
+ KQU(10191938789487159478), KQU( 6184348739615197613),
+ KQU(11425436778806882100), KQU( 2739227089124319793),
+ KQU( 461464518456000551), KQU( 4689850170029177442),
+ KQU( 6120307814374078625), KQU(11153579230681708671),
+ KQU( 7891721473905347926), KQU(10281646937824872400),
+ KQU( 3026099648191332248), KQU( 8666750296953273818),
+ KQU(14978499698844363232), KQU(13303395102890132065),
+ KQU( 8182358205292864080), KQU(10560547713972971291),
+ KQU(11981635489418959093), KQU( 3134621354935288409),
+ KQU(11580681977404383968), KQU(14205530317404088650),
+ KQU( 5997789011854923157), KQU(13659151593432238041),
+ KQU(11664332114338865086), KQU( 7490351383220929386),
+ KQU( 7189290499881530378), KQU(15039262734271020220),
+ KQU( 2057217285976980055), KQU( 555570804905355739),
+ KQU(11235311968348555110), KQU(13824557146269603217),
+ KQU(16906788840653099693), KQU( 7222878245455661677),
+ KQU( 5245139444332423756), KQU( 4723748462805674292),
+ KQU(12216509815698568612), KQU(17402362976648951187),
+ KQU(17389614836810366768), KQU( 4880936484146667711),
+ KQU( 9085007839292639880), KQU(13837353458498535449),
+ KQU(11914419854360366677), KQU(16595890135313864103),
+ KQU( 6313969847197627222), KQU(18296909792163910431),
+ KQU(10041780113382084042), KQU( 2499478551172884794),
+ KQU(11057894246241189489), KQU( 9742243032389068555),
+ KQU(12838934582673196228), KQU(13437023235248490367),
+ KQU(13372420669446163240), KQU( 6752564244716909224),
+ KQU( 7157333073400313737), KQU(12230281516370654308),
+ KQU( 1182884552219419117), KQU( 2955125381312499218),
+ KQU(10308827097079443249), KQU( 1337648572986534958),
+ KQU(16378788590020343939), KQU( 108619126514420935),
+ KQU( 3990981009621629188), KQU( 5460953070230946410),
+ KQU( 9703328329366531883), KQU(13166631489188077236),
+ KQU( 1104768831213675170), KQU( 3447930458553877908),
+ KQU( 8067172487769945676), KQU( 5445802098190775347),
+ KQU( 3244840981648973873), KQU(17314668322981950060),
+ KQU( 5006812527827763807), KQU(18158695070225526260),
+ KQU( 2824536478852417853), KQU(13974775809127519886),
+ KQU( 9814362769074067392), KQU(17276205156374862128),
+ KQU(11361680725379306967), KQU( 3422581970382012542),
+ KQU(11003189603753241266), KQU(11194292945277862261),
+ KQU( 6839623313908521348), KQU(11935326462707324634),
+ KQU( 1611456788685878444), KQU(13112620989475558907),
+ KQU( 517659108904450427), KQU(13558114318574407624),
+ KQU(15699089742731633077), KQU( 4988979278862685458),
+ KQU( 8111373583056521297), KQU( 3891258746615399627),
+ KQU( 8137298251469718086), KQU(12748663295624701649),
+ KQU( 4389835683495292062), KQU( 5775217872128831729),
+ KQU( 9462091896405534927), KQU( 8498124108820263989),
+ KQU( 8059131278842839525), KQU(10503167994254090892),
+ KQU(11613153541070396656), KQU(18069248738504647790),
+ KQU( 570657419109768508), KQU( 3950574167771159665),
+ KQU( 5514655599604313077), KQU( 2908460854428484165),
+ KQU(10777722615935663114), KQU(12007363304839279486),
+ KQU( 9800646187569484767), KQU( 8795423564889864287),
+ KQU(14257396680131028419), KQU( 6405465117315096498),
+ KQU( 7939411072208774878), KQU(17577572378528990006),
+ KQU(14785873806715994850), KQU(16770572680854747390),
+ KQU(18127549474419396481), KQU(11637013449455757750),
+ KQU(14371851933996761086), KQU( 3601181063650110280),
+ KQU( 4126442845019316144), KQU(10198287239244320669),
+ KQU(18000169628555379659), KQU(18392482400739978269),
+ KQU( 6219919037686919957), KQU( 3610085377719446052),
+ KQU( 2513925039981776336), KQU(16679413537926716955),
+ KQU(12903302131714909434), KQU( 5581145789762985009),
+ KQU(12325955044293303233), KQU(17216111180742141204),
+ KQU( 6321919595276545740), KQU( 3507521147216174501),
+ KQU( 9659194593319481840), KQU(11473976005975358326),
+ KQU(14742730101435987026), KQU( 492845897709954780),
+ KQU(16976371186162599676), KQU(17712703422837648655),
+ KQU( 9881254778587061697), KQU( 8413223156302299551),
+ KQU( 1563841828254089168), KQU( 9996032758786671975),
+ KQU( 138877700583772667), KQU(13003043368574995989),
+ KQU( 4390573668650456587), KQU( 8610287390568126755),
+ KQU(15126904974266642199), KQU( 6703637238986057662),
+ KQU( 2873075592956810157), KQU( 6035080933946049418),
+ KQU(13382846581202353014), KQU( 7303971031814642463),
+ KQU(18418024405307444267), KQU( 5847096731675404647),
+ KQU( 4035880699639842500), KQU(11525348625112218478),
+ KQU( 3041162365459574102), KQU( 2604734487727986558),
+ KQU(15526341771636983145), KQU(14556052310697370254),
+ KQU(12997787077930808155), KQU( 9601806501755554499),
+ KQU(11349677952521423389), KQU(14956777807644899350),
+ KQU(16559736957742852721), KQU(12360828274778140726),
+ KQU( 6685373272009662513), KQU(16932258748055324130),
+ KQU(15918051131954158508), KQU( 1692312913140790144),
+ KQU( 546653826801637367), KQU( 5341587076045986652),
+ KQU(14975057236342585662), KQU(12374976357340622412),
+ KQU(10328833995181940552), KQU(12831807101710443149),
+ KQU(10548514914382545716), KQU( 2217806727199715993),
+ KQU(12627067369242845138), KQU( 4598965364035438158),
+ KQU( 150923352751318171), KQU(14274109544442257283),
+ KQU( 4696661475093863031), KQU( 1505764114384654516),
+ KQU(10699185831891495147), KQU( 2392353847713620519),
+ KQU( 3652870166711788383), KQU( 8640653276221911108),
+ KQU( 3894077592275889704), KQU( 4918592872135964845),
+ KQU(16379121273281400789), KQU(12058465483591683656),
+ KQU(11250106829302924945), KQU( 1147537556296983005),
+ KQU( 6376342756004613268), KQU(14967128191709280506),
+ KQU(18007449949790627628), KQU( 9497178279316537841),
+ KQU( 7920174844809394893), KQU(10037752595255719907),
+ KQU(15875342784985217697), KQU(15311615921712850696),
+ KQU( 9552902652110992950), KQU(14054979450099721140),
+ KQU( 5998709773566417349), KQU(18027910339276320187),
+ KQU( 8223099053868585554), KQU( 7842270354824999767),
+ KQU( 4896315688770080292), KQU(12969320296569787895),
+ KQU( 2674321489185759961), KQU( 4053615936864718439),
+ KQU(11349775270588617578), KQU( 4743019256284553975),
+ KQU( 5602100217469723769), KQU(14398995691411527813),
+ KQU( 7412170493796825470), KQU( 836262406131744846),
+ KQU( 8231086633845153022), KQU( 5161377920438552287),
+ KQU( 8828731196169924949), KQU(16211142246465502680),
+ KQU( 3307990879253687818), KQU( 5193405406899782022),
+ KQU( 8510842117467566693), KQU( 6070955181022405365),
+ KQU(14482950231361409799), KQU(12585159371331138077),
+ KQU( 3511537678933588148), KQU( 2041849474531116417),
+ KQU(10944936685095345792), KQU(18303116923079107729),
+ KQU( 2720566371239725320), KQU( 4958672473562397622),
+ KQU( 3032326668253243412), KQU(13689418691726908338),
+ KQU( 1895205511728843996), KQU( 8146303515271990527),
+ KQU(16507343500056113480), KQU( 473996939105902919),
+ KQU( 9897686885246881481), KQU(14606433762712790575),
+ KQU( 6732796251605566368), KQU( 1399778120855368916),
+ KQU( 935023885182833777), KQU(16066282816186753477),
+ KQU( 7291270991820612055), KQU(17530230393129853844),
+ KQU(10223493623477451366), KQU(15841725630495676683),
+ KQU(17379567246435515824), KQU( 8588251429375561971),
+ KQU(18339511210887206423), KQU(17349587430725976100),
+ KQU(12244876521394838088), KQU( 6382187714147161259),
+ KQU(12335807181848950831), KQU(16948885622305460665),
+ KQU(13755097796371520506), KQU(14806740373324947801),
+ KQU( 4828699633859287703), KQU( 8209879281452301604),
+ KQU(12435716669553736437), KQU(13970976859588452131),
+ KQU( 6233960842566773148), KQU(12507096267900505759),
+ KQU( 1198713114381279421), KQU(14989862731124149015),
+ KQU(15932189508707978949), KQU( 2526406641432708722),
+ KQU( 29187427817271982), KQU( 1499802773054556353),
+ KQU(10816638187021897173), KQU( 5436139270839738132),
+ KQU( 6659882287036010082), KQU( 2154048955317173697),
+ KQU(10887317019333757642), KQU(16281091802634424955),
+ KQU(10754549879915384901), KQU(10760611745769249815),
+ KQU( 2161505946972504002), KQU( 5243132808986265107),
+ KQU(10129852179873415416), KQU( 710339480008649081),
+ KQU( 7802129453068808528), KQU(17967213567178907213),
+ KQU(15730859124668605599), KQU(13058356168962376502),
+ KQU( 3701224985413645909), KQU(14464065869149109264),
+ KQU( 9959272418844311646), KQU(10157426099515958752),
+ KQU(14013736814538268528), KQU(17797456992065653951),
+ KQU(17418878140257344806), KQU(15457429073540561521),
+ KQU( 2184426881360949378), KQU( 2062193041154712416),
+ KQU( 8553463347406931661), KQU( 4913057625202871854),
+ KQU( 2668943682126618425), KQU(17064444737891172288),
+ KQU( 4997115903913298637), KQU(12019402608892327416),
+ KQU(17603584559765897352), KQU(11367529582073647975),
+ KQU( 8211476043518436050), KQU( 8676849804070323674),
+ KQU(18431829230394475730), KQU(10490177861361247904),
+ KQU( 9508720602025651349), KQU( 7409627448555722700),
+ KQU( 5804047018862729008), KQU(11943858176893142594),
+ KQU(11908095418933847092), KQU( 5415449345715887652),
+ KQU( 1554022699166156407), KQU( 9073322106406017161),
+ KQU( 7080630967969047082), KQU(18049736940860732943),
+ KQU(12748714242594196794), KQU( 1226992415735156741),
+ KQU(17900981019609531193), KQU(11720739744008710999),
+ KQU( 3006400683394775434), KQU(11347974011751996028),
+ KQU( 3316999628257954608), KQU( 8384484563557639101),
+ KQU(18117794685961729767), KQU( 1900145025596618194),
+ KQU(17459527840632892676), KQU( 5634784101865710994),
+ KQU( 7918619300292897158), KQU( 3146577625026301350),
+ KQU( 9955212856499068767), KQU( 1873995843681746975),
+ KQU( 1561487759967972194), KQU( 8322718804375878474),
+ KQU(11300284215327028366), KQU( 4667391032508998982),
+ KQU( 9820104494306625580), KQU(17922397968599970610),
+ KQU( 1784690461886786712), KQU(14940365084341346821),
+ KQU( 5348719575594186181), KQU(10720419084507855261),
+ KQU(14210394354145143274), KQU( 2426468692164000131),
+ KQU(16271062114607059202), KQU(14851904092357070247),
+ KQU( 6524493015693121897), KQU( 9825473835127138531),
+ KQU(14222500616268569578), KQU(15521484052007487468),
+ KQU(14462579404124614699), KQU(11012375590820665520),
+ KQU(11625327350536084927), KQU(14452017765243785417),
+ KQU( 9989342263518766305), KQU( 3640105471101803790),
+ KQU( 4749866455897513242), KQU(13963064946736312044),
+ KQU(10007416591973223791), KQU(18314132234717431115),
+ KQU( 3286596588617483450), KQU( 7726163455370818765),
+ KQU( 7575454721115379328), KQU( 5308331576437663422),
+ KQU(18288821894903530934), KQU( 8028405805410554106),
+ KQU(15744019832103296628), KQU( 149765559630932100),
+ KQU( 6137705557200071977), KQU(14513416315434803615),
+ KQU(11665702820128984473), KQU( 218926670505601386),
+ KQU( 6868675028717769519), KQU(15282016569441512302),
+ KQU( 5707000497782960236), KQU( 6671120586555079567),
+ KQU( 2194098052618985448), KQU(16849577895477330978),
+ KQU(12957148471017466283), KQU( 1997805535404859393),
+ KQU( 1180721060263860490), KQU(13206391310193756958),
+ KQU(12980208674461861797), KQU( 3825967775058875366),
+ KQU(17543433670782042631), KQU( 1518339070120322730),
+ KQU(16344584340890991669), KQU( 2611327165318529819),
+ KQU(11265022723283422529), KQU( 4001552800373196817),
+ KQU(14509595890079346161), KQU( 3528717165416234562),
+ KQU(18153222571501914072), KQU( 9387182977209744425),
+ KQU(10064342315985580021), KQU(11373678413215253977),
+ KQU( 2308457853228798099), KQU( 9729042942839545302),
+ KQU( 7833785471140127746), KQU( 6351049900319844436),
+ KQU(14454610627133496067), KQU(12533175683634819111),
+ KQU(15570163926716513029), KQU(13356980519185762498)
+};
+
+TEST_BEGIN(test_gen_rand_32)
+{
+ uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint32_t r32;
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+ "Array size too small");
+ ctx = init_gen_rand(1234);
+ fill_array32(ctx, array32, BLOCK_SIZE);
+ fill_array32(ctx, array32_2, BLOCK_SIZE);
+ fini_gen_rand(ctx);
+
+ ctx = init_gen_rand(1234);
+ for (i = 0; i < BLOCK_SIZE; i++) {
+ if (i < COUNT_1) {
+ assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32[i],
+ "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32_2[i],
+ "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+ r32);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_by_array_32)
+{
+ uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
+ uint32_t r32;
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+ "Array size too small");
+ ctx = init_by_array(ini, 4);
+ fill_array32(ctx, array32, BLOCK_SIZE);
+ fill_array32(ctx, array32_2, BLOCK_SIZE);
+ fini_gen_rand(ctx);
+
+ ctx = init_by_array(ini, 4);
+ for (i = 0; i < BLOCK_SIZE; i++) {
+ if (i < COUNT_1) {
+ assert_u32_eq(array32[i], init_by_array_32_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32[i],
+ "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32_2[i],
+ "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+ r32);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_gen_rand_64)
+{
+ uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint64_t r;
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ "Array size too small");
+ ctx = init_gen_rand(4321);
+ fill_array64(ctx, array64, BLOCK_SIZE64);
+ fill_array64(ctx, array64_2, BLOCK_SIZE64);
+ fini_gen_rand(ctx);
+
+ ctx = init_gen_rand(4321);
+ for (i = 0; i < BLOCK_SIZE64; i++) {
+ if (i < COUNT_1) {
+ assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64[i],
+ "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
+ array64[i], r);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64_2[i],
+ "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
+ array64_2[i], r);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_by_array_64)
+{
+ uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint64_t r;
+ uint32_t ini[] = {5, 4, 3, 2, 1};
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ "Array size too small");
+ ctx = init_by_array(ini, 5);
+ fill_array64(ctx, array64, BLOCK_SIZE64);
+ fill_array64(ctx, array64_2, BLOCK_SIZE64);
+ fini_gen_rand(ctx);
+
+ ctx = init_by_array(ini, 5);
+ for (i = 0; i < BLOCK_SIZE64; i++) {
+ if (i < COUNT_1) {
+ assert_u64_eq(array64[i], init_by_array_64_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64[i],
+ "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
+ array64[i], r);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64_2[i],
+ "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
+ array64_2[i], r);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_gen_rand_32,
+ test_by_array_32,
+ test_gen_rand_64,
+ test_by_array_64));
+}
diff --git a/memory/jemalloc/src/test/unit/a0.c b/memory/jemalloc/src/test/unit/a0.c
new file mode 100644
index 000000000..b9ba45a3d
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/a0.c
@@ -0,0 +1,19 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_a0)
+{
+ void *p;
+
+ p = a0malloc(1);
+ assert_ptr_not_null(p, "Unexpected a0malloc() error");
+ a0dalloc(p);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test_no_malloc_init(
+ test_a0));
+}
diff --git a/memory/jemalloc/src/test/unit/arena_reset.c b/memory/jemalloc/src/test/unit/arena_reset.c
new file mode 100644
index 000000000..8ba36c21f
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/arena_reset.c
@@ -0,0 +1,159 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf = "prof:true,lg_prof_sample:0";
+#endif
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+ return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+ return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_small_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.bin.0.size", ind));
+}
+
+static size_t
+get_large_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.lrun.0.size", ind));
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_arena_reset)
+{
+#define NHUGE 4
+ unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
+ size_t sz, miblen;
+ void **ptrs;
+ int flags;
+ size_t mib[3];
+ tsdn_t *tsdn;
+
+ test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
+ && unlikely(opt_quarantine)));
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ nsmall = get_nsmall();
+ nlarge = get_nlarge();
+ nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
+ nptrs = nsmall + nlarge + nhuge;
+ ptrs = (void **)malloc(nptrs * sizeof(void *));
+ assert_ptr_not_null(ptrs, "Unexpected malloc() failure");
+
+ /* Allocate objects with a wide range of sizes. */
+ for (i = 0; i < nsmall; i++) {
+ sz = get_small_size(i);
+ ptrs[i] = mallocx(sz, flags);
+ assert_ptr_not_null(ptrs[i],
+ "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+ }
+ for (i = 0; i < nlarge; i++) {
+ sz = get_large_size(i);
+ ptrs[nsmall + i] = mallocx(sz, flags);
+ assert_ptr_not_null(ptrs[nsmall + i],
+ "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+ }
+ for (i = 0; i < nhuge; i++) {
+ sz = get_huge_size(i);
+ ptrs[nsmall + nlarge + i] = mallocx(sz, flags);
+ assert_ptr_not_null(ptrs[nsmall + nlarge + i],
+ "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+ }
+
+ tsdn = tsdn_fetch();
+
+ /* Verify allocations. */
+ for (i = 0; i < nptrs; i++) {
+ assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
+ "Allocation should have queryable size");
+ }
+
+ /* Reset. */
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+
+ /* Verify allocations no longer exist. */
+ for (i = 0; i < nptrs; i++) {
+ assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0,
+ "Allocation should no longer exist");
+ }
+
+ free(ptrs);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_arena_reset));
+}
diff --git a/memory/jemalloc/src/test/unit/atomic.c b/memory/jemalloc/src/test/unit/atomic.c
new file mode 100644
index 000000000..bdd74f659
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/atomic.c
@@ -0,0 +1,122 @@
+#include "test/jemalloc_test.h"
+
+#define TEST_STRUCT(p, t) \
+struct p##_test_s { \
+ t accum0; \
+ t x; \
+ t s; \
+}; \
+typedef struct p##_test_s p##_test_t;
+
+#define TEST_BODY(p, t, tc, ta, FMT) do { \
+ const p##_test_t tests[] = { \
+ {(t)-1, (t)-1, (t)-2}, \
+ {(t)-1, (t) 0, (t)-2}, \
+ {(t)-1, (t) 1, (t)-2}, \
+ \
+ {(t) 0, (t)-1, (t)-2}, \
+ {(t) 0, (t) 0, (t)-2}, \
+ {(t) 0, (t) 1, (t)-2}, \
+ \
+ {(t) 1, (t)-1, (t)-2}, \
+ {(t) 1, (t) 0, (t)-2}, \
+ {(t) 1, (t) 1, (t)-2}, \
+ \
+ {(t)0, (t)-(1 << 22), (t)-2}, \
+ {(t)0, (t)(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)(1 << 22), (t)-2} \
+ }; \
+ unsigned i; \
+ \
+ for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \
+ bool err; \
+ t accum = tests[i].accum0; \
+ assert_##ta##_eq(atomic_read_##p(&accum), \
+ tests[i].accum0, \
+ "Erroneous read, i=%u", i); \
+ \
+ assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \
+ (t)((tc)tests[i].accum0 + (tc)tests[i].x), \
+ "i=%u, accum=%"FMT", x=%"FMT, \
+ i, tests[i].accum0, tests[i].x); \
+ assert_##ta##_eq(atomic_read_##p(&accum), accum, \
+ "Erroneous add, i=%u", i); \
+ \
+ accum = tests[i].accum0; \
+ assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \
+ (t)((tc)tests[i].accum0 - (tc)tests[i].x), \
+ "i=%u, accum=%"FMT", x=%"FMT, \
+ i, tests[i].accum0, tests[i].x); \
+ assert_##ta##_eq(atomic_read_##p(&accum), accum, \
+ "Erroneous sub, i=%u", i); \
+ \
+ accum = tests[i].accum0; \
+ err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \
+ assert_b_eq(err, tests[i].accum0 != tests[i].x, \
+ "Erroneous cas success/failure result"); \
+ assert_##ta##_eq(accum, err ? tests[i].accum0 : \
+ tests[i].s, "Erroneous cas effect, i=%u", i); \
+ \
+ accum = tests[i].accum0; \
+ atomic_write_##p(&accum, tests[i].s); \
+ assert_##ta##_eq(accum, tests[i].s, \
+ "Erroneous write, i=%u", i); \
+ } \
+} while (0)
+
+TEST_STRUCT(uint64, uint64_t)
+TEST_BEGIN(test_atomic_uint64)
+{
+
+#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+ test_skip("64-bit atomic operations not supported");
+#else
+ TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64);
+#endif
+}
+TEST_END
+
+TEST_STRUCT(uint32, uint32_t)
+TEST_BEGIN(test_atomic_uint32)
+{
+
+ TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32);
+}
+TEST_END
+
+TEST_STRUCT(p, void *)
+TEST_BEGIN(test_atomic_p)
+{
+
+ TEST_BODY(p, void *, uintptr_t, ptr, "p");
+}
+TEST_END
+
+TEST_STRUCT(z, size_t)
+TEST_BEGIN(test_atomic_z)
+{
+
+ TEST_BODY(z, size_t, size_t, zu, "#zx");
+}
+TEST_END
+
+TEST_STRUCT(u, unsigned)
+TEST_BEGIN(test_atomic_u)
+{
+
+ TEST_BODY(u, unsigned, unsigned, u, "#x");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_atomic_uint64,
+ test_atomic_uint32,
+ test_atomic_p,
+ test_atomic_z,
+ test_atomic_u));
+}
diff --git a/memory/jemalloc/src/test/unit/bitmap.c b/memory/jemalloc/src/test/unit/bitmap.c
new file mode 100644
index 000000000..a2dd54630
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/bitmap.c
@@ -0,0 +1,163 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_bitmap_size)
+{
+ size_t i, prev_size;
+
+ prev_size = 0;
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
+ bitmap_info_t binfo;
+ size_t size;
+
+ bitmap_info_init(&binfo, i);
+ size = bitmap_size(&binfo);
+ assert_true(size >= prev_size,
+ "Bitmap size is smaller than expected");
+ prev_size = size;
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_bitmap_init)
+{
+ size_t i;
+
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, i);
+ {
+ size_t j;
+ bitmap_t *bitmap = (bitmap_t *)malloc(
+ bitmap_size(&binfo));
+ bitmap_init(bitmap, &binfo);
+
+ for (j = 0; j < i; j++) {
+ assert_false(bitmap_get(bitmap, &binfo, j),
+ "Bit should be unset");
+ }
+ free(bitmap);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_bitmap_set)
+{
+ size_t i;
+
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, i);
+ {
+ size_t j;
+ bitmap_t *bitmap = (bitmap_t *)malloc(
+ bitmap_size(&binfo));
+ bitmap_init(bitmap, &binfo);
+
+ for (j = 0; j < i; j++)
+ bitmap_set(bitmap, &binfo, j);
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
+ free(bitmap);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_bitmap_unset)
+{
+ size_t i;
+
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, i);
+ {
+ size_t j;
+ bitmap_t *bitmap = (bitmap_t *)malloc(
+ bitmap_size(&binfo));
+ bitmap_init(bitmap, &binfo);
+
+ for (j = 0; j < i; j++)
+ bitmap_set(bitmap, &binfo, j);
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
+ for (j = 0; j < i; j++)
+ bitmap_unset(bitmap, &binfo, j);
+ for (j = 0; j < i; j++)
+ bitmap_set(bitmap, &binfo, j);
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
+ free(bitmap);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_bitmap_sfu)
+{
+ size_t i;
+
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, i);
+ {
+ size_t j;
+ bitmap_t *bitmap = (bitmap_t *)malloc(
+ bitmap_size(&binfo));
+ bitmap_init(bitmap, &binfo);
+
+ /* Iteratively set bits starting at the beginning. */
+ for (j = 0; j < i; j++) {
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
+ "First unset bit should be just after "
+ "previous first unset bit");
+ }
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
+
+ /*
+ * Iteratively unset bits starting at the end, and
+ * verify that bitmap_sfu() reaches the unset bits.
+ */
+ for (j = i - 1; j < i; j--) { /* (i..0] */
+ bitmap_unset(bitmap, &binfo, j);
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
+ "First unset bit should the bit previously "
+ "unset");
+ bitmap_unset(bitmap, &binfo, j);
+ }
+ assert_false(bitmap_get(bitmap, &binfo, 0),
+ "Bit should be unset");
+
+ /*
+ * Iteratively set bits starting at the beginning, and
+ * verify that bitmap_sfu() looks past them.
+ */
+ for (j = 1; j < i; j++) {
+ bitmap_set(bitmap, &binfo, j - 1);
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
+ "First unset bit should be just after the "
+ "bit previously set");
+ bitmap_unset(bitmap, &binfo, j);
+ }
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
+ "First unset bit should be the last bit");
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
+ free(bitmap);
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_bitmap_size,
+ test_bitmap_init,
+ test_bitmap_set,
+ test_bitmap_unset,
+ test_bitmap_sfu));
+}
diff --git a/memory/jemalloc/src/test/unit/ckh.c b/memory/jemalloc/src/test/unit/ckh.c
new file mode 100644
index 000000000..2cbc22688
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/ckh.c
@@ -0,0 +1,214 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_new_delete)
+{
+ tsd_t *tsd;
+ ckh_t ckh;
+
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ ckh_string_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
+
+ assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_count_insert_search_remove)
+{
+ tsd_t *tsd;
+ ckh_t ckh;
+ const char *strs[] = {
+ "a string",
+ "A string",
+ "a string.",
+ "A string."
+ };
+ const char *missing = "A string not in the hash table.";
+ size_t i;
+
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ ckh_string_keycomp), "Unexpected ckh_new() error");
+ assert_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu", ZU(0),
+ ckh_count(&ckh));
+
+ /* Insert. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ ckh_insert(tsd, &ckh, strs[i], strs[i]);
+ assert_zu_eq(ckh_count(&ckh), i+1,
+ "ckh_count() should return %zu, but it returned %zu", i+1,
+ ckh_count(&ckh));
+ }
+
+ /* Search. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ assert_false(ckh_search(&ckh, strs[i], kp, vp),
+ "Unexpected ckh_search() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
+ }
+ assert_true(ckh_search(&ckh, missing, NULL, NULL),
+ "Unexpected ckh_search() success");
+
+ /* Remove. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
+ "Unexpected ckh_remove() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
+ assert_zu_eq(ckh_count(&ckh),
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ "ckh_count() should return %zu, but it returned %zu",
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ ckh_count(&ckh));
+ }
+
+ ckh_delete(tsd, &ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_insert_iter_remove)
+{
+#define NITEMS ZU(1000)
+ tsd_t *tsd;
+ ckh_t ckh;
+ void **p[NITEMS];
+ void *q, *r;
+ size_t i;
+
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+
+ for (i = 0; i < NITEMS; i++) {
+ p[i] = mallocx(i+1, 0);
+ assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ size_t j;
+
+ for (j = i; j < NITEMS; j++) {
+ assert_false(ckh_insert(tsd, &ckh, p[j], p[j]),
+ "Unexpected ckh_insert() failure");
+ assert_false(ckh_search(&ckh, p[j], &q, &r),
+ "Unexpected ckh_search() failure");
+ assert_ptr_eq(p[j], q, "Key pointer mismatch");
+ assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ }
+
+ assert_zu_eq(ckh_count(&ckh), NITEMS,
+ "ckh_count() should return %zu, but it returned %zu",
+ NITEMS, ckh_count(&ckh));
+
+ for (j = i + 1; j < NITEMS; j++) {
+ assert_false(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() failure");
+ assert_ptr_eq(p[j], q, "Key pointer mismatch");
+ assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ assert_true(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() success");
+ assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() success");
+ }
+
+ {
+ bool seen[NITEMS];
+ size_t tabind;
+
+ memset(seen, 0, sizeof(seen));
+
+ for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
+ size_t k;
+
+ assert_ptr_eq(q, r, "Key and val not equal");
+
+ for (k = 0; k < NITEMS; k++) {
+ if (p[k] == q) {
+ assert_false(seen[k],
+ "Item %zu already seen", k);
+ seen[k] = true;
+ break;
+ }
+ }
+ }
+
+ for (j = 0; j < i + 1; j++)
+ assert_true(seen[j], "Item %zu not seen", j);
+ for (; j < NITEMS; j++)
+ assert_false(seen[j], "Item %zu seen", j);
+ }
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ assert_false(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() failure");
+ assert_ptr_eq(p[i], q, "Key pointer mismatch");
+ assert_ptr_eq(p[i], r, "Value pointer mismatch");
+ assert_true(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() success");
+ assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() success");
+ dallocx(p[i], 0);
+ }
+
+ assert_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu",
+ ZU(0), ckh_count(&ckh));
+ ckh_delete(tsd, &ckh);
+#undef NITEMS
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_new_delete,
+ test_count_insert_search_remove,
+ test_insert_iter_remove));
+}
diff --git a/memory/jemalloc/src/test/unit/decay.c b/memory/jemalloc/src/test/unit/decay.c
new file mode 100644
index 000000000..e169ae24e
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/decay.c
@@ -0,0 +1,374 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "purge:decay,decay_time:1";
+
+static nstime_monotonic_t *nstime_monotonic_orig;
+static nstime_update_t *nstime_update_orig;
+
+static unsigned nupdates_mock;
+static nstime_t time_mock;
+static bool monotonic_mock;
+
+static bool
+nstime_monotonic_mock(void)
+{
+
+ return (monotonic_mock);
+}
+
+static bool
+nstime_update_mock(nstime_t *time)
+{
+
+ nupdates_mock++;
+ if (monotonic_mock)
+ nstime_copy(time, &time_mock);
+ return (!monotonic_mock);
+}
+
+TEST_BEGIN(test_decay_ticks)
+{
+ ticker_t *decay_ticker;
+ unsigned tick0, tick1;
+ size_t sz, huge0, large0;
+ void *p;
+
+ test_skip_if(opt_purge != purge_mode_decay);
+
+ decay_ticker = decay_ticker_get(tsd_fetch(), 0);
+ assert_ptr_not_null(decay_ticker,
+ "Unexpected failure getting decay ticker");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ /*
+ * Test the standard APIs using a huge size class, since we can't
+ * control tcache interactions (except by completely disabling tcache
+ * for the entire test program).
+ */
+
+ /* malloc(). */
+ tick0 = ticker_read(decay_ticker);
+ p = malloc(huge0);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
+ /* free(). */
+ tick0 = ticker_read(decay_ticker);
+ free(p);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
+
+ /* calloc(). */
+ tick0 = ticker_read(decay_ticker);
+ p = calloc(1, huge0);
+ assert_ptr_not_null(p, "Unexpected calloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
+ free(p);
+
+ /* posix_memalign(). */
+ tick0 = ticker_read(decay_ticker);
+ assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
+ "Unexpected posix_memalign() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during posix_memalign()");
+ free(p);
+
+ /* aligned_alloc(). */
+ tick0 = ticker_read(decay_ticker);
+ p = aligned_alloc(sizeof(size_t), huge0);
+ assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during aligned_alloc()");
+ free(p);
+
+ /* realloc(). */
+ /* Allocate. */
+ tick0 = ticker_read(decay_ticker);
+ p = realloc(NULL, huge0);
+ assert_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Reallocate. */
+ tick0 = ticker_read(decay_ticker);
+ p = realloc(p, huge0);
+ assert_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Deallocate. */
+ tick0 = ticker_read(decay_ticker);
+ realloc(p, 0);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+
+ /*
+ * Test the *allocx() APIs using huge, large, and small size classes,
+ * with tcache explicitly disabled.
+ */
+ {
+ unsigned i;
+ size_t allocx_sizes[3];
+ allocx_sizes[0] = huge0;
+ allocx_sizes[1] = large0;
+ allocx_sizes[2] = 1;
+
+ for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
+ sz = allocx_sizes[i];
+
+ /* mallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during mallocx() (sz=%zu)",
+ sz);
+ /* rallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "Unexpected rallocx() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during rallocx() (sz=%zu)",
+ sz);
+ /* xallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during xallocx() (sz=%zu)",
+ sz);
+ /* dallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ dallocx(p, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during dallocx() (sz=%zu)",
+ sz);
+ /* sdallocx(). */
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick0 = ticker_read(decay_ticker);
+ sdallocx(p, sz, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during sdallocx() "
+ "(sz=%zu)", sz);
+ }
+ }
+
+ /*
+ * Test tcache fill/flush interactions for large and small size classes,
+ * using an explicit tcache.
+ */
+ if (config_tcache) {
+ unsigned tcache_ind, i;
+ size_t tcache_sizes[2];
+ tcache_sizes[0] = large0;
+ tcache_sizes[1] = 1;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl failure");
+
+ for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
+ sz = tcache_sizes[i];
+
+ /* tcache fill. */
+ tick0 = ticker_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache fill "
+ "(sz=%zu)", sz);
+ /* tcache flush. */
+ dallocx(p, MALLOCX_TCACHE(tcache_ind));
+ tick0 = ticker_read(decay_ticker);
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL,
+ &tcache_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache flush "
+ "(sz=%zu)", sz);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_decay_ticker)
+{
+#define NPS 1024
+ int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+ uint64_t epoch;
+ uint64_t npurge0 = 0;
+ uint64_t npurge1 = 0;
+ size_t sz, large;
+ unsigned i, nupdates0;
+ nstime_t time, decay_time, deadline;
+
+ test_skip_if(opt_purge != purge_mode_decay);
+
+ /*
+ * Allocate a bunch of large objects, pause the clock, deallocate the
+ * objects, restore the clock, then [md]allocx() in a tight loop to
+ * verify the ticker triggers purging.
+ */
+
+ if (config_tcache) {
+ size_t tcache_max;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+ large = nallocx(tcache_max + 1, flags);
+ } else {
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0),
+ 0, "Unexpected mallctl failure");
+ }
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+ "Unexpected mallctl failure");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
+ config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+
+ for (i = 0; i < NPS; i++) {
+ ps[i] = mallocx(large, flags);
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+ }
+
+ nupdates_mock = 0;
+ nstime_init(&time_mock, 0);
+ nstime_update(&time_mock);
+ monotonic_mock = true;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (i = 0; i < NPS; i++) {
+ dallocx(ps[i], flags);
+ nupdates0 = nupdates_mock;
+ assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.decay failure");
+ assert_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+
+ nstime_init(&time, 0);
+ nstime_update(&time);
+ nstime_init2(&decay_time, opt_decay_time, 0);
+ nstime_copy(&deadline, &time);
+ nstime_add(&deadline, &decay_time);
+ do {
+ for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
+ void *p = mallocx(1, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, flags);
+ }
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch,
+ sizeof(uint64_t)), 0, "Unexpected mallctl failure");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz,
+ NULL, 0), config_stats ? 0 : ENOENT,
+ "Unexpected mallctl result");
+
+ nstime_update(&time);
+ } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
+
+ if (config_stats)
+ assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_nonmonotonic)
+{
+#define NPS (SMOOTHSTEP_NSTEPS + 1)
+ int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+ uint64_t epoch;
+ uint64_t npurge0 = 0;
+ uint64_t npurge1 = 0;
+ size_t sz, large0;
+ unsigned i, nupdates0;
+
+ test_skip_if(opt_purge != purge_mode_decay);
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+ "Unexpected mallctl failure");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
+ config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+
+ nupdates_mock = 0;
+ nstime_init(&time_mock, 0);
+ nstime_update(&time_mock);
+ monotonic_mock = false;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (i = 0; i < NPS; i++) {
+ ps[i] = mallocx(large0, flags);
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NPS; i++) {
+ dallocx(ps[i], flags);
+ nupdates0 = nupdates_mock;
+ assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.decay failure");
+ assert_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
+ "Unexpected mallctl failure");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0),
+ config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+
+ if (config_stats)
+ assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_decay_ticks,
+ test_decay_ticker,
+ test_decay_nonmonotonic));
+}
diff --git a/memory/jemalloc/src/test/unit/fork.c b/memory/jemalloc/src/test/unit/fork.c
new file mode 100644
index 000000000..c530797c4
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/fork.c
@@ -0,0 +1,64 @@
+#include "test/jemalloc_test.h"
+
+#ifndef _WIN32
+#include <sys/wait.h>
+#endif
+
+TEST_BEGIN(test_fork)
+{
+#ifndef _WIN32
+ void *p;
+ pid_t pid;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+ pid = fork();
+
+ free(p);
+
+ p = malloc(64);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+ free(p);
+
+ if (pid == -1) {
+ /* Error. */
+ test_fail("Unexpected fork() failure");
+ } else if (pid == 0) {
+ /* Child. */
+ _exit(0);
+ } else {
+ int status;
+
+ /* Parent. */
+ while (true) {
+ if (waitpid(pid, &status, 0) == -1)
+ test_fail("Unexpected waitpid() failure");
+ if (WIFSIGNALED(status)) {
+ test_fail("Unexpected child termination due to "
+ "signal %d", WTERMSIG(status));
+ break;
+ }
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) != 0) {
+ test_fail(
+ "Unexpected child exit value %d",
+ WEXITSTATUS(status));
+ }
+ break;
+ }
+ }
+ }
+#else
+ test_skip("fork(2) is irrelevant to Windows");
+#endif
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_fork));
+}
diff --git a/memory/jemalloc/src/test/unit/hash.c b/memory/jemalloc/src/test/unit/hash.c
new file mode 100644
index 000000000..010c9d76f
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/hash.c
@@ -0,0 +1,185 @@
+/*
+ * This file is based on code that is part of SMHasher
+ * (https://code.google.com/p/smhasher/), and is subject to the MIT license
+ * (http://www.opensource.org/licenses/mit-license.php). Both email addresses
+ * associated with the source code's revision history belong to Austin Appleby,
+ * and the revision history ranges from 2010 to 2012. Therefore the copyright
+ * and license are here taken to be:
+ *
+ * Copyright (c) 2010-2012 Austin Appleby
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "test/jemalloc_test.h"
+
+typedef enum {
+ hash_variant_x86_32,
+ hash_variant_x86_128,
+ hash_variant_x64_128
+} hash_variant_t;
+
+static int
+hash_variant_bits(hash_variant_t variant)
+{
+
+ switch (variant) {
+ case hash_variant_x86_32: return (32);
+ case hash_variant_x86_128: return (128);
+ case hash_variant_x64_128: return (128);
+ default: not_reached();
+ }
+}
+
+static const char *
+hash_variant_string(hash_variant_t variant)
+{
+
+ switch (variant) {
+ case hash_variant_x86_32: return ("hash_x86_32");
+ case hash_variant_x86_128: return ("hash_x86_128");
+ case hash_variant_x64_128: return ("hash_x64_128");
+ default: not_reached();
+ }
+}
+
+#define KEY_SIZE 256
+static void
+hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
+{
+ const int hashbytes = hash_variant_bits(variant) / 8;
+ const int hashes_size = hashbytes * 256;
+ VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
+ VARIABLE_ARRAY(uint8_t, final, hashbytes);
+ unsigned i;
+ uint32_t computed, expected;
+
+ memset(key, 0, KEY_SIZE);
+ memset(hashes, 0, hashes_size);
+ memset(final, 0, hashbytes);
+
+	/*
+	 * Hash keys of the form {}, {0}, {0,1}, ..., {0,1,...,254} (the first
+	 * i bytes of key), using 256-i as the seed for the key of length i.
+	 */
+ for (i = 0; i < 256; i++) {
+ key[i] = (uint8_t)i;
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out;
+ out = hash_x86_32(key, i, 256-i);
+ memcpy(&hashes[i*hashbytes], &out, hashbytes);
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } default: not_reached();
+ }
+ }
+
+ /* Hash the result array. */
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out = hash_x86_32(hashes, hashes_size, 0);
+ memcpy(final, &out, sizeof(out));
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(hashes, hashes_size, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(hashes, hashes_size, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } default: not_reached();
+ }
+
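+	/*
+	 * The SMHasher verification value is the first four bytes of the
+	 * final hash, interpreted as a little-endian 32-bit integer.
+	 */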
+ computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
+ (final[3] << 24);
+
+ switch (variant) {
+#ifdef JEMALLOC_BIG_ENDIAN
+ case hash_variant_x86_32: expected = 0x6213303eU; break;
+ case hash_variant_x86_128: expected = 0x266820caU; break;
+ case hash_variant_x64_128: expected = 0xcc622b6fU; break;
+#else
+ case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
+ case hash_variant_x86_128: expected = 0xb3ece62aU; break;
+ case hash_variant_x64_128: expected = 0x6384ba69U; break;
+#endif
+ default: not_reached();
+ }
+
+ assert_u32_eq(computed, expected,
+ "Hash mismatch for %s(): expected %#x but got %#x",
+ hash_variant_string(variant), expected, computed);
+}
+
+static void
+hash_variant_verify(hash_variant_t variant)
+{
+#define MAX_ALIGN 16
+ uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
+ unsigned i;
+
+ for (i = 0; i < MAX_ALIGN; i++)
+ hash_variant_verify_key(variant, &key[i]);
+#undef MAX_ALIGN
+}
+#undef KEY_SIZE
+
+TEST_BEGIN(test_hash_x86_32)
+{
+
+ hash_variant_verify(hash_variant_x86_32);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x86_128)
+{
+
+ hash_variant_verify(hash_variant_x86_128);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x64_128)
+{
+
+ hash_variant_verify(hash_variant_x64_128);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_hash_x86_32,
+ test_hash_x86_128,
+ test_hash_x64_128));
+}
diff --git a/memory/jemalloc/src/test/unit/junk.c b/memory/jemalloc/src/test/unit/junk.c
new file mode 100644
index 000000000..460bd524d
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/junk.c
@@ -0,0 +1,253 @@
+#include "test/jemalloc_test.h"
+
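+/*
+ * junk_alloc.c and junk_free.c re-include this file with
+ * JEMALLOC_TEST_JUNK_OPT predefined, so "junk:true" is only the default when
+ * neither variant-specific option is requested.
+ */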
+#ifdef JEMALLOC_FILL
+# ifndef JEMALLOC_TEST_JUNK_OPT
+# define JEMALLOC_TEST_JUNK_OPT "junk:true"
+# endif
+const char *malloc_conf =
+ "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT;
+#endif
+
+static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
+static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
+static huge_dalloc_junk_t *huge_dalloc_junk_orig;
+static void *watch_for_junking;
+static bool saw_junking;
+
+static void
+watch_junking(void *p)
+{
+
+ watch_for_junking = p;
+ saw_junking = false;
+}
+
+static void
+arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t i;
+
+ arena_dalloc_junk_small_orig(ptr, bin_info);
+ for (i = 0; i < bin_info->reg_size; i++) {
+ assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+ "Missing junk fill for byte %zu/%zu of deallocated region",
+ i, bin_info->reg_size);
+ }
+ if (ptr == watch_for_junking)
+ saw_junking = true;
+}
+
+static void
+arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
+{
+ size_t i;
+
+ arena_dalloc_junk_large_orig(ptr, usize);
+ for (i = 0; i < usize; i++) {
+ assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+ "Missing junk fill for byte %zu/%zu of deallocated region",
+ i, usize);
+ }
+ if (ptr == watch_for_junking)
+ saw_junking = true;
+}
+
+static void
+huge_dalloc_junk_intercept(void *ptr, size_t usize)
+{
+
+ huge_dalloc_junk_orig(ptr, usize);
+ /*
+ * The conditions under which junk filling actually occurs are nuanced
+ * enough that it doesn't make sense to duplicate the decision logic in
+ * test code, so don't actually check that the region is junk-filled.
+ */
+ if (ptr == watch_for_junking)
+ saw_junking = true;
+}
+
+static void
+test_junk(size_t sz_min, size_t sz_max)
+{
+ uint8_t *s;
+ size_t sz_prev, sz, i;
+
+ if (opt_junk_free) {
+ arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
+ arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
+ arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
+ arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
+ huge_dalloc_junk_orig = huge_dalloc_junk;
+ huge_dalloc_junk = huge_dalloc_junk_intercept;
+ }
+
+ sz_prev = 0;
+ s = (uint8_t *)mallocx(sz_min, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
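+	/*
+	 * Grow the region one size class at a time, checking that newly
+	 * exposed bytes are junk-filled on allocation (if junk-on-alloc is
+	 * enabled) and that the old region is junked when rallocx() moves it
+	 * (if junk-on-free is enabled).
+	 */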
+ for (sz = sallocx(s, 0); sz <= sz_max;
+ sz_prev = sz, sz = sallocx(s, 0)) {
+ if (sz_prev > 0) {
+ assert_u_eq(s[0], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ ZU(0), sz_prev);
+ assert_u_eq(s[sz_prev-1], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ sz_prev-1, sz_prev);
+ }
+
+ for (i = sz_prev; i < sz; i++) {
+ if (opt_junk_alloc) {
+ assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
+ "Newly allocated byte %zu/%zu isn't "
+ "junk-filled", i, sz);
+ }
+ s[i] = 'a';
+ }
+
+ if (xallocx(s, sz+1, 0, 0) == sz) {
+ watch_junking(s);
+ s = (uint8_t *)rallocx(s, sz+1, 0);
+ assert_ptr_not_null((void *)s,
+ "Unexpected rallocx() failure");
+ assert_true(!opt_junk_free || saw_junking,
+ "Expected region of size %zu to be junk-filled",
+ sz);
+ }
+ }
+
+ watch_junking(s);
+ dallocx(s, 0);
+ assert_true(!opt_junk_free || saw_junking,
+ "Expected region of size %zu to be junk-filled", sz);
+
+ if (opt_junk_free) {
+ arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
+ arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
+ huge_dalloc_junk = huge_dalloc_junk_orig;
+ }
+}
+
+TEST_BEGIN(test_junk_small)
+{
+
+ test_skip_if(!config_fill);
+ test_junk(1, SMALL_MAXCLASS-1);
+}
+TEST_END
+
+TEST_BEGIN(test_junk_large)
+{
+
+ test_skip_if(!config_fill);
+ test_junk(SMALL_MAXCLASS+1, large_maxclass);
+}
+TEST_END
+
+TEST_BEGIN(test_junk_huge)
+{
+
+ test_skip_if(!config_fill);
+ test_junk(large_maxclass+1, chunksize*2);
+}
+TEST_END
+
+arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
+static void *most_recently_trimmed;
+
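+/*
+ * Find the largest request size that maps to a smaller size class than
+ * "size", so that a rallocx() to the returned size shrinks the allocation.
+ */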
+static size_t
+shrink_size(size_t size)
+{
+ size_t shrink_size;
+
+ for (shrink_size = size - 1; nallocx(shrink_size, 0) == size;
+ shrink_size--)
+ ; /* Do nothing. */
+
+ return (shrink_size);
+}
+
+static void
+arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
+{
+
+ arena_ralloc_junk_large_orig(ptr, old_usize, usize);
+ assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
+ assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
+ most_recently_trimmed = ptr;
+}
+
+TEST_BEGIN(test_junk_large_ralloc_shrink)
+{
+ void *p1, *p2;
+
+ p1 = mallocx(large_maxclass, 0);
+ assert_ptr_not_null(p1, "Unexpected mallocx() failure");
+
+ arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
+ arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
+
+ p2 = rallocx(p1, shrink_size(large_maxclass), 0);
+ assert_ptr_eq(p1, p2, "Unexpected move during shrink");
+
+ arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
+
+ assert_ptr_eq(most_recently_trimmed, p1,
+ "Expected trimmed portion of region to be junk-filled");
+}
+TEST_END
+
+static bool detected_redzone_corruption;
+
+static void
+arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ detected_redzone_corruption = true;
+}
+
+TEST_BEGIN(test_junk_redzone)
+{
+ char *s;
+ arena_redzone_corruption_t *arena_redzone_corruption_orig;
+
+ test_skip_if(!config_fill);
+ test_skip_if(!opt_junk_alloc || !opt_junk_free);
+
+ arena_redzone_corruption_orig = arena_redzone_corruption;
+ arena_redzone_corruption = arena_redzone_corruption_replacement;
+
+ /* Test underflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[-1] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ /* Test overflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[sallocx(s, 0)] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ arena_redzone_corruption = arena_redzone_corruption_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_junk_small,
+ test_junk_large,
+ test_junk_huge,
+ test_junk_large_ralloc_shrink,
+ test_junk_redzone));
+}
diff --git a/memory/jemalloc/src/test/unit/junk_alloc.c b/memory/jemalloc/src/test/unit/junk_alloc.c
new file mode 100644
index 000000000..a5895b5c0
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/junk_alloc.c
@@ -0,0 +1,3 @@
+#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
+#include "junk.c"
+#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/memory/jemalloc/src/test/unit/junk_free.c b/memory/jemalloc/src/test/unit/junk_free.c
new file mode 100644
index 000000000..bb5183c90
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/junk_free.c
@@ -0,0 +1,3 @@
+#define JEMALLOC_TEST_JUNK_OPT "junk:free"
+#include "junk.c"
+#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/memory/jemalloc/src/test/unit/lg_chunk.c b/memory/jemalloc/src/test/unit/lg_chunk.c
new file mode 100644
index 000000000..7e5df3814
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/lg_chunk.c
@@ -0,0 +1,26 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Make sure that opt.lg_chunk clamping is sufficient. In practice, this test
+ * program will fail a debug assertion during initialization and abort (rather
+ * than the test soft-failing) if clamping is insufficient.
+ */
+const char *malloc_conf = "lg_chunk:0";
+
+TEST_BEGIN(test_lg_chunk_clamp)
+{
+ void *p;
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_lg_chunk_clamp));
+}
diff --git a/memory/jemalloc/src/test/unit/mallctl.c b/memory/jemalloc/src/test/unit/mallctl.c
new file mode 100644
index 000000000..69f8c20c1
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/mallctl.c
@@ -0,0 +1,731 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_mallctl_errors)
+{
+ uint64_t epoch;
+ size_t sz;
+
+ assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
+ "mallctl() should return ENOENT for non-existent names");
+
+ assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
+ EPERM, "mallctl() should return EPERM on attempt to write "
+ "read-only value");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
+ EINVAL, "mallctl() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
+ EINVAL, "mallctl() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_errors)
+{
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
+ "mallctlnametomib() should return ENOENT for non-existent names");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlbymib_errors)
+{
+ uint64_t epoch;
+ size_t sz;
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
+ strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
+ "attempt to write read-only value");
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
+ sizeof(epoch)-1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
+ sizeof(epoch)+1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_read_write)
+{
+ uint64_t old_epoch, new_epoch;
+ size_t sz = sizeof(old_epoch);
+
+ /* Blind. */
+ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read. */
+ assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Write. */
+ assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
+ 0, "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read+write. */
+ assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
+ sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_short_mib)
+{
+ size_t mib[4];
+ size_t miblen;
+
+ miblen = 3;
+ mib[3] = 42;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ assert_zu_eq(miblen, 3, "Unexpected mib output length");
+ assert_zu_eq(mib[3], 42,
+ "mallctlnametomib() wrote past the end of the input mib");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_config)
+{
+
+#define TEST_MALLCTL_CONFIG(config, t) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
+ 0, "Unexpected mallctl() failure"); \
+ assert_b_eq(oldval, config_##config, "Incorrect config value"); \
+ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_CONFIG(cache_oblivious, bool);
+ TEST_MALLCTL_CONFIG(debug, bool);
+ TEST_MALLCTL_CONFIG(fill, bool);
+ TEST_MALLCTL_CONFIG(lazy_lock, bool);
+ TEST_MALLCTL_CONFIG(malloc_conf, const char *);
+ TEST_MALLCTL_CONFIG(munmap, bool);
+ TEST_MALLCTL_CONFIG(prof, bool);
+ TEST_MALLCTL_CONFIG(prof_libgcc, bool);
+ TEST_MALLCTL_CONFIG(prof_libunwind, bool);
+ TEST_MALLCTL_CONFIG(stats, bool);
+ TEST_MALLCTL_CONFIG(tcache, bool);
+ TEST_MALLCTL_CONFIG(tls, bool);
+ TEST_MALLCTL_CONFIG(utrace, bool);
+ TEST_MALLCTL_CONFIG(valgrind, bool);
+ TEST_MALLCTL_CONFIG(xmalloc, bool);
+
+#undef TEST_MALLCTL_CONFIG
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_opt)
+{
+ bool config_always = true;
+
+#define TEST_MALLCTL_OPT(t, opt, config) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ int expected = config_##config ? 0 : ENOENT; \
+ int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
+ assert_d_eq(result, expected, \
+ "Unexpected mallctl() result for opt."#opt); \
+ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_OPT(bool, abort, always);
+ TEST_MALLCTL_OPT(size_t, lg_chunk, always);
+ TEST_MALLCTL_OPT(const char *, dss, always);
+ TEST_MALLCTL_OPT(unsigned, narenas, always);
+ TEST_MALLCTL_OPT(const char *, purge, always);
+ TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
+ TEST_MALLCTL_OPT(ssize_t, decay_time, always);
+ TEST_MALLCTL_OPT(bool, stats_print, always);
+ TEST_MALLCTL_OPT(const char *, junk, fill);
+ TEST_MALLCTL_OPT(size_t, quarantine, fill);
+ TEST_MALLCTL_OPT(bool, redzone, fill);
+ TEST_MALLCTL_OPT(bool, zero, fill);
+ TEST_MALLCTL_OPT(bool, utrace, utrace);
+ TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
+ TEST_MALLCTL_OPT(bool, tcache, tcache);
+ TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
+ TEST_MALLCTL_OPT(bool, prof, prof);
+ TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
+ TEST_MALLCTL_OPT(bool, prof_active, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
+ TEST_MALLCTL_OPT(bool, prof_accum, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
+ TEST_MALLCTL_OPT(bool, prof_gdump, prof);
+ TEST_MALLCTL_OPT(bool, prof_final, prof);
+ TEST_MALLCTL_OPT(bool, prof_leak, prof);
+
+#undef TEST_MALLCTL_OPT
+}
+TEST_END
+
+TEST_BEGIN(test_manpage_example)
+{
+ unsigned nbins, i;
+ size_t mib[4];
+ size_t len, miblen;
+
+ len = sizeof(nbins);
+ assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ miblen = 4;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
+ 0, "Unexpected mallctlbymib() failure");
+ /* Do something with bin_size... */
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_tcache_none)
+{
+ void *p0, *q, *p1;
+
+ test_skip_if(!config_tcache);
+
+ /* Allocate p and q. */
+ p0 = mallocx(42, 0);
+ assert_ptr_not_null(p0, "Unexpected mallocx() failure");
+ q = mallocx(42, 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+
+ /* Deallocate p and q, but bypass the tcache for q. */
+ dallocx(p0, 0);
+ dallocx(q, MALLOCX_TCACHE_NONE);
+
+ /* Make sure that tcache-based allocation returns p, not q. */
+ p1 = mallocx(42, 0);
+ assert_ptr_not_null(p1, "Unexpected mallocx() failure");
+ assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
+
+ /* Clean up. */
+ dallocx(p1, MALLOCX_TCACHE_NONE);
+}
+TEST_END
+
+TEST_BEGIN(test_tcache)
+{
+#define NTCACHES 10
+ unsigned tis[NTCACHES];
+ void *ps[NTCACHES];
+ void *qs[NTCACHES];
+ unsigned i;
+ size_t sz, psz, qsz;
+
+ test_skip_if(!config_tcache);
+
+ psz = 42;
+ qsz = nallocx(psz, 0) + 1;
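+	/* One byte past psz's size class, so p and q use different bins. */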
+
+ /* Create tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Exercise tcache ID recycling. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Flush empty tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Cache some allocations. */
+ for (i = 0; i < NTCACHES; i++) {
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
+
+ qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
+
+ /* Verify that tcaches allocate cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *p0 = ps[i];
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ assert_ptr_eq(ps[i], p0,
+ "Expected mallocx() to allocate cached region, i=%u", i);
+ }
+
+ /* Verify that reallocation uses cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *q0 = qs[i];
+ qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
+ i);
+ assert_ptr_eq(qs[i], q0,
+ "Expected rallocx() to allocate cached region, i=%u", i);
+ /* Avoid undefined behavior in case of test failure. */
+ if (qs[i] == NULL)
+ qs[i] = ps[i];
+ }
+ for (i = 0; i < NTCACHES; i++)
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+
+ /* Flush some non-empty tcaches. */
+ for (i = 0; i < NTCACHES/2; i++) {
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Destroy tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_thread_arena)
+{
+ unsigned arena_old, arena_new, narenas;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
+ arena_new = narenas - 1;
+ assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure");
+ arena_new = 0;
+ assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_lg_dirty_mult)
+{
+ ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
+ size_t sz = sizeof(ssize_t);
+
+ test_skip_if(opt_purge != purge_mode_ratio);
+
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+
+ lg_dirty_mult = -2;
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ lg_dirty_mult = (sizeof(size_t) << 3);
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
+ lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult
+ = lg_dirty_mult, lg_dirty_mult++) {
+ ssize_t old_lg_dirty_mult;
+
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
+ &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+ assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
+ "Unexpected old arena.0.lg_dirty_mult");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_decay_time)
+{
+ ssize_t decay_time, orig_decay_time, prev_decay_time;
+ size_t sz = sizeof(ssize_t);
+
+ test_skip_if(opt_purge != purge_mode_decay);
+
+ assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+
+ decay_time = -2;
+ assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+ &decay_time, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ decay_time = 0x7fffffff;
+ assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+ &decay_time, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_decay_time = decay_time, decay_time = -1;
+ decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+ ssize_t old_decay_time;
+
+ assert_d_eq(mallctl("arena.0.decay_time", &old_decay_time,
+ &sz, &decay_time, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+ assert_zd_eq(old_decay_time, prev_decay_time,
+ "Unexpected old arena.0.decay_time");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_purge)
+{
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
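+	/* Arena index narenas requests the operation on all arenas. */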
+ mib[1] = narenas;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_decay)
+{
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = narenas;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_dss)
+{
+ const char *dss_prec_old, *dss_prec_new;
+ size_t sz = sizeof(dss_prec_old);
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+ sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
+
+ mib[1] = narenas_total_get();
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_initialized)
+{
+ unsigned narenas;
+ size_t sz = sizeof(narenas);
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+
+ sz = narenas * sizeof(bool);
+ assert_d_eq(mallctl("arenas.initialized", initialized, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lg_dirty_mult)
+{
+ ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
+ size_t sz = sizeof(ssize_t);
+
+ test_skip_if(opt_purge != purge_mode_ratio);
+
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+
+ lg_dirty_mult = -2;
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ lg_dirty_mult = (sizeof(size_t) << 3);
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
+ lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult =
+ lg_dirty_mult, lg_dirty_mult++) {
+ ssize_t old_lg_dirty_mult;
+
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
+ &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+ assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
+ "Unexpected old arenas.lg_dirty_mult");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_decay_time)
+{
+ ssize_t decay_time, orig_decay_time, prev_decay_time;
+ size_t sz = sizeof(ssize_t);
+
+ test_skip_if(opt_purge != purge_mode_decay);
+
+ assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+
+ decay_time = -2;
+ assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+ &decay_time, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ decay_time = 0x7fffffff;
+ assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+ &decay_time, sizeof(ssize_t)), 0,
+	    "Unexpected mallctl() failure");
+
+ for (prev_decay_time = decay_time, decay_time = -1;
+ decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+ ssize_t old_decay_time;
+
+ assert_d_eq(mallctl("arenas.decay_time", &old_decay_time,
+ &sz, &decay_time, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+ assert_zd_eq(old_decay_time, prev_decay_time,
+ "Unexpected old arenas.decay_time");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_constants)
+{
+
+#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \
+ "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
+ TEST_ARENAS_CONSTANT(size_t, page, PAGE);
+ TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
+ TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses);
+ TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses);
+
+#undef TEST_ARENAS_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_bin_constants)
+{
+
+#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \
+ 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
+ TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
+ TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size);
+
+#undef TEST_ARENAS_BIN_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lrun_constants)
+{
+
+#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS);
+
+#undef TEST_ARENAS_LRUN_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_hchunk_constants)
+{
+
+#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize);
+
+#undef TEST_ARENAS_HCHUNK_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_extend)
+{
+ unsigned narenas_before, arena, narenas_after;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_u_eq(narenas_before+1, narenas_after,
+ "Unexpected number of arenas before versus after extension");
+ assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas)
+{
+
+#define TEST_STATS_ARENAS(t, name) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+} while (0)
+
+ TEST_STATS_ARENAS(unsigned, nthreads);
+ TEST_STATS_ARENAS(const char *, dss);
+ TEST_STATS_ARENAS(ssize_t, lg_dirty_mult);
+ TEST_STATS_ARENAS(ssize_t, decay_time);
+ TEST_STATS_ARENAS(size_t, pactive);
+ TEST_STATS_ARENAS(size_t, pdirty);
+
+#undef TEST_STATS_ARENAS
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mallctl_errors,
+ test_mallctlnametomib_errors,
+ test_mallctlbymib_errors,
+ test_mallctl_read_write,
+ test_mallctlnametomib_short_mib,
+ test_mallctl_config,
+ test_mallctl_opt,
+ test_manpage_example,
+ test_tcache_none,
+ test_tcache,
+ test_thread_arena,
+ test_arena_i_lg_dirty_mult,
+ test_arena_i_decay_time,
+ test_arena_i_purge,
+ test_arena_i_decay,
+ test_arena_i_dss,
+ test_arenas_initialized,
+ test_arenas_lg_dirty_mult,
+ test_arenas_decay_time,
+ test_arenas_constants,
+ test_arenas_bin_constants,
+ test_arenas_lrun_constants,
+ test_arenas_hchunk_constants,
+ test_arenas_extend,
+ test_stats_arenas));
+}
diff --git a/memory/jemalloc/src/test/unit/math.c b/memory/jemalloc/src/test/unit/math.c
new file mode 100644
index 000000000..adb72bed9
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/math.c
@@ -0,0 +1,398 @@
+#include "test/jemalloc_test.h"
+
+#define MAX_REL_ERR 1.0e-9
+#define MAX_ABS_ERR 1.0e-9
+
+#include <float.h>
+
+#ifdef __PGI
+#undef INFINITY
+#endif
+
+#ifndef INFINITY
+#define INFINITY (DBL_MAX + DBL_MAX)
+#endif
+
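+/*
+ * Compare doubles using an absolute error bound near zero and a relative
+ * error bound otherwise.
+ */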
+static bool
+double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
+{
+ double rel_err;
+
+ if (fabs(a - b) < max_abs_err)
+ return (true);
+ rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
+ return (rel_err < max_rel_err);
+}
+
+static uint64_t
+factorial(unsigned x)
+{
+ uint64_t ret = 1;
+ unsigned i;
+
+ for (i = 2; i <= x; i++)
+ ret *= (uint64_t)i;
+
+ return (ret);
+}
+
+TEST_BEGIN(test_ln_gamma_factorial)
+{
+ unsigned x;
+
+ /* exp(ln_gamma(x)) == (x-1)! for integer x. */
+ for (x = 1; x <= 21; x++) {
+ assert_true(double_eq_rel(exp(ln_gamma(x)),
+ (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect factorial result for x=%u", x);
+ }
+}
+TEST_END
+
+/* Expected ln_gamma([0.0..100.0] increment=0.25). */
+static const double ln_gamma_misc_expected[] = {
+ INFINITY,
+ 1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
+ 0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
+ -0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
+ 0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
+ 0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
+ 1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
+ 2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
+ 3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
+ 5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
+ 6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
+ 8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
+ 9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
+ 11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
+ 12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
+ 14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
+ 16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
+ 18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
+ 19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
+ 21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
+ 23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
+ 25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
+ 27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
+ 29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
+ 32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
+ 34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
+ 36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
+ 38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
+ 40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
+ 43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
+ 45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
+ 47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
+ 50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
+ 52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
+ 54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
+ 57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
+ 59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
+ 62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
+ 64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
+ 67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
+ 69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
+ 72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
+ 74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
+ 77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
+ 79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
+ 82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
+ 85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
+ 87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
+ 90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
+ 93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
+ 95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
+ 98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
+ 101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
+ 103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
+ 106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
+ 109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
+ 112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
+ 114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
+ 117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
+ 120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
+ 123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
+ 126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
+ 129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
+ 131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
+ 134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
+ 137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
+ 140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
+ 143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
+ 146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
+ 149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
+ 152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
+ 155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
+ 158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
+ 161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
+ 164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
+ 167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
+ 170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
+ 173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
+ 176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
+ 179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
+ 182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
+ 185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
+ 188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
+ 191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
+ 194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
+ 197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
+ 201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
+ 204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
+ 207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
+ 210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
+ 213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
+ 216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
+ 219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
+ 223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
+ 226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
+ 229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
+ 232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
+ 235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
+ 238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
+ 242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
+ 245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
+ 248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
+ 251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
+ 255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
+ 258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
+ 261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
+ 264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
+ 268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
+ 271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
+ 274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
+ 278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
+ 281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
+ 284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
+ 287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
+ 291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
+ 294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
+ 297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
+ 301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
+ 304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
+ 308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
+ 311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
+ 314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
+ 318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
+ 321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
+ 324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
+ 328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
+ 331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
+ 335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
+ 338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
+ 341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
+ 345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
+ 348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
+ 352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
+ 355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
+ 359.13420536957539753
+};
+
+TEST_BEGIN(test_ln_gamma_misc)
+{
+ unsigned i;
+
+ for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
+ double x = (double)i * 0.25;
+ assert_true(double_eq_rel(ln_gamma(x),
+ ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect ln_gamma result for i=%u", i);
+ }
+}
+TEST_END
+
+/* Expected pt_norm([0.01..0.99] increment=0.01). */
+static const double pt_norm_expected[] = {
+ -INFINITY,
+ -2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
+ -1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
+ -1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
+ -1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
+ -1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
+ -0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
+ -0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
+ -0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
+ -0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
+ -0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
+ -0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
+ -0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
+ -0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
+ -0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
+ -0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
+ -0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
+ -0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
+ 0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
+ 0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
+ 0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
+ 0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
+ 0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
+ 0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
+ 0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
+ 0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
+ 0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
+ 0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
+ 0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
+ 1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
+ 1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
+ 1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
+ 1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
+ 1.88079360815125041, 2.05374891063182208, 2.32634787404084076
+};
+
+TEST_BEGIN(test_pt_norm)
+{
+ unsigned i;
+
+ for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
+ double p = (double)i * 0.01;
+ assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
+ MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_norm result for i=%u", i);
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
+ * df={0.1, 1.1, 10.1, 100.1, 1000.1}).
+ */
+static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
+static const double pt_chi2_expected[] = {
+ 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
+ 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
+ 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
+ 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
+ 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
+
+ 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
+ 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
+ 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
+ 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
+ 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
+
+ 2.606673548632508, 4.602913725294877, 5.646152813924212,
+ 6.488971315540869, 7.249823275816285, 7.977314231410841,
+ 8.700354939944047, 9.441728024225892, 10.224338321374127,
+ 11.076435368801061, 12.039320937038386, 13.183878752697167,
+ 14.657791935084575, 16.885728216339373, 23.361991680031817,
+
+ 70.14844087392152, 80.92379498849355, 85.53325420085891,
+ 88.94433120715347, 91.83732712857017, 94.46719943606301,
+ 96.96896479994635, 99.43412843510363, 101.94074719829733,
+ 104.57228644307247, 107.43900093448734, 110.71844673417287,
+ 114.76616819871325, 120.57422505959563, 135.92318818757556,
+
+ 899.0072447849649, 937.9271278858220, 953.8117189560207,
+ 965.3079371501154, 974.8974061207954, 983.4936235182347,
+ 991.5691170518946, 999.4334123954690, 1007.3391826856553,
+ 1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
+ 1046.4872561869577, 1063.5717461999654, 1107.0741966053859
+};
+
+TEST_BEGIN(test_pt_chi2)
+{
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
+ double df = pt_chi2_df[i];
+ double ln_gamma_df = ln_gamma(df * 0.5);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
+ pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_gamma(p=[0.1..0.99] increment=0.07,
+ * shape=[0.5..3.0] increment=0.5).
+ */
+static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
+static const double pt_gamma_expected[] = {
+ 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
+ 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
+ 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
+ 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
+ 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
+
+ 0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
+ 0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
+ 0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
+ 1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
+ 1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
+
+ 0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
+ 0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
+ 1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
+ 1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
+ 2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
+
+ 0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
+ 0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
+ 1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
+ 2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
+ 3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
+
+ 0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
+ 1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
+ 1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
+ 2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
+ 4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
+
+ 0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
+ 1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
+ 2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
+ 3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
+ 4.7230515633946677, 5.6417477865306020, 8.4059469148854635
+};
+
+TEST_BEGIN(test_pt_gamma_shape)
+{
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
+ double shape = pt_gamma_shape[i];
+ double ln_gamma_shape = ln_gamma(shape);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
+ ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Incorrect pt_gamma result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_pt_gamma_scale)
+{
+ double shape = 1.0;
+ double ln_gamma_shape = ln_gamma(shape);
+
+ assert_true(double_eq_rel(
+ pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
+ pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Scale should be trivially equivalent to external multiplication");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ln_gamma_factorial,
+ test_ln_gamma_misc,
+ test_pt_norm,
+ test_pt_chi2,
+ test_pt_gamma_shape,
+ test_pt_gamma_scale));
+}
diff --git a/memory/jemalloc/src/test/unit/mq.c b/memory/jemalloc/src/test/unit/mq.c
new file mode 100644
index 000000000..bde2a480b
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/mq.c
@@ -0,0 +1,93 @@
+#include "test/jemalloc_test.h"
+
+#define NSENDERS 3
+#define NMSGS 100000
+
+typedef struct mq_msg_s mq_msg_t;
+struct mq_msg_s {
+ mq_msg(mq_msg_t) link;
+};
+mq_gen(static, mq_, mq_t, mq_msg_t, link)
+
+TEST_BEGIN(test_mq_basic)
+{
+ mq_t mq;
+ mq_msg_t msg;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+ assert_u_eq(mq_count(&mq), 0, "mq should be empty");
+ assert_ptr_null(mq_tryget(&mq),
+ "mq_tryget() should fail when the queue is empty");
+
+ mq_put(&mq, &msg);
+ assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
+ assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
+
+ mq_put(&mq, &msg);
+ assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
+
+ mq_fini(&mq);
+}
+TEST_END
+
+static void *
+thd_receiver_start(void *arg)
+{
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < (NSENDERS * NMSGS); i++) {
+ mq_msg_t *msg = mq_get(mq);
+ assert_ptr_not_null(msg, "mq_get() should never return NULL");
+ dallocx(msg, 0);
+ }
+ return (NULL);
+}
+
+static void *
+thd_sender_start(void *arg)
+{
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < NMSGS; i++) {
+ mq_msg_t *msg;
+ void *p;
+ p = mallocx(sizeof(mq_msg_t), 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ msg = (mq_msg_t *)p;
+ mq_put(mq, msg);
+ }
+ return (NULL);
+}
+
+TEST_BEGIN(test_mq_threaded)
+{
+ mq_t mq;
+ thd_t receiver;
+ thd_t senders[NSENDERS];
+ unsigned i;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+
+ thd_create(&receiver, thd_receiver_start, (void *)&mq);
+ for (i = 0; i < NSENDERS; i++)
+ thd_create(&senders[i], thd_sender_start, (void *)&mq);
+
+ thd_join(receiver, NULL);
+ for (i = 0; i < NSENDERS; i++)
+ thd_join(senders[i], NULL);
+
+ mq_fini(&mq);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mq_basic,
+ test_mq_threaded));
+}
+
diff --git a/memory/jemalloc/src/test/unit/mtx.c b/memory/jemalloc/src/test/unit/mtx.c
new file mode 100644
index 000000000..96ff69486
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/mtx.c
@@ -0,0 +1,60 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 2
+#define NINCRS 2000000
+
+TEST_BEGIN(test_mtx_basic)
+{
+ mtx_t mtx;
+
+ assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
+ mtx_lock(&mtx);
+ mtx_unlock(&mtx);
+ mtx_fini(&mtx);
+}
+TEST_END
+
+typedef struct {
+ mtx_t mtx;
+ unsigned x;
+} thd_start_arg_t;
+
+static void *
+thd_start(void *varg)
+{
+ thd_start_arg_t *arg = (thd_start_arg_t *)varg;
+ unsigned i;
+
+ for (i = 0; i < NINCRS; i++) {
+ mtx_lock(&arg->mtx);
+ arg->x++;
+ mtx_unlock(&arg->mtx);
+ }
+ return (NULL);
+}
+
+TEST_BEGIN(test_mtx_race)
+{
+ thd_start_arg_t arg;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
+ arg.x = 0;
+ for (i = 0; i < NTHREADS; i++)
+ thd_create(&thds[i], thd_start, (void *)&arg);
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+ assert_u_eq(arg.x, NTHREADS * NINCRS,
+ "Race-related counter corruption");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mtx_basic,
+ test_mtx_race));
+}
diff --git a/memory/jemalloc/src/test/unit/nstime.c b/memory/jemalloc/src/test/unit/nstime.c
new file mode 100644
index 000000000..0368bc26e
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/nstime.c
@@ -0,0 +1,227 @@
+#include "test/jemalloc_test.h"
+
+#define BILLION UINT64_C(1000000000)
+
+TEST_BEGIN(test_nstime_init)
+{
+ nstime_t nst;
+
+ nstime_init(&nst, 42000000043);
+ assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
+ assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_init2)
+{
+ nstime_t nst;
+
+ nstime_init2(&nst, 42, 43);
+ assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_copy)
+{
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_init(&nstb, 0);
+ nstime_copy(&nstb, &nsta);
+ assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
+ assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_compare)
+{
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
+ assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
+
+ nstime_init2(&nstb, 42, 42);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+ "nsta should be greater than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+ "nstb should be less than nsta");
+
+ nstime_init2(&nstb, 42, 44);
+ assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+ "nsta should be less than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+ "nstb should be greater than nsta");
+
+ nstime_init2(&nstb, 41, BILLION - 1);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+ "nsta should be greater than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+ "nstb should be less than nsta");
+
+ nstime_init2(&nstb, 43, 0);
+ assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+ "nsta should be less than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+ "nstb should be greater than nsta");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_add)
+{
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_add(&nsta, &nstb);
+ nstime_init2(&nstb, 84, 86);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+
+ nstime_init2(&nsta, 42, BILLION - 1);
+ nstime_copy(&nstb, &nsta);
+ nstime_add(&nsta, &nstb);
+ nstime_init2(&nstb, 85, BILLION - 2);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_subtract)
+{
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_subtract(&nsta, &nstb);
+ nstime_init(&nstb, 0);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_init2(&nstb, 41, 44);
+ nstime_subtract(&nsta, &nstb);
+ nstime_init2(&nstb, 0, BILLION - 1);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_imultiply)
+{
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_imultiply(&nsta, 10);
+ nstime_init2(&nstb, 420, 430);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect multiplication result");
+
+ nstime_init2(&nsta, 42, 666666666);
+ nstime_imultiply(&nsta, 3);
+ nstime_init2(&nstb, 127, 999999998);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect multiplication result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_idivide)
+{
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_idivide(&nsta, 10);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 666666666);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 3);
+ nstime_idivide(&nsta, 3);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_divide)
+{
+ nstime_t nsta, nstb, nstc;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_init(&nstc, 1);
+ nstime_add(&nsta, &nstc);
+ assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_init(&nstc, 1);
+ nstime_subtract(&nsta, &nstc);
+ assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
+ "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_monotonic)
+{
+
+ nstime_monotonic();
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_update)
+{
+ nstime_t nst;
+
+ nstime_init(&nst, 0);
+
+ assert_false(nstime_update(&nst), "Basic time update failed.");
+
+ /* Only Rip Van Winkle sleeps this long. */
+ {
+ nstime_t addend;
+ nstime_init2(&addend, 631152000, 0);
+ nstime_add(&nst, &addend);
+ }
+ {
+ nstime_t nst0;
+ nstime_copy(&nst0, &nst);
+ assert_true(nstime_update(&nst),
+ "Update should detect time roll-back.");
+ assert_d_eq(nstime_compare(&nst, &nst0), 0,
+ "Time should not have been modified");
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_nstime_init,
+ test_nstime_init2,
+ test_nstime_copy,
+ test_nstime_compare,
+ test_nstime_add,
+ test_nstime_subtract,
+ test_nstime_imultiply,
+ test_nstime_idivide,
+ test_nstime_divide,
+ test_nstime_monotonic,
+ test_nstime_update));
+}
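
Editorial note on nstime.c above: the add/subtract tests deliberately use nanosecond fields near BILLION - 1 so that the arithmetic must carry or borrow across the seconds boundary (42.999999999 s + 42.999999999 s = 85.999999998 s). The sketch below reproduces that carry with an explicit two-field struct; it only illustrates the invariant being tested. In this jemalloc version nstime_t itself stores a single 64-bit nanosecond count, so the carry is implicit there; ts_t and ts_add() are hypothetical names.

/* Illustrative sketch of the carry behavior test_nstime_add checks:
 * 42.999999999s + 42.999999999s == 85.999999998s. */
#include <assert.h>
#include <stdint.h>

#define BILLION UINT64_C(1000000000)

typedef struct {
	uint64_t sec;
	uint64_t nsec;	/* Invariant: nsec < BILLION. */
} ts_t;

static void
ts_add(ts_t *a, const ts_t *b)
{
	a->sec += b->sec;
	a->nsec += b->nsec;
	if (a->nsec >= BILLION) {	/* Carry into the seconds field. */
		a->sec++;
		a->nsec -= BILLION;
	}
}

int
main(void)
{
	ts_t a = {42, BILLION - 1};
	ts_t b = {42, BILLION - 1};

	ts_add(&a, &b);
	assert(a.sec == 85 && a.nsec == BILLION - 2);
	return (0);
}
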
diff --git a/memory/jemalloc/src/test/unit/ph.c b/memory/jemalloc/src/test/unit/ph.c
new file mode 100644
index 000000000..da442f07e
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/ph.c
@@ -0,0 +1,290 @@
+#include "test/jemalloc_test.h"
+
+typedef struct node_s node_t;
+
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic;
+ phn(node_t) link;
+ uint64_t key;
+};
+
+static int
+node_cmp(const node_t *a, const node_t *b)
+{
+ int ret;
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0) {
+ /*
+ * Duplicates are not allowed in the heap, so force an
+ * arbitrary ordering for non-identical items with equal keys.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return (ret);
+}
+
+static int
+node_cmp_magic(const node_t *a, const node_t *b) {
+
+ assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ return (node_cmp(a, b));
+}
+
+typedef ph(node_t) heap_t;
+ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);
+
+static void
+node_print(const node_t *node, unsigned depth)
+{
+ unsigned i;
+ node_t *leftmost_child, *sibling;
+
+ for (i = 0; i < depth; i++)
+ malloc_printf("\t");
+ malloc_printf("%2"FMTu64"\n", node->key);
+
+ leftmost_child = phn_lchild_get(node_t, link, node);
+ if (leftmost_child == NULL)
+ return;
+ node_print(leftmost_child, depth + 1);
+
+ for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
+ NULL; sibling = phn_next_get(node_t, link, sibling)) {
+ node_print(sibling, depth + 1);
+ }
+}
+
+static void
+heap_print(const heap_t *heap)
+{
+ node_t *auxelm;
+
+ malloc_printf("vvv heap %p vvv\n", heap);
+ if (heap->ph_root == NULL)
+ goto label_return;
+
+ node_print(heap->ph_root, 0);
+
+ for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
+ auxelm = phn_next_get(node_t, link, auxelm)) {
+ assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
+ link, auxelm)), auxelm,
+ "auxelm's prev doesn't link to auxelm");
+ node_print(auxelm, 0);
+ }
+
+label_return:
+ malloc_printf("^^^ heap %p ^^^\n", heap);
+}
+
+static unsigned
+node_validate(const node_t *node, const node_t *parent)
+{
+ unsigned nnodes = 1;
+ node_t *leftmost_child, *sibling;
+
+ if (parent != NULL) {
+ assert_d_ge(node_cmp_magic(node, parent), 0,
+ "Child is less than parent");
+ }
+
+ leftmost_child = phn_lchild_get(node_t, link, node);
+ if (leftmost_child == NULL)
+ return (nnodes);
+ assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
+ (void *)node, "Leftmost child does not link to node");
+ nnodes += node_validate(leftmost_child, node);
+
+ for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
+ NULL; sibling = phn_next_get(node_t, link, sibling)) {
+ assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
+ link, sibling)), sibling,
+ "sibling's prev doesn't link to sibling");
+ nnodes += node_validate(sibling, node);
+ }
+ return (nnodes);
+}
+
+static unsigned
+heap_validate(const heap_t *heap)
+{
+ unsigned nnodes = 0;
+ node_t *auxelm;
+
+ if (heap->ph_root == NULL)
+ goto label_return;
+
+ nnodes += node_validate(heap->ph_root, NULL);
+
+ for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
+ auxelm = phn_next_get(node_t, link, auxelm)) {
+ assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
+ link, auxelm)), auxelm,
+ "auxelm's prev doesn't link to auxelm");
+ nnodes += node_validate(auxelm, NULL);
+ }
+
+label_return:
+ if (false)
+ heap_print(heap);
+ return (nnodes);
+}
+
+TEST_BEGIN(test_ph_empty)
+{
+ heap_t heap;
+
+ heap_new(&heap);
+ assert_true(heap_empty(&heap), "Heap should be empty");
+ assert_ptr_null(heap_first(&heap), "Unexpected node");
+}
+TEST_END
+
+static void
+node_remove(heap_t *heap, node_t *node)
+{
+
+ heap_remove(heap, node);
+
+ node->magic = 0;
+}
+
+static node_t *
+node_remove_first(heap_t *heap)
+{
+ node_t *node = heap_remove_first(heap);
+ node->magic = 0;
+ return (node);
+}
+
+TEST_BEGIN(test_ph_random)
+{
+#define NNODES 25
+#define NBAGS 250
+#define SEED 42
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ heap_t heap;
+ node_t nodes[NNODES];
+ unsigned i, j, k;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++)
+ bag[j] = j;
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++)
+ bag[j] = NNODES - j - 1;
+ break;
+ default:
+ for (j = 0; j < NNODES; j++)
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize heap and nodes. */
+ heap_new(&heap);
+ assert_u_eq(heap_validate(&heap), 0,
+ "Incorrect node count");
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ heap_insert(&heap, &nodes[k]);
+ if (i % 13 == 12) {
+ /* Trigger merging. */
+ assert_ptr_not_null(heap_first(&heap),
+ "Heap should not be empty");
+ }
+ assert_u_eq(heap_validate(&heap), k + 1,
+ "Incorrect node count");
+ }
+
+ assert_false(heap_empty(&heap),
+ "Heap should not be empty");
+
+ /* Remove nodes. */
+ switch (i % 4) {
+ case 0:
+ for (k = 0; k < j; k++) {
+ assert_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ node_remove(&heap, &nodes[k]);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ case 1:
+ for (k = j; k > 0; k--) {
+ node_remove(&heap, &nodes[k-1]);
+ assert_u_eq(heap_validate(&heap), k - 1,
+ "Incorrect node count");
+ }
+ break;
+ case 2: {
+ node_t *prev = NULL;
+ for (k = 0; k < j; k++) {
+ node_t *node = node_remove_first(&heap);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ if (prev != NULL) {
+ assert_d_ge(node_cmp(node,
+ prev), 0,
+ "Bad removal order");
+ }
+ prev = node;
+ }
+ break;
+ } case 3: {
+ node_t *prev = NULL;
+ for (k = 0; k < j; k++) {
+ node_t *node = heap_first(&heap);
+ assert_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ if (prev != NULL) {
+ assert_d_ge(node_cmp(node,
+ prev), 0,
+ "Bad removal order");
+ }
+ node_remove(&heap, node);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ prev = node;
+ }
+ break;
+ } default:
+ not_reached();
+ }
+
+ assert_ptr_null(heap_first(&heap),
+ "Heap should be empty");
+ assert_true(heap_empty(&heap), "Heap should be empty");
+ }
+ }
+ fini_gen_rand(sfmt);
+#undef NNODES
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ph_empty,
+ test_ph_random));
+}
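
Editorial note on ph.c above: ph_gen() expands into a full set of static pairing-heap operations (heap_new, heap_insert, heap_first, heap_remove_first, heap_remove, heap_empty) specialized for node_t and its link field. Assuming the node_t/heap_t definitions and ph_gen() expansion from this very file, plus the test harness assert macros, the essential "pop keys in nondecreasing order" usage looks roughly like the sketch below; heap_sort_demo() is a hypothetical helper, not part of the patch.

/* Sketch of the generated pairing-heap API, reusing the node_t/heap_t
 * and ph_gen() expansion defined in ph.c above (illustrative only). */
static void
heap_sort_demo(void)
{
	heap_t heap;
	node_t nodes[3];
	uint64_t keys[3] = {7, 1, 4};
	uint64_t prev_key = 0;
	unsigned i;

	heap_new(&heap);
	for (i = 0; i < 3; i++) {
		nodes[i].magic = NODE_MAGIC;
		nodes[i].key = keys[i];
		heap_insert(&heap, &nodes[i]);
	}
	while (!heap_empty(&heap)) {
		node_t *n = heap_remove_first(&heap);
		/* Keys come out in nondecreasing order: 1, 4, 7. */
		assert_u64_ge(n->key, prev_key, "Heap order violated");
		prev_key = n->key;
	}
}
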
diff --git a/memory/jemalloc/src/test/unit/prng.c b/memory/jemalloc/src/test/unit/prng.c
new file mode 100644
index 000000000..80c9d733f
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prng.c
@@ -0,0 +1,263 @@
+#include "test/jemalloc_test.h"
+
+static void
+test_prng_lg_range_u32(bool atomic)
+{
+ uint32_t sa, sb, ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32, atomic);
+ sa = 42;
+ rb = prng_lg_range_u32(&sa, 32, atomic);
+ assert_u32_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_u32(&sb, 32, atomic);
+ assert_u32_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32, atomic);
+ rb = prng_lg_range_u32(&sa, 32, atomic);
+ assert_u32_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32, atomic);
+ for (lg_range = 31; lg_range > 0; lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_u32(&sb, lg_range, atomic);
+ assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ assert_u32_eq(rb, (ra >> (32 - lg_range)),
+ "Expected high order bits of full-width result, "
+ "lg_range=%u", lg_range);
+ }
+}
+
+static void
+test_prng_lg_range_u64(void)
+{
+ uint64_t sa, sb, ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ sa = 42;
+ rb = prng_lg_range_u64(&sa, 64);
+ assert_u64_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_u64(&sb, 64);
+ assert_u64_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ rb = prng_lg_range_u64(&sa, 64);
+ assert_u64_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ for (lg_range = 63; lg_range > 0; lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_u64(&sb, lg_range);
+ assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ assert_u64_eq(rb, (ra >> (64 - lg_range)),
+ "Expected high order bits of full-width result, "
+ "lg_range=%u", lg_range);
+ }
+}
+
+static void
+test_prng_lg_range_zu(bool atomic)
+{
+ size_t sa, sb, ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ sa = 42;
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ assert_zu_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ assert_zu_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ assert_zu_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
+ lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_zu(&sb, lg_range, atomic);
+ assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
+ lg_range)), "Expected high order bits of full-width "
+ "result, lg_range=%u", lg_range);
+ }
+}
+
+TEST_BEGIN(test_prng_lg_range_u32_nonatomic)
+{
+
+ test_prng_lg_range_u32(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_u32_atomic)
+{
+
+ test_prng_lg_range_u32(true);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_u64_nonatomic)
+{
+
+ test_prng_lg_range_u64();
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_zu_nonatomic)
+{
+
+ test_prng_lg_range_zu(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_zu_atomic)
+{
+
+ test_prng_lg_range_zu(true);
+}
+TEST_END
+
+static void
+test_prng_range_u32(bool atomic)
+{
+ uint32_t range;
+#define MAX_RANGE 10000000
+#define RANGE_STEP 97
+#define NREPS 10
+
+ for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ uint32_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < NREPS; rep++) {
+ uint32_t r = prng_range_u32(&s, range, atomic);
+
+ assert_u32_lt(r, range, "Out of range");
+ }
+ }
+}
+
+static void
+test_prng_range_u64(void)
+{
+ uint64_t range;
+#define MAX_RANGE 10000000
+#define RANGE_STEP 97
+#define NREPS 10
+
+ for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ uint64_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < NREPS; rep++) {
+ uint64_t r = prng_range_u64(&s, range);
+
+ assert_u64_lt(r, range, "Out of range");
+ }
+ }
+}
+
+static void
+test_prng_range_zu(bool atomic)
+{
+ size_t range;
+#define MAX_RANGE 10000000
+#define RANGE_STEP 97
+#define NREPS 10
+
+ for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ size_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < NREPS; rep++) {
+ size_t r = prng_range_zu(&s, range, atomic);
+
+ assert_zu_lt(r, range, "Out of range");
+ }
+ }
+}
+
+TEST_BEGIN(test_prng_range_u32_nonatomic)
+{
+
+ test_prng_range_u32(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_u32_atomic)
+{
+
+ test_prng_range_u32(true);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_u64_nonatomic)
+{
+
+ test_prng_range_u64();
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_zu_nonatomic)
+{
+
+ test_prng_range_zu(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_zu_atomic)
+{
+
+ test_prng_range_zu(true);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_prng_lg_range_u32_nonatomic,
+ test_prng_lg_range_u32_atomic,
+ test_prng_lg_range_u64_nonatomic,
+ test_prng_lg_range_zu_nonatomic,
+ test_prng_lg_range_zu_atomic,
+ test_prng_range_u32_nonatomic,
+ test_prng_range_u32_atomic,
+ test_prng_range_u64_nonatomic,
+ test_prng_range_zu_nonatomic,
+ test_prng_range_zu_atomic));
+}
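
Editorial note on prng.c above: the core invariant the lg_range tests check is that a request for lg_range bits returns exactly the high-order lg_range bits of the full-width generator output, with the state advancing by one step either way. The standalone sketch below demonstrates that relationship for an LCG-style generator; the multiplier/increment are the Knuth MMIX constants, which may or may not match jemalloc's prng.h exactly, but the invariant does not depend on the particular constants. lcg_next() and lcg_lg_range() are hypothetical names.

/* Sketch: for an LCG-based PRNG, an lg_range-bit result is simply the
 * top lg_range bits of the full 64-bit result. */
#include <assert.h>
#include <stdint.h>

static uint64_t
lcg_next(uint64_t *state)
{
	*state = *state * UINT64_C(6364136223846793005) +
	    UINT64_C(1442695040888963407);
	return (*state);
}

static uint64_t
lcg_lg_range(uint64_t *state, unsigned lg_range)
{
	return (lcg_next(state) >> (64 - lg_range));
}

int
main(void)
{
	uint64_t sa = 42;
	uint64_t full = lcg_lg_range(&sa, 64);
	unsigned lg_range;

	for (lg_range = 63; lg_range > 0; lg_range--) {
		uint64_t sb = 42;
		uint64_t r = lcg_lg_range(&sb, lg_range);

		/* Narrow result == high bits of the full-width result. */
		assert(r == (full >> (64 - lg_range)));
	}
	return (0);
}
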
diff --git a/memory/jemalloc/src/test/unit/prof_accum.c b/memory/jemalloc/src/test/unit/prof_accum.c
new file mode 100644
index 000000000..fd229e0fd
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prof_accum.c
@@ -0,0 +1,91 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD 50
+#define DUMP_INTERVAL 1
+#define BT_COUNT_CHECK_INTERVAL 5
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
+#endif
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+static void *
+alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
+{
+
+ return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration));
+}
+
+static void *
+thd_start(void *varg)
+{
+ unsigned thd_ind = *(unsigned *)varg;
+ size_t bt_count_prev, bt_count;
+ unsigned i_prev, i;
+
+ i_prev = 0;
+ bt_count_prev = 0;
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ void *p = alloc_from_permuted_backtrace(thd_ind, i);
+ dallocx(p, 0);
+ if (i % DUMP_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
+ i+1 == NALLOCS_PER_THREAD) {
+ bt_count = prof_bt_count();
+ assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
+ "Expected larger backtrace count increase");
+ i_prev = i;
+ bt_count_prev = bt_count;
+ }
+ }
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_idump)
+{
+ bool active;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_idump));
+}
diff --git a/memory/jemalloc/src/test/unit/prof_active.c b/memory/jemalloc/src/test/unit/prof_active.c
new file mode 100644
index 000000000..814909572
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prof_active.c
@@ -0,0 +1,136 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_thread_active_init:false,lg_prof_sample:0";
+#endif
+
+static void
+mallctl_bool_get(const char *name, bool expected, const char *func, int line)
+{
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0,
+ "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
+ assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
+ name);
+}
+
+static void
+mallctl_bool_set(const char *name, bool old_expected, bool val_new,
+ const char *func, int line)
+{
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0,
+ "%s():%d: Unexpected mallctl failure reading/writing %s", func,
+ line, name);
+ assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
+ line, name);
+}
+
+static void
+mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
+ int line)
+{
+
+ mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
+}
+#define mallctl_prof_active_get(a) \
+ mallctl_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_prof_active_set_impl(bool prof_active_old_expected,
+ bool prof_active_new, const char *func, int line)
+{
+
+ mallctl_bool_set("prof.active", prof_active_old_expected,
+ prof_active_new, func, line);
+}
+#define mallctl_prof_active_set(a, b) \
+ mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
+ const char *func, int line)
+{
+
+ mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
+ func, line);
+}
+#define mallctl_thread_prof_active_get(a) \
+ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
+ bool thread_prof_active_new, const char *func, int line)
+{
+
+ mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
+ thread_prof_active_new, func, line);
+}
+#define mallctl_thread_prof_active_set(a, b) \
+ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+prof_sampling_probe_impl(bool expect_sample, const char *func, int line)
+{
+ void *p;
+ size_t expected_backtraces = expect_sample ? 1 : 0;
+
+ assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
+ line);
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_zu_eq(prof_bt_count(), expected_backtraces,
+ "%s():%d: Unexpected backtrace count", func, line);
+ dallocx(p, 0);
+}
+#define prof_sampling_probe(a) \
+ prof_sampling_probe_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_active)
+{
+
+ test_skip_if(!config_prof);
+
+ mallctl_prof_active_get(true);
+ mallctl_thread_prof_active_get(false);
+
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(false, false);
+ /* prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(true, false);
+ mallctl_thread_prof_active_set(false, false);
+ /* !prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, false);
+ mallctl_thread_prof_active_set(false, true);
+ /* !prof.active, thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, true);
+ mallctl_thread_prof_active_set(true, true);
+ /* prof.active, thread.prof.active. */
+ prof_sampling_probe(true);
+
+ /* Restore settings. */
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(true, false);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_prof_active));
+}
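
Editorial note on prof_active.c above: mallctl_bool_get()/mallctl_bool_set() follow the standard mallctl() calling convention, where oldp/oldlenp receive the previous value, newp/newlen supply the replacement, and either side may be NULL. A minimal sketch of the same read-then-write pattern from application code is below. It assumes jemalloc is the active allocator, was built with profiling support, and exports the unprefixed public API (if built with a je_ prefix the call is je_mallctl()).

/* Sketch: read-then-write of a boolean mallctl, mirroring the helpers
 * above.  Assumes jemalloc's public <jemalloc/jemalloc.h> is in use. */
#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	bool old, new_val = true;
	size_t sz = sizeof(old);

	/* Read the previous value and install the new one in one call. */
	if (mallctl("prof.active", &old, &sz, &new_val, sizeof(new_val))
	    != 0) {
		fprintf(stderr, "prof.active unavailable (no prof?)\n");
		return (1);
	}
	printf("prof.active: %d -> %d\n", old, new_val);
	return (0);
}
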
diff --git a/memory/jemalloc/src/test/unit/prof_gdump.c b/memory/jemalloc/src/test/unit/prof_gdump.c
new file mode 100644
index 000000000..a0e6ee921
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prof_gdump.c
@@ -0,0 +1,81 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
+#endif
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+TEST_BEGIN(test_gdump)
+{
+ bool active, gdump, gdump_old;
+ void *p, *q, *r, *s;
+ size_t sz;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ did_prof_dump_open = false;
+ q = mallocx(chunksize, 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ gdump = false;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
+ "Unexpected mallctl failure while disabling prof.gdump");
+ assert(gdump_old);
+ did_prof_dump_open = false;
+ r = mallocx(chunksize, 0);
+ assert_ptr_not_null(r, "Unexpected mallocx() failure");
+ assert_false(did_prof_dump_open, "Unexpected profile dump");
+
+ gdump = true;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
+ "Unexpected mallctl failure while enabling prof.gdump");
+ assert(!gdump_old);
+ did_prof_dump_open = false;
+ s = mallocx(chunksize, 0);
+ assert_ptr_not_null(s, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+ dallocx(q, 0);
+ dallocx(r, 0);
+ dallocx(s, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_gdump));
+}
diff --git a/memory/jemalloc/src/test/unit/prof_idump.c b/memory/jemalloc/src/test/unit/prof_idump.c
new file mode 100644
index 000000000..bdea53ecd
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prof_idump.c
@@ -0,0 +1,51 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
+ "lg_prof_interval:0";
+#endif
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+TEST_BEGIN(test_idump)
+{
+ bool active;
+ void *p;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_idump));
+}
diff --git a/memory/jemalloc/src/test/unit/prof_reset.c b/memory/jemalloc/src/test/unit/prof_reset.c
new file mode 100644
index 000000000..5ae45fd2c
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prof_reset.c
@@ -0,0 +1,303 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_active:false,lg_prof_sample:0";
+#endif
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+static void
+set_prof_active(bool active)
+{
+
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure");
+}
+
+static size_t
+get_lg_prof_sample(void)
+{
+ size_t lg_prof_sample;
+ size_t sz = sizeof(size_t);
+
+ assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ return (lg_prof_sample);
+}
+
+static void
+do_prof_reset(size_t lg_prof_sample)
+{
+ assert_d_eq(mallctl("prof.reset", NULL, NULL,
+ &lg_prof_sample, sizeof(size_t)), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+ "Expected profile sample rate change");
+}
+
+TEST_BEGIN(test_prof_reset_basic)
+{
+ size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+ size_t sz;
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
+ NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ assert_zu_eq(lg_prof_sample_orig, 0,
+ "Unexpected profiling sample rate");
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+
+ /* Test simple resets. */
+ for (i = 0; i < 2; i++) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected profile sample rate change");
+ }
+
+ /* Test resets with prof.lg_sample changes. */
+ lg_prof_sample_next = 1;
+ for (i = 0; i < 2; i++) {
+ do_prof_reset(lg_prof_sample_next);
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
+ "Expected profile sample rate change");
+ lg_prof_sample_next = lg_prof_sample_orig;
+ }
+
+ /* Make sure the test code restored prof.lg_sample. */
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+}
+TEST_END
+
+bool prof_dump_header_intercepted = false;
+prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
+static bool
+prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
+ const prof_cnt_t *cnt_all)
+{
+
+ prof_dump_header_intercepted = true;
+ memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
+
+ return (false);
+}
+
+TEST_BEGIN(test_prof_reset_cleanup)
+{
+ void *p;
+ prof_dump_header_t *prof_dump_header_orig;
+
+ test_skip_if(!config_prof);
+
+ set_prof_active(true);
+
+ assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_dump_header_orig = prof_dump_header;
+ prof_dump_header = prof_dump_header_intercept;
+ assert_false(prof_dump_header_intercepted, "Unexpected intercept");
+
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ assert_true(prof_dump_header_intercepted, "Expected intercept");
+ assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
+
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
+ assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_dump_header = prof_dump_header_orig;
+
+ dallocx(p, 0);
+ assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+
+ set_prof_active(false);
+}
+TEST_END
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD (1U << 13)
+#define OBJ_RING_BUF_COUNT 1531
+#define RESET_INTERVAL (1U << 10)
+#define DUMP_INTERVAL 3677
+static void *
+thd_start(void *varg)
+{
+ unsigned thd_ind = *(unsigned *)varg;
+ unsigned i;
+ void *objs[OBJ_RING_BUF_COUNT];
+
+ memset(objs, 0, sizeof(objs));
+
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ if (i % RESET_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while resetting heap profile "
+ "data");
+ }
+
+ if (i % DUMP_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
+ assert_ptr_not_null(*pp,
+ "Unexpected btalloc() failure");
+ }
+ }
+
+ /* Clean up any remaining objects. */
+ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ }
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_prof_reset)
+{
+ size_t lg_prof_sample_orig;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+ size_t bt_count, tdata_count;
+
+ test_skip_if(!config_prof);
+
+ bt_count = prof_bt_count();
+ assert_zu_eq(bt_count, 0,
+ "Unexpected pre-existing tdata structures");
+ tdata_count = prof_tdata_count();
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ do_prof_reset(5);
+
+ set_prof_active(true);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+
+ assert_zu_eq(prof_bt_count(), bt_count,
+ "Unexpected bactrace count change");
+ assert_zu_eq(prof_tdata_count(), tdata_count,
+ "Unexpected remaining tdata structures");
+
+ set_prof_active(false);
+
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NTHREADS
+#undef NALLOCS_PER_THREAD
+#undef OBJ_RING_BUF_COUNT
+#undef RESET_INTERVAL
+#undef DUMP_INTERVAL
+
+/* Test sampling at the same allocation site across resets. */
+#define NITER 10
+TEST_BEGIN(test_xallocx)
+{
+ size_t lg_prof_sample_orig;
+ unsigned i;
+ void *ptrs[NITER];
+
+ test_skip_if(!config_prof);
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ set_prof_active(true);
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ for (i = 0; i < NITER; i++) {
+ void *p;
+ size_t sz, nsz;
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Allocate small object (which will be promoted). */
+ p = ptrs[i] = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Perform successful xallocx(). */
+ sz = sallocx(p, 0);
+ assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+ "Unexpected xallocx() failure");
+
+ /* Perform unsuccessful xallocx(). */
+ nsz = nallocx(sz+1, 0);
+ assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+ "Unexpected xallocx() success");
+ }
+
+ for (i = 0; i < NITER; i++) {
+ /* dallocx. */
+ dallocx(ptrs[i], 0);
+ }
+
+ set_prof_active(false);
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
+int
+main(void)
+{
+
+ /* Intercept dumping prior to running any tests. */
+ prof_dump_open = prof_dump_open_intercept;
+
+ return (test(
+ test_prof_reset_basic,
+ test_prof_reset_cleanup,
+ test_prof_reset,
+ test_xallocx));
+}
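
Editorial note on prof_reset.c above: test_xallocx leans on xallocx()'s contract of resizing in place only and returning the real size of the resulting allocation, so a request that cannot be satisfied without moving simply reports the current size. A hedged standalone illustration follows; it assumes jemalloc is the active allocator and exports the unprefixed non-standard API (mallocx, sallocx, nallocx, xallocx, dallocx).

/* Sketch of the xallocx() contract used by test_xallocx above:
 * in-place resize only; on failure the current size is returned. */
#include <jemalloc/jemalloc.h>
#include <assert.h>

int
main(void)
{
	void *p = mallocx(1, 0);
	size_t sz, nsz, res;

	assert(p != NULL);
	sz = sallocx(p, 0);		/* Real usable size of p. */
	nsz = nallocx(sz + 1, 0);	/* Size of the next class up. */

	/* Resizing within the current size class trivially succeeds. */
	assert(xallocx(p, sz, 0, 0) == sz);

	/* A larger request is attempted in place only: either p grows to
	 * nsz, or xallocx() reports the unchanged size sz. */
	res = xallocx(p, nsz, 0, 0);
	assert(res == nsz || res == sz);

	dallocx(p, 0);
	return (0);
}
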
diff --git a/memory/jemalloc/src/test/unit/prof_thread_name.c b/memory/jemalloc/src/test/unit/prof_thread_name.c
new file mode 100644
index 000000000..f501158d7
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/prof_thread_name.c
@@ -0,0 +1,129 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf = "prof:true,prof_active:false";
+#endif
+
+static void
+mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
+ int line)
+{
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0),
+ 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ func, line);
+ assert_str_eq(thread_name_old, thread_name_expected,
+ "%s():%d: Unexpected thread.prof.name value", func, line);
+}
+#define mallctl_thread_name_get(a) \
+ mallctl_thread_name_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_name_set_impl(const char *thread_name, const char *func,
+ int line)
+{
+
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), 0,
+ "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ func, line);
+ mallctl_thread_name_get_impl(thread_name, func, line);
+}
+#define mallctl_thread_name_set(a) \
+ mallctl_thread_name_set_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_thread_name_validation)
+{
+ const char *thread_name;
+
+ test_skip_if(!config_prof);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set("hi there");
+
+ /* NULL input shouldn't be allowed. */
+ thread_name = NULL;
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* '\n' shouldn't be allowed. */
+ thread_name = "hi\nthere";
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* Simultaneous read/write shouldn't be allowed. */
+ {
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz,
+ &thread_name, sizeof(thread_name)), EPERM,
+ "Unexpected mallctl result writing \"%s\" to "
+ "thread.prof.name", thread_name);
+ }
+
+ mallctl_thread_name_set("");
+}
+TEST_END
+
+#define NTHREADS 4
+#define NRESET 25
+static void *
+thd_start(void *varg)
+{
+ unsigned thd_ind = *(unsigned *)varg;
+ char thread_name[16] = "";
+ unsigned i;
+
+ malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set(thread_name);
+
+ for (i = 0; i < NRESET; i++) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ mallctl_thread_name_get(thread_name);
+ }
+
+ mallctl_thread_name_set(thread_name);
+ mallctl_thread_name_set("");
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_prof_thread_name_threaded)
+{
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+#undef NTHREADS
+#undef NRESET
+
+int
+main(void)
+{
+
+ return (test(
+ test_prof_thread_name_validation,
+ test_prof_thread_name_threaded));
+}
diff --git a/memory/jemalloc/src/test/unit/ql.c b/memory/jemalloc/src/test/unit/ql.c
new file mode 100644
index 000000000..05fad450f
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/ql.c
@@ -0,0 +1,209 @@
+#include "test/jemalloc_test.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+
+typedef struct list_s list_t;
+typedef ql_head(list_t) list_head_t;
+
+struct list_s {
+ ql_elm(list_t) link;
+ char id;
+};
+
+static void
+test_empty_list(list_head_t *head)
+{
+ list_t *t;
+ unsigned i;
+
+ assert_ptr_null(ql_first(head), "Unexpected element for empty list");
+ assert_ptr_null(ql_last(head, link),
+ "Unexpected element for empty list");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ i++;
+ }
+ assert_u_eq(i, 0, "Unexpected element for empty list");
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ i++;
+ }
+ assert_u_eq(i, 0, "Unexpected element for empty list");
+}
+
+TEST_BEGIN(test_ql_empty)
+{
+ list_head_t head;
+
+ ql_new(&head);
+ test_empty_list(&head);
+}
+TEST_END
+
+static void
+init_entries(list_t *entries, unsigned nentries)
+{
+ unsigned i;
+
+ for (i = 0; i < nentries; i++) {
+ entries[i].id = 'a' + i;
+ ql_elm_new(&entries[i], link);
+ }
+}
+
+static void
+test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
+{
+ list_t *t;
+ unsigned i;
+
+ assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
+ assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
+ "Element id mismatch");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ assert_c_eq(t->id, entries[i].id, "Element id mismatch");
+ i++;
+ }
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ assert_c_eq(t->id, entries[nentries-i-1].id,
+ "Element id mismatch");
+ i++;
+ }
+
+ for (i = 0; i < nentries-1; i++) {
+ t = ql_next(head, &entries[i], link);
+ assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
+ }
+ assert_ptr_null(ql_next(head, &entries[nentries-1], link),
+ "Unexpected element");
+
+ assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
+ for (i = 1; i < nentries; i++) {
+ t = ql_prev(head, &entries[i], link);
+ assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_ql_tail_insert)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_tail_insert(&head, &entries[i], link);
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_tail_remove)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_tail_insert(&head, &entries[i], link);
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, entries, NENTRIES-i);
+ ql_tail_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_insert)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_remove)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, &entries[i], NENTRIES-i);
+ ql_head_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_insert)
+{
+ list_head_t head;
+ list_t entries[8];
+ list_t *a, *b, *c, *d, *e, *f, *g, *h;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ a = &entries[0];
+ b = &entries[1];
+ c = &entries[2];
+ d = &entries[3];
+ e = &entries[4];
+ f = &entries[5];
+ g = &entries[6];
+ h = &entries[7];
+
+ /*
+ * ql_remove(), ql_before_insert(), and ql_after_insert() are used
+ * internally by other macros that are already tested, so there's no
+ * need to test them completely. However, insertion/deletion from the
+ * middle of lists is not otherwise tested; do so here.
+ */
+ ql_tail_insert(&head, f, link);
+ ql_before_insert(&head, f, b, link);
+ ql_before_insert(&head, f, c, link);
+ ql_after_insert(f, h, link);
+ ql_after_insert(f, g, link);
+ ql_before_insert(&head, b, a, link);
+ ql_after_insert(c, d, link);
+ ql_before_insert(&head, f, e, link);
+
+ test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ql_empty,
+ test_ql_tail_insert,
+ test_ql_tail_remove,
+ test_ql_head_insert,
+ test_ql_head_remove,
+ test_ql_insert));
+}
diff --git a/memory/jemalloc/src/test/unit/qr.c b/memory/jemalloc/src/test/unit/qr.c
new file mode 100644
index 000000000..a2a2d902b
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/qr.c
@@ -0,0 +1,248 @@
+#include "test/jemalloc_test.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+/* Split index, in [1..NENTRIES). */
+#define SPLIT_INDEX 5
+
+typedef struct ring_s ring_t;
+
+struct ring_s {
+ qr(ring_t) link;
+ char id;
+};
+
+static void
+init_entries(ring_t *entries)
+{
+ unsigned i;
+
+ for (i = 0; i < NENTRIES; i++) {
+ qr_new(&entries[i], link);
+ entries[i].id = 'a' + i;
+ }
+}
+
+static void
+test_independent_entries(ring_t *entries)
+{
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ j++;
+ }
+ assert_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ j++;
+ }
+ assert_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_ptr_eq(t, &entries[i],
+ "Next element in single-element ring should be same as "
+ "current element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_ptr_eq(t, &entries[i],
+ "Previous element in single-element ring should be same as "
+ "current element");
+ }
+}
+
+TEST_BEGIN(test_qr_one)
+{
+ ring_t entries[NENTRIES];
+
+ init_entries(entries);
+ test_independent_entries(entries);
+}
+TEST_END
+
+static void
+test_entries_ring(ring_t *entries)
+{
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_qr_after_insert)
+{
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ test_entries_ring(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_remove)
+{
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[i+j].id,
+ "Element id mismatch");
+ j++;
+ }
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
+ "Element id mismatch");
+ j++;
+ }
+ qr_remove(&entries[i], link);
+ }
+ test_independent_entries(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_before_insert)
+{
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_before_insert(&entries[i - 1], &entries[i], link);
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(NENTRIES+i-j) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+TEST_END
+
+static void
+test_split_entries(ring_t *entries)
+{
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ if (i < SPLIT_INDEX) {
+ assert_c_eq(t->id,
+ entries[(i+j) % SPLIT_INDEX].id,
+ "Element id mismatch");
+ } else {
+ assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
+ (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
+ "Element id mismatch");
+ }
+ j++;
+ }
+ }
+}
+
+TEST_BEGIN(test_qr_meld_split)
+{
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
+ test_split_entries(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
+ test_split_entries(entries);
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
+ test_entries_ring(entries);
+
+ qr_split(&entries[0], &entries[0], link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[0], link);
+ test_entries_ring(entries);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_qr_one,
+ test_qr_after_insert,
+ test_qr_remove,
+ test_qr_before_insert,
+ test_qr_meld_split));
+}
diff --git a/memory/jemalloc/src/test/unit/quarantine.c b/memory/jemalloc/src/test/unit/quarantine.c
new file mode 100644
index 000000000..bbd48a51d
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/quarantine.c
@@ -0,0 +1,108 @@
+#include "test/jemalloc_test.h"
+
+#define QUARANTINE_SIZE 8192
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:"
+ STRINGIFY(QUARANTINE_SIZE);
+#endif
+
+void
+quarantine_clear(void)
+{
+ void *p;
+
+ p = mallocx(QUARANTINE_SIZE*2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+}
+
+TEST_BEGIN(test_quarantine)
+{
+#define SZ ZU(256)
+#define NQUARANTINED (QUARANTINE_SIZE/SZ)
+ void *quarantined[NQUARANTINED+1];
+ size_t i, j;
+
+ test_skip_if(!config_fill);
+
+ assert_zu_eq(nallocx(SZ, 0), SZ,
+ "SZ=%zu does not precisely equal a size class", SZ);
+
+ quarantine_clear();
+
+ /*
+ * Allocate enough regions to completely fill the quarantine, plus one
+ * more. The last iteration occurs with a completely full quarantine,
+ * but no regions should be drained from the quarantine until the last
+ * deallocation occurs. Therefore no region recycling should occur
+ * until after this loop completes.
+ */
+ for (i = 0; i < NQUARANTINED+1; i++) {
+ void *p = mallocx(SZ, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ quarantined[i] = p;
+ dallocx(p, 0);
+ for (j = 0; j < i; j++) {
+ assert_ptr_ne(p, quarantined[j],
+ "Quarantined region recycled too early; "
+ "i=%zu, j=%zu", i, j);
+ }
+ }
+#undef NQUARANTINED
+#undef SZ
+}
+TEST_END
+
+static bool detected_redzone_corruption;
+
+static void
+arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ detected_redzone_corruption = true;
+}
+
+TEST_BEGIN(test_quarantine_redzone)
+{
+ char *s;
+ arena_redzone_corruption_t *arena_redzone_corruption_orig;
+
+ test_skip_if(!config_fill);
+
+ arena_redzone_corruption_orig = arena_redzone_corruption;
+ arena_redzone_corruption = arena_redzone_corruption_replacement;
+
+ /* Test underflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[-1] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ /* Test overflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[sallocx(s, 0)] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ arena_redzone_corruption = arena_redzone_corruption_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_quarantine,
+ test_quarantine_redzone));
+}
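
Editorial note on quarantine.c above: the STRINGIFY/STRINGIFY_HELPER pair is the standard two-level preprocessor trick. The extra helper forces QUARANTINE_SIZE to be macro-expanded before # turns it into a string, so malloc_conf ends up containing "quarantine:8192" rather than "quarantine:QUARANTINE_SIZE". A tiny standalone demonstration, reusing the same macro names purely for illustration:

/* Two-level stringification: the extra helper makes the argument expand
 * before it is stringified. */
#include <assert.h>
#include <string.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#define QUARANTINE_SIZE 8192

int
main(void)
{
	/* With the helper, the macro expands first: "8192". */
	assert(strcmp(STRINGIFY(QUARANTINE_SIZE), "8192") == 0);
	/* Without it, the argument is stringified verbatim. */
	assert(strcmp(STRINGIFY_HELPER(QUARANTINE_SIZE),
	    "QUARANTINE_SIZE") == 0);
	return (0);
}
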
diff --git a/memory/jemalloc/src/test/unit/rb.c b/memory/jemalloc/src/test/unit/rb.c
new file mode 100644
index 000000000..cf3d3a783
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/rb.c
@@ -0,0 +1,354 @@
+#include "test/jemalloc_test.h"
+
+#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
+ rbp_bh_t != NULL; \
+ rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
+ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
+ (r_height)++; \
+ } \
+ } \
+} while (0)
+
+typedef struct node_s node_t;
+
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic;
+ rb_node(node_t) link;
+ uint64_t key;
+};
+
+static int
+node_cmp(const node_t *a, const node_t *b) {
+ int ret;
+
+ assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0) {
+ /*
+ * Duplicates are not allowed in the tree, so force an
+ * arbitrary ordering for non-identical items with equal keys.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return (ret);
+}
+
+typedef rb_tree(node_t) tree_t;
+rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
+
+TEST_BEGIN(test_rb_empty)
+{
+ tree_t tree;
+ node_t key;
+
+ tree_new(&tree);
+
+ assert_true(tree_empty(&tree), "Tree should be empty");
+ assert_ptr_null(tree_first(&tree), "Unexpected node");
+ assert_ptr_null(tree_last(&tree), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
+}
+TEST_END
+
+static unsigned
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
+{
+ unsigned ret = 0;
+ node_t *left_node;
+ node_t *right_node;
+
+ if (node == NULL)
+ return (ret);
+
+ left_node = rbtn_left_get(node_t, link, node);
+ right_node = rbtn_right_get(node_t, link, node);
+
+ if (!rbtn_red_get(node_t, link, node))
+ black_depth++;
+
+ /* Red nodes must be interleaved with black nodes. */
+ if (rbtn_red_get(node_t, link, node)) {
+ if (left_node != NULL)
+ assert_false(rbtn_red_get(node_t, link, left_node),
+ "Node should be black");
+ if (right_node != NULL)
+ assert_false(rbtn_red_get(node_t, link, right_node),
+ "Node should be black");
+ }
+
+ /* Self. */
+ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Left subtree. */
+ if (left_node != NULL)
+ ret += tree_recurse(left_node, black_height, black_depth);
+ else
+ ret += (black_depth != black_height);
+
+ /* Right subtree. */
+ if (right_node != NULL)
+ ret += tree_recurse(right_node, black_height, black_depth);
+ else
+ ret += (black_depth != black_height);
+
+ return (ret);
+}
+
+static node_t *
+tree_iterate_cb(tree_t *tree, node_t *node, void *data)
+{
+ unsigned *i = (unsigned *)data;
+ node_t *search_node;
+
+ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Test rb_search(). */
+ search_node = tree_search(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_search() returned unexpected node");
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_nsearch() returned unexpected node");
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_psearch() returned unexpected node");
+
+ (*i)++;
+
+ return (NULL);
+}
+
+static unsigned
+tree_iterate(tree_t *tree)
+{
+ unsigned i;
+
+ i = 0;
+ tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
+
+ return (i);
+}
+
+static unsigned
+tree_iterate_reverse(tree_t *tree)
+{
+ unsigned i;
+
+ i = 0;
+ tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
+
+ return (i);
+}
+
+static void
+node_remove(tree_t *tree, node_t *node, unsigned nnodes)
+{
+ node_t *search_node;
+ unsigned black_height, imbalances;
+
+ tree_remove(tree, node);
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ if (search_node != NULL) {
+ assert_u64_ge(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ if (search_node != NULL) {
+ assert_u64_le(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ node->magic = 0;
+
+ rbtn_black_height(node_t, link, tree, black_height);
+ imbalances = tree_recurse(tree->rbt_root, black_height, 0);
+ assert_u_eq(imbalances, 0, "Tree is unbalanced");
+ assert_u_eq(tree_iterate(tree), nnodes-1,
+ "Unexpected node iteration count");
+ assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
+ "Unexpected node iteration count");
+}
+
+static node_t *
+remove_iterate_cb(tree_t *tree, node_t *node, void *data)
+{
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_next(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return (ret);
+}
+
+static node_t *
+remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
+{
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_prev(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return (ret);
+}
+
+static void
+destroy_cb(node_t *node, void *data)
+{
+ unsigned *nnodes = (unsigned *)data;
+
+ assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
+ (*nnodes)--;
+}
+
+TEST_BEGIN(test_rb_random)
+{
+#define NNODES 25
+#define NBAGS 250
+#define SEED 42
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ tree_t tree;
+ node_t nodes[NNODES];
+ unsigned i, j, k, black_height, imbalances;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++)
+ bag[j] = j;
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++)
+ bag[j] = NNODES - j - 1;
+ break;
+ default:
+ for (j = 0; j < NNODES; j++)
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize tree and nodes. */
+ tree_new(&tree);
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ tree_insert(&tree, &nodes[k]);
+
+ rbtn_black_height(node_t, link, &tree,
+ black_height);
+ imbalances = tree_recurse(tree.rbt_root,
+ black_height, 0);
+ assert_u_eq(imbalances, 0,
+ "Tree is unbalanced");
+
+ assert_u_eq(tree_iterate(&tree), k+1,
+ "Unexpected node iteration count");
+ assert_u_eq(tree_iterate_reverse(&tree), k+1,
+ "Unexpected node iteration count");
+
+ assert_false(tree_empty(&tree),
+ "Tree should not be empty");
+ assert_ptr_not_null(tree_first(&tree),
+ "Tree should not be empty");
+ assert_ptr_not_null(tree_last(&tree),
+ "Tree should not be empty");
+
+ tree_next(&tree, &nodes[k]);
+ tree_prev(&tree, &nodes[k]);
+ }
+
+ /* Remove nodes. */
+ switch (i % 5) {
+ case 0:
+ for (k = 0; k < j; k++)
+ node_remove(&tree, &nodes[k], j - k);
+ break;
+ case 1:
+ for (k = j; k > 0; k--)
+ node_remove(&tree, &nodes[k-1], k);
+ break;
+ case 2: {
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_iter(&tree, start,
+ remove_iterate_cb, (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ assert_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 3: {
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_reverse_iter(&tree, start,
+ remove_reverse_iterate_cb,
+ (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ assert_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 4: {
+ unsigned nnodes = j;
+ tree_destroy(&tree, destroy_cb, &nnodes);
+ assert_u_eq(nnodes, 0,
+ "Destruction terminated early");
+ break;
+ } default:
+ not_reached();
+ }
+ }
+ }
+ fini_gen_rand(sfmt);
+#undef NNODES
+#undef NBAGS
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_rb_empty,
+ test_rb_random));
+}
diff --git a/memory/jemalloc/src/test/unit/rtree.c b/memory/jemalloc/src/test/unit/rtree.c
new file mode 100644
index 000000000..b54b3e86f
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/rtree.c
@@ -0,0 +1,151 @@
+#include "test/jemalloc_test.h"
+
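+/*
+ * Minimal allocation hooks handed to rtree_new() so the tests can exercise
+ * the radix tree with plain calloc()/free().
+ */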
+static rtree_node_elm_t *
+node_alloc(size_t nelms)
+{
+
+ return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
+}
+
+static void
+node_dalloc(rtree_node_elm_t *node)
+{
+
+ free(node);
+}
+
+TEST_BEGIN(test_rtree_get_empty)
+{
+ unsigned i;
+
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ rtree_t rtree;
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
+ assert_ptr_null(rtree_get(&rtree, 0, false),
+ "rtree_get() should return NULL for empty tree");
+ rtree_delete(&rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_extrema)
+{
+ unsigned i;
+ extent_node_t node_a, node_b;
+
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ rtree_t rtree;
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
+
+ assert_false(rtree_set(&rtree, 0, &node_a),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a,
+ "rtree_get() should return previously set value");
+
+ assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b,
+ "rtree_get() should return previously set value");
+
+ rtree_delete(&rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_bits)
+{
+ unsigned i, j, k;
+
+ for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
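+ /*
+ * With an i-bit rtree, keys that differ only in the low
+ * (sizeof(uintptr_t)*8 - i) bits are insignificant and should map to
+ * the same leaf; the assertions below rely on this.
+ */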
+ uintptr_t keys[] = {0, 1,
+ (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
+ extent_node_t node;
+ rtree_t rtree;
+
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
+
+ for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
+ assert_false(rtree_set(&rtree, keys[j], &node),
+ "Unexpected rtree_set() failure");
+ for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
+ assert_ptr_eq(rtree_get(&rtree, keys[k], true),
+ &node, "rtree_get() should return "
+ "previously set value and ignore "
+ "insignificant key bits; i=%u, j=%u, k=%u, "
+ "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
+ j, k, keys[j], keys[k]);
+ }
+ assert_ptr_null(rtree_get(&rtree,
+ (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false),
+ "Only leftmost rtree leaf should be set; "
+ "i=%u, j=%u", i, j);
+ assert_false(rtree_set(&rtree, keys[j], NULL),
+ "Unexpected rtree_set() failure");
+ }
+
+ rtree_delete(&rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_random)
+{
+ unsigned i;
+ sfmt_t *sfmt;
+#define NSET 16
+#define SEED 42
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ uintptr_t keys[NSET];
+ extent_node_t node;
+ unsigned j;
+ rtree_t rtree;
+
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
+
+ for (j = 0; j < NSET; j++) {
+ keys[j] = (uintptr_t)gen_rand64(sfmt);
+ assert_false(rtree_set(&rtree, keys[j], &node),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
+ "rtree_get() should return previously set value");
+ }
+ for (j = 0; j < NSET; j++) {
+ assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
+ "rtree_get() should return previously set value");
+ }
+
+ for (j = 0; j < NSET; j++) {
+ assert_false(rtree_set(&rtree, keys[j], NULL),
+ "Unexpected rtree_set() failure");
+ assert_ptr_null(rtree_get(&rtree, keys[j], true),
+ "rtree_get() should return previously set value");
+ }
+ for (j = 0; j < NSET; j++) {
+ assert_ptr_null(rtree_get(&rtree, keys[j], true),
+ "rtree_get() should return previously set value");
+ }
+
+ rtree_delete(&rtree);
+ }
+ fini_gen_rand(sfmt);
+#undef NSET
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_rtree_get_empty,
+ test_rtree_extrema,
+ test_rtree_bits,
+ test_rtree_random));
+}
diff --git a/memory/jemalloc/src/test/unit/run_quantize.c b/memory/jemalloc/src/test/unit/run_quantize.c
new file mode 100644
index 000000000..b1ca6356d
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/run_quantize.c
@@ -0,0 +1,149 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_small_run_size)
+{
+ unsigned nbins, i;
+ size_t sz, run_size;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all small size classes, get their run sizes, and verify
+ * that the quantized size is the same as the run size.
+ */
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nbins; i++) {
+ mib[2] = i;
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0),
+ 0, "Unexpected mallctlbymib failure");
+ assert_zu_eq(run_size, run_quantize_floor(run_size),
+ "Small run quantization should be a no-op (run_size=%zu)",
+ run_size);
+ assert_zu_eq(run_size, run_quantize_ceil(run_size),
+ "Small run quantization should be a no-op (run_size=%zu)",
+ run_size);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_large_run_size)
+{
+ bool cache_oblivious;
+ unsigned nlruns, i;
+ size_t sz, run_size_prev, ceil_prev;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all large size classes, get their run sizes, and verify
+ * that the quantized size is the same as the run size.
+ */
+
+ sz = sizeof(bool);
+ assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nlruns; i++) {
+ size_t lrun_size, run_size, floor, ceil;
+
+ mib[2] = i;
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0),
+ 0, "Unexpected mallctlbymib failure");
+ run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
+ floor = run_quantize_floor(run_size);
+ ceil = run_quantize_ceil(run_size);
+
+ assert_zu_eq(run_size, floor,
+ "Large run quantization should be a no-op for precise "
+ "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
+ assert_zu_eq(run_size, ceil,
+ "Large run quantization should be a no-op for precise "
+ "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
+
+ if (i > 0) {
+ assert_zu_eq(run_size_prev, run_quantize_floor(run_size
+ - PAGE), "Floor should be a precise size");
+ if (run_size_prev < ceil_prev) {
+ assert_zu_eq(ceil_prev, run_size,
+ "Ceiling should be a precise size "
+ "(run_size_prev=%zu, ceil_prev=%zu, "
+ "run_size=%zu)", run_size_prev, ceil_prev,
+ run_size);
+ }
+ }
+ run_size_prev = floor;
+ ceil_prev = run_quantize_ceil(run_size + PAGE);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_monotonic)
+{
+ unsigned nbins, nlruns, i;
+ size_t sz, floor_prev, ceil_prev;
+
+ /*
+ * Iterate over all run sizes and verify that
+ * run_quantize_{floor,ceil}() are monotonic.
+ */
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ floor_prev = 0;
+ ceil_prev = 0;
+ for (i = 1; i <= chunksize >> LG_PAGE; i++) {
+ size_t run_size, floor, ceil;
+
+ run_size = i << LG_PAGE;
+ floor = run_quantize_floor(run_size);
+ ceil = run_quantize_ceil(run_size);
+
+ assert_zu_le(floor, run_size,
+ "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
+ floor, run_size, ceil);
+ assert_zu_ge(ceil, run_size,
+ "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
+ floor, run_size, ceil);
+
+ assert_zu_le(floor_prev, floor, "Floor should be monotonic "
+ "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
+ floor_prev, floor, run_size, ceil);
+ assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
+ "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
+ floor, run_size, ceil_prev, ceil);
+
+ floor_prev = floor;
+ ceil_prev = ceil;
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_small_run_size,
+ test_large_run_size,
+ test_monotonic));
+}
diff --git a/memory/jemalloc/src/test/unit/size_classes.c b/memory/jemalloc/src/test/unit/size_classes.c
new file mode 100644
index 000000000..4e1e0ce4f
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/size_classes.c
@@ -0,0 +1,184 @@
+#include "test/jemalloc_test.h"
+
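+/* Return the largest size class, i.e. arenas.hchunk.<nhchunks-1>.size. */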
+static size_t
+get_max_size_class(void)
+{
+ unsigned nhchunks;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nhchunks - 1;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
+
+ return (max_size_class);
+}
+
+TEST_BEGIN(test_size_classes)
+{
+ size_t size_class, max_size_class;
+ szind_t index, max_index;
+
+ max_size_class = get_max_size_class();
+ max_index = size2index(max_size_class);
+
+ for (index = 0, size_class = index2size(index); index < max_index ||
+ size_class < max_size_class; index++, size_class =
+ index2size(index)) {
+ assert_true(index < max_index,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+ assert_true(size_class < max_size_class,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+
+ assert_u_eq(index, size2index(size_class),
+ "size2index() does not reverse index2size(): index=%u -->"
+ " size_class=%zu --> index=%u --> size_class=%zu", index,
+ size_class, size2index(size_class),
+ index2size(size2index(size_class)));
+ assert_zu_eq(size_class, index2size(size2index(size_class)),
+ "index2size() does not reverse size2index(): index=%u -->"
+ " size_class=%zu --> index=%u --> size_class=%zu", index,
+ size_class, size2index(size_class),
+ index2size(size2index(size_class)));
+
+ assert_u_eq(index+1, size2index(size_class+1),
+ "Next size_class does not round up properly");
+
+ assert_zu_eq(size_class, (index > 0) ?
+ s2u(index2size(index-1)+1) : s2u(1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class-1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class),
+ "s2u() does not compute same size class");
+ assert_zu_eq(s2u(size_class+1), index2size(index+1),
+ "s2u() does not round up to next size class");
+ }
+
+ assert_u_eq(index, size2index(index2size(index)),
+ "size2index() does not reverse index2size()");
+ assert_zu_eq(max_size_class, index2size(size2index(max_size_class)),
+ "index2size() does not reverse size2index()");
+
+ assert_zu_eq(size_class, s2u(index2size(index-1)+1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class-1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class),
+ "s2u() does not compute same size class");
+}
+TEST_END
+
+TEST_BEGIN(test_psize_classes)
+{
+ size_t size_class, max_size_class;
+ pszind_t pind, max_pind;
+
+ max_size_class = get_max_size_class();
+ max_pind = psz2ind(max_size_class);
+
+ for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
+ size_class < max_size_class; pind++, size_class =
+ pind2sz(pind)) {
+ assert_true(pind < max_pind,
+ "Loop conditionals should be equivalent; pind=%u, "
+ "size_class=%zu (%#zx)", pind, size_class, size_class);
+ assert_true(size_class < max_size_class,
+ "Loop conditionals should be equivalent; pind=%u, "
+ "size_class=%zu (%#zx)", pind, size_class, size_class);
+
+ assert_u_eq(pind, psz2ind(size_class),
+ "psz2ind() does not reverse pind2sz(): pind=%u -->"
+ " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+ size_class, psz2ind(size_class),
+ pind2sz(psz2ind(size_class)));
+ assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
+ "pind2sz() does not reverse psz2ind(): pind=%u -->"
+ " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+ size_class, psz2ind(size_class),
+ pind2sz(psz2ind(size_class)));
+
+ assert_u_eq(pind+1, psz2ind(size_class+1),
+ "Next size_class does not round up properly");
+
+ assert_zu_eq(size_class, (pind > 0) ?
+ psz2u(pind2sz(pind-1)+1) : psz2u(1),
+ "psz2u() does not round up to size class");
+ assert_zu_eq(size_class, psz2u(size_class-1),
+ "psz2u() does not round up to size class");
+ assert_zu_eq(size_class, psz2u(size_class),
+ "psz2u() does not compute same size class");
+ assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
+ "psz2u() does not round up to next size class");
+ }
+
+ assert_u_eq(pind, psz2ind(pind2sz(pind)),
+ "psz2ind() does not reverse pind2sz()");
+ assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
+ "pind2sz() does not reverse psz2ind()");
+
+ assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
+ "psz2u() does not round up to size class");
+ assert_zu_eq(size_class, psz2u(size_class-1),
+ "psz2u() does not round up to size class");
+ assert_zu_eq(size_class, psz2u(size_class),
+ "psz2u() does not compute same size class");
+}
+TEST_END
+
+TEST_BEGIN(test_overflow)
+{
+ size_t max_size_class;
+
+ max_size_class = get_max_size_class();
+
+ assert_u_eq(size2index(max_size_class+1), NSIZES,
+ "size2index() should return NSIZES on overflow");
+ assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+ "size2index() should return NSIZES on overflow");
+ assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
+ "size2index() should return NSIZES on overflow");
+
+ assert_zu_eq(s2u(max_size_class+1), 0,
+ "s2u() should return 0 for unsupported size");
+ assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
+ "s2u() should return 0 for unsupported size");
+ assert_zu_eq(s2u(SIZE_T_MAX), 0,
+ "s2u() should return 0 on overflow");
+
+ assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
+ "psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
+ "psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
+ "psz2ind() should return NPSIZES on overflow");
+
+ assert_zu_eq(psz2u(max_size_class+1), 0,
+ "psz2u() should return 0 for unsupported size");
+ assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
+ "psz2u() should return 0 for unsupported size");
+ assert_zu_eq(psz2u(SIZE_T_MAX), 0,
+ "psz2u() should return 0 on overflow");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_size_classes,
+ test_psize_classes,
+ test_overflow));
+}
diff --git a/memory/jemalloc/src/test/unit/smoothstep.c b/memory/jemalloc/src/test/unit/smoothstep.c
new file mode 100644
index 000000000..4cfb21343
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/smoothstep.c
@@ -0,0 +1,106 @@
+#include "test/jemalloc_test.h"
+
+static const uint64_t smoothstep_tab[] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+};
+
+TEST_BEGIN(test_smoothstep_integral)
+{
+ uint64_t sum, min, max;
+ unsigned i;
+
+ /*
+ * The integral of smoothstep in the [0..1] range equals 1/2. Verify
+ * that the fixed point representation's integral is no more than
+ * rounding error distant from 1/2. Regarding rounding, each table
+ * element is rounded down to the nearest fixed point value, so the
+ * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
+ */
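+ /*
+ * E.g. for the "smoother" variant y = 6x^5 - 15x^4 + 10x^3 (the exact
+ * polynomial depends on how smoothstep.h was generated), the integral
+ * is 1 - 3 + 5/2 = 1/2; by the symmetry y(x) + y(1-x) = 1, the sum of
+ * the samples at x = i/NSTEPS, i = 1..NSTEPS, is exactly (NSTEPS+1)/2,
+ * which is the "max" bound below once scaled by 2^SMOOTHSTEP_BFP.
+ */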
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+ sum += smoothstep_tab[i];
+
+ max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
+ min = max - SMOOTHSTEP_NSTEPS;
+
+ assert_u64_ge(sum, min,
+ "Integral too small, even accounting for truncation");
+ assert_u64_le(sum, max, "Integral exceeds 1/2");
+ if (false) {
+ malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
+ max - sum, SMOOTHSTEP_NSTEPS);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_monotonic)
+{
+ uint64_t prev_h;
+ unsigned i;
+
+ /*
+ * The smoothstep function is monotonic in [0..1], i.e. its slope is
+ * non-negative. In practice we want to parametrize table generation
+ * such that piecewise slope is greater than zero, but do not require
+ * that here.
+ */
+ prev_h = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ uint64_t h = smoothstep_tab[i];
+ assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
+ prev_h = h;
+ }
+ assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
+ (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_slope)
+{
+ uint64_t prev_h, prev_delta;
+ unsigned i;
+
+ /*
+ * The smoothstep slope strictly increases until x=0.5, and then
+ * strictly decreases until x=1.0. Verify the slightly weaker
+ * requirement of monotonicity, so that inadequate table precision does
+ * not cause false test failures.
+ */
+ prev_h = 0;
+ prev_delta = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
+ uint64_t h = smoothstep_tab[i];
+ uint64_t delta = h - prev_h;
+ assert_u64_ge(delta, prev_delta,
+ "Slope must monotonically increase in 0.0 <= x <= 0.5, "
+ "i=%u", i);
+ prev_h = h;
+ prev_delta = delta;
+ }
+
+ prev_h = KQU(1) << SMOOTHSTEP_BFP;
+ prev_delta = 0;
+ for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
+ uint64_t h = smoothstep_tab[i];
+ uint64_t delta = prev_h - h;
+ assert_u64_ge(delta, prev_delta,
+ "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
+ "i=%u", i);
+ prev_h = h;
+ prev_delta = delta;
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_smoothstep_integral,
+ test_smoothstep_monotonic,
+ test_smoothstep_slope));
+}
diff --git a/memory/jemalloc/src/test/unit/stats.c b/memory/jemalloc/src/test/unit/stats.c
new file mode 100644
index 000000000..a9a3981fb
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/stats.c
@@ -0,0 +1,447 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_stats_summary)
+{
+ size_t *cactive;
+ size_t sz, allocated, active, resident, mapped;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(cactive);
+ assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_le(active, *cactive,
+ "active should be no larger than cactive");
+ assert_zu_le(allocated, active,
+ "allocated should be no larger than active");
+ assert_zu_lt(active, resident,
+ "active should be less than resident");
+ assert_zu_lt(active, mapped,
+ "active should be less than mapped");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_huge)
+{
+ void *p;
+ uint64_t epoch;
+ size_t allocated;
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx(large_maxclass+1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_le(nmalloc, nrequests,
+ "nmalloc should no larger than nrequests");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_summary)
+{
+ unsigned arena;
+ void *little, *large, *huge;
+ uint64_t epoch;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+ size_t mapped;
+ uint64_t npurge, nmadvise, purged;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ little = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(little, "Unexpected mallocx() failure");
+ large = mallocx(large_maxclass, 0);
+ assert_ptr_not_null(large, "Unexpected mallocx() failure");
+ huge = mallocx(chunksize, 0);
+ assert_ptr_not_null(huge, "Unexpected mallocx() failure");
+
+ dallocx(little, 0);
+ dallocx(large, 0);
+ dallocx(huge, 0);
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(npurge, 0,
+ "At least one purge should have occurred");
+ assert_u64_le(nmadvise, purged,
+ "nmadvise should be no greater than purged");
+ }
+}
+TEST_END
+
+void *
+thd_start(void *arg)
+{
+
+ return (NULL);
+}
+
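+/*
+ * Create and join a short-lived thread so that jemalloc no longer runs in
+ * single-threaded (lazy locking) mode; see the comment at the call site.
+ */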
+static void
+no_lazy_lock(void)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+
+TEST_BEGIN(test_stats_arenas_small)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be no greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_large)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(large_maxclass, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_huge)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_bins)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, curruns, curregs;
+ uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nruns, nreruns;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(arena_bin_info[0].reg_size, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
+ "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ assert_zu_gt(curregs, 0,
+ "allocated should be greater than zero");
+ if (config_tcache) {
+ assert_u64_gt(nfills, 0,
+ "At least one fill should have occurred");
+ assert_u64_gt(nflushes, 0,
+ "At least one flush should have occurred");
+ }
+ assert_u64_gt(nruns, 0,
+ "At least one run should have been allocated");
+ assert_zu_gt(curruns, 0,
+ "At least one run should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_lruns)
+{
+ unsigned arena;
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ size_t curruns, sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(LARGE_MINCLASS, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ assert_u64_gt(curruns, 0,
+ "At least one run should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_hchunks)
+{
+ unsigned arena;
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc;
+ size_t curhchunks, sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(curhchunks, 0,
+ "At least one chunk should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_stats_summary,
+ test_stats_huge,
+ test_stats_arenas_summary,
+ test_stats_arenas_small,
+ test_stats_arenas_large,
+ test_stats_arenas_huge,
+ test_stats_arenas_bins,
+ test_stats_arenas_lruns,
+ test_stats_arenas_hchunks));
+}
diff --git a/memory/jemalloc/src/test/unit/ticker.c b/memory/jemalloc/src/test/unit/ticker.c
new file mode 100644
index 000000000..e737020ab
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/ticker.c
@@ -0,0 +1,76 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_ticker_tick)
+{
+#define NREPS 2
+#define NTICKS 3
+ ticker_t ticker;
+ int32_t i, j;
+
+ ticker_init(&ticker, NTICKS);
+ for (i = 0; i < NREPS; i++) {
+ for (j = 0; j < NTICKS; j++) {
+ assert_u_eq(ticker_read(&ticker), NTICKS - j,
+ "Unexpected ticker value (i=%d, j=%d)", i, j);
+ assert_false(ticker_tick(&ticker),
+ "Unexpected ticker fire (i=%d, j=%d)", i, j);
+ }
+ assert_u32_eq(ticker_read(&ticker), 0,
+ "Expected ticker depletion");
+ assert_true(ticker_tick(&ticker),
+ "Expected ticker fire (i=%d)", i);
+ assert_u32_eq(ticker_read(&ticker), NTICKS,
+ "Expected ticker reset");
+ }
+#undef NREPS
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_ticks)
+{
+#define NTICKS 3
+ ticker_t ticker;
+
+ ticker_init(&ticker, NTICKS);
+
+ assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+ assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
+ assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
+ assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
+ assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+
+ assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
+ assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_copy)
+{
+#define NTICKS 3
+ ticker_t ta, tb;
+
+ ticker_init(&ta, NTICKS);
+ ticker_copy(&tb, &ta);
+ assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+ assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
+ assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+
+ ticker_tick(&ta);
+ ticker_copy(&tb, &ta);
+ assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
+ assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
+ assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ticker_tick,
+ test_ticker_ticks,
+ test_ticker_copy));
+}
diff --git a/memory/jemalloc/src/test/unit/tsd.c b/memory/jemalloc/src/test/unit/tsd.c
new file mode 100644
index 000000000..4e2622a34
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/tsd.c
@@ -0,0 +1,112 @@
+#include "test/jemalloc_test.h"
+
+#define THREAD_DATA 0x72b65c10
+
+typedef unsigned int data_t;
+
+static bool data_cleanup_executed;
+
+malloc_tsd_types(data_, data_t)
+malloc_tsd_protos(, data_, data_t)
+
+void
+data_cleanup(void *arg)
+{
+ data_t *data = (data_t *)arg;
+
+ if (!data_cleanup_executed) {
+ assert_x_eq(*data, THREAD_DATA,
+ "Argument passed into cleanup function should match tsd "
+ "value");
+ }
+ data_cleanup_executed = true;
+
+ /*
+ * Allocate during cleanup for two rounds, in order to assure that
+ * jemalloc's internal tsd reinitialization happens.
+ */
+ switch (*data) {
+ case THREAD_DATA:
+ *data = 1;
+ data_tsd_set(data);
+ break;
+ case 1:
+ *data = 2;
+ data_tsd_set(data);
+ break;
+ case 2:
+ return;
+ default:
+ not_reached();
+ }
+
+ {
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpeced mallocx() failure");
+ dallocx(p, 0);
+ }
+}
+
+malloc_tsd_externs(data_, data_t)
+#define DATA_INIT 0x12345678
+malloc_tsd_data(, data_, data_t, DATA_INIT)
+malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup)
+
+static void *
+thd_start(void *arg)
+{
+ data_t d = (data_t)(uintptr_t)arg;
+ void *p;
+
+ assert_x_eq(*data_tsd_get(true), DATA_INIT,
+ "Initial tsd get should return initialization value");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+ data_tsd_set(&d);
+ assert_x_eq(*data_tsd_get(true), d,
+ "After tsd set, tsd get should return value that was set");
+
+ d = 0;
+ assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
+ "Resetting local data should have no effect on tsd");
+
+ free(p);
+ return (NULL);
+}
+
+TEST_BEGIN(test_tsd_main_thread)
+{
+
+ thd_start((void *) 0xa5f3e329);
+}
+TEST_END
+
+TEST_BEGIN(test_tsd_sub_thread)
+{
+ thd_t thd;
+
+ data_cleanup_executed = false;
+ thd_create(&thd, thd_start, (void *)THREAD_DATA);
+ thd_join(thd, NULL);
+ assert_true(data_cleanup_executed,
+ "Cleanup function should have executed");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return (test_status_fail);
+ }
+ data_tsd_boot();
+
+ return (test(
+ test_tsd_main_thread,
+ test_tsd_sub_thread));
+}
diff --git a/memory/jemalloc/src/test/unit/util.c b/memory/jemalloc/src/test/unit/util.c
new file mode 100644
index 000000000..c958dc0fb
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/util.c
@@ -0,0 +1,317 @@
+#include "test/jemalloc_test.h"
+
+#define TEST_POW2_CEIL(t, suf, pri) do { \
+ unsigned i, pow2; \
+ t x; \
+ \
+ assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
+ \
+ for (i = 0; i < sizeof(t) * 8; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
+ << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 2; i < sizeof(t) * 8; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
+ ((t)1) << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
+ ((t)1) << (i+1), "Unexpected result"); \
+ } \
+ \
+ for (pow2 = 1; pow2 < 25; pow2++) { \
+ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
+ x++) { \
+ assert_##suf##_eq(pow2_ceil_##suf(x), \
+ ((t)1) << pow2, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+} while (0)
+
+TEST_BEGIN(test_pow2_ceil_u64)
+{
+
+ TEST_POW2_CEIL(uint64_t, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_u32)
+{
+
+ TEST_POW2_CEIL(uint32_t, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_zu)
+{
+
+ TEST_POW2_CEIL(size_t, zu, "zu");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax_no_endptr)
+{
+ int err;
+
+ set_errno(0);
+ assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
+ err = get_errno();
+ assert_d_eq(err, 0, "Unexpected failure");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax)
+{
+ struct test_s {
+ const char *input;
+ const char *expected_remainder;
+ int base;
+ int expected_errno;
+ const char *expected_errno_name;
+ uintmax_t expected_x;
+ };
+#define ERR(e) e, #e
+#define KUMAX(x) ((uintmax_t)x##ULL)
+ struct test_s tests[] = {
+ {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
+
+ {"", "", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {"+42", "", 0, ERR(0), KUMAX(42)},
+ {"-42", "", 0, ERR(0), KUMAX(-42)},
+ {"042", "", 0, ERR(0), KUMAX(042)},
+ {"+042", "", 0, ERR(0), KUMAX(042)},
+ {"-042", "", 0, ERR(0), KUMAX(-042)},
+ {"0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
+
+ {"0", "", 0, ERR(0), KUMAX(0)},
+ {"1", "", 0, ERR(0), KUMAX(1)},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {" 42", "", 0, ERR(0), KUMAX(42)},
+ {"42 ", " ", 0, ERR(0), KUMAX(42)},
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"42x", "x", 0, ERR(0), KUMAX(42)},
+
+ {"07", "", 0, ERR(0), KUMAX(7)},
+ {"010", "", 0, ERR(0), KUMAX(8)},
+ {"08", "8", 0, ERR(0), KUMAX(0)},
+ {"0_", "_", 0, ERR(0), KUMAX(0)},
+
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"0X", "X", 0, ERR(0), KUMAX(0)},
+ {"0xg", "xg", 0, ERR(0), KUMAX(0)},
+ {"0XA", "", 0, ERR(0), KUMAX(10)},
+
+ {"010", "", 10, ERR(0), KUMAX(10)},
+ {"0x3", "x3", 10, ERR(0), KUMAX(0)},
+
+ {"12", "2", 2, ERR(0), KUMAX(1)},
+ {"78", "8", 8, ERR(0), KUMAX(7)},
+ {"9a", "a", 10, ERR(0), KUMAX(9)},
+ {"9A", "A", 10, ERR(0), KUMAX(9)},
+ {"fg", "g", 16, ERR(0), KUMAX(15)},
+ {"FG", "G", 16, ERR(0), KUMAX(15)},
+ {"0xfg", "g", 16, ERR(0), KUMAX(15)},
+ {"0XFG", "G", 16, ERR(0), KUMAX(15)},
+ {"z_", "_", 36, ERR(0), KUMAX(35)},
+ {"Z_", "_", 36, ERR(0), KUMAX(35)}
+ };
+#undef ERR
+#undef KUMAX
+ unsigned i;
+
+ for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
+ struct test_s *test = &tests[i];
+ int err;
+ uintmax_t result;
+ char *remainder;
+
+ set_errno(0);
+ result = malloc_strtoumax(test->input, &remainder, test->base);
+ err = get_errno();
+ assert_d_eq(err, test->expected_errno,
+ "Expected errno %s for \"%s\", base %d",
+ test->expected_errno_name, test->input, test->base);
+ assert_str_eq(remainder, test->expected_remainder,
+ "Unexpected remainder for \"%s\", base %d",
+ test->input, test->base);
+ if (err == 0) {
+ assert_ju_eq(result, test->expected_x,
+ "Unexpected result for \"%s\", base %d",
+ test->input, test->base);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf_truncated)
+{
+#define BUFLEN 15
+ char buf[BUFLEN];
+ size_t result;
+ size_t len;
+#define TEST(expected_str_untruncated, ...) do { \
+ result = malloc_snprintf(buf, len, __VA_ARGS__); \
+ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
+ "Unexpected string inequality (\"%s\" vs \"%s\")", \
+ buf, expected_str_untruncated); \
+ assert_zu_eq(result, strlen(expected_str_untruncated), \
+ "Unexpected result"); \
+} while (0)
+
+ for (len = 1; len < BUFLEN; len++) {
+ TEST("012346789", "012346789");
+ TEST("a0123b", "a%sb", "0123");
+ TEST("a01234567", "a%s%s", "0123", "4567");
+ TEST("a0123 ", "a%-6s", "0123");
+ TEST("a 0123", "a%6s", "0123");
+ TEST("a 012", "a%6.3s", "0123");
+ TEST("a 012", "a%*.*s", 6, 3, "0123");
+ TEST("a 123b", "a% db", 123);
+ TEST("a123b", "a%-db", 123);
+ TEST("a-123b", "a%-db", -123);
+ TEST("a+123b", "a%+db", 123);
+ }
+#undef BUFLEN
+#undef TEST
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf)
+{
+#define BUFLEN 128
+ char buf[BUFLEN];
+ size_t result;
+#define TEST(expected_str, ...) do { \
+ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
+ assert_str_eq(buf, expected_str, "Unexpected output"); \
+ assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
+} while (0)
+
+ TEST("hello", "hello");
+
+ TEST("50%, 100%", "50%%, %d%%", 100);
+
+ TEST("a0123b", "a%sb", "0123");
+
+ TEST("a 0123b", "a%5sb", "0123");
+ TEST("a 0123b", "a%*sb", 5, "0123");
+
+ TEST("a0123 b", "a%-5sb", "0123");
+ TEST("a0123b", "a%*sb", -1, "0123");
+ TEST("a0123 b", "a%*sb", -5, "0123");
+ TEST("a0123 b", "a%-*sb", -5, "0123");
+
+ TEST("a012b", "a%.3sb", "0123");
+ TEST("a012b", "a%.*sb", 3, "0123");
+ TEST("a0123b", "a%.*sb", -3, "0123");
+
+ TEST("a 012b", "a%5.3sb", "0123");
+ TEST("a 012b", "a%5.*sb", 3, "0123");
+ TEST("a 012b", "a%*.3sb", 5, "0123");
+ TEST("a 012b", "a%*.*sb", 5, 3, "0123");
+ TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
+
+ TEST("_abcd_", "_%x_", 0xabcd);
+ TEST("_0xabcd_", "_%#x_", 0xabcd);
+ TEST("_1234_", "_%o_", 01234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_ 1234_", "_% d_", 1234);
+ TEST("_+1234_", "_%+d_", 1234);
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_-1234_", "_% d_", -1234);
+ TEST("_-1234_", "_%+d_", -1234);
+
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_-1234_", "_%i_", -1234);
+ TEST("_1234_", "_%i_", 1234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+ TEST("_0x1234abc_", "_%#x_", 0x1234abc);
+ TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
+ TEST("_c_", "_%c_", 'c');
+ TEST("_string_", "_%s_", "string");
+ TEST("_0x42_", "_%p_", ((void *)0x42));
+
+ TEST("_-1234_", "_%ld_", ((long)-1234));
+ TEST("_1234_", "_%ld_", ((long)1234));
+ TEST("_-1234_", "_%li_", ((long)-1234));
+ TEST("_1234_", "_%li_", ((long)1234));
+ TEST("_01234_", "_%#lo_", ((long)01234));
+ TEST("_1234_", "_%lu_", ((long)1234));
+ TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
+
+ TEST("_-1234_", "_%lld_", ((long long)-1234));
+ TEST("_1234_", "_%lld_", ((long long)1234));
+ TEST("_-1234_", "_%lli_", ((long long)-1234));
+ TEST("_1234_", "_%lli_", ((long long)1234));
+ TEST("_01234_", "_%#llo_", ((long long)01234));
+ TEST("_1234_", "_%llu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%qd_", ((long long)-1234));
+ TEST("_1234_", "_%qd_", ((long long)1234));
+ TEST("_-1234_", "_%qi_", ((long long)-1234));
+ TEST("_1234_", "_%qi_", ((long long)1234));
+ TEST("_01234_", "_%#qo_", ((long long)01234));
+ TEST("_1234_", "_%qu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
+ TEST("_1234_", "_%jd_", ((intmax_t)1234));
+ TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
+ TEST("_1234_", "_%ji_", ((intmax_t)1234));
+ TEST("_01234_", "_%#jo_", ((intmax_t)01234));
+ TEST("_1234_", "_%ju_", ((intmax_t)1234));
+ TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
+
+ TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
+ TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
+
+ TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zd_", ((ssize_t)1234));
+ TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zi_", ((ssize_t)1234));
+ TEST("_01234_", "_%#zo_", ((ssize_t)01234));
+ TEST("_1234_", "_%zu_", ((ssize_t)1234));
+ TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
+#undef BUFLEN
+#undef TEST
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_pow2_ceil_u64,
+ test_pow2_ceil_u32,
+ test_pow2_ceil_zu,
+ test_malloc_strtoumax_no_endptr,
+ test_malloc_strtoumax,
+ test_malloc_snprintf_truncated,
+ test_malloc_snprintf));
+}
diff --git a/memory/jemalloc/src/test/unit/witness.c b/memory/jemalloc/src/test/unit/witness.c
new file mode 100644
index 000000000..ed172753c
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/witness.c
@@ -0,0 +1,278 @@
+#include "test/jemalloc_test.h"
+
+static witness_lock_error_t *witness_lock_error_orig;
+static witness_owner_error_t *witness_owner_error_orig;
+static witness_not_owner_error_t *witness_not_owner_error_orig;
+static witness_lockless_error_t *witness_lockless_error_orig;
+
+static bool saw_lock_error;
+static bool saw_owner_error;
+static bool saw_not_owner_error;
+static bool saw_lockless_error;
+
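+/*
+ * Error-handler intercepts: instead of aborting, record that a violation was
+ * reported so the tests below can provoke errors on purpose and then restore
+ * the original handlers.
+ */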
+static void
+witness_lock_error_intercept(const witness_list_t *witnesses,
+ const witness_t *witness)
+{
+
+ saw_lock_error = true;
+}
+
+static void
+witness_owner_error_intercept(const witness_t *witness)
+{
+
+ saw_owner_error = true;
+}
+
+static void
+witness_not_owner_error_intercept(const witness_t *witness)
+{
+
+ saw_not_owner_error = true;
+}
+
+static void
+witness_lockless_error_intercept(const witness_list_t *witnesses)
+{
+
+ saw_lockless_error = true;
+}
+
+static int
+witness_comp(const witness_t *a, const witness_t *b)
+{
+
+ assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+ return (strcmp(a->name, b->name));
+}
+
+static int
+witness_comp_reverse(const witness_t *a, const witness_t *b)
+{
+
+ assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+ return (-strcmp(a->name, b->name));
+}
+
+TEST_BEGIN(test_witness)
+{
+ witness_t a, b;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+ witness_assert_not_owner(tsdn, &a);
+ witness_lock(tsdn, &a);
+ witness_assert_owner(tsdn, &a);
+
+ witness_init(&b, "b", 2, NULL);
+ witness_assert_not_owner(tsdn, &b);
+ witness_lock(tsdn, &b);
+ witness_assert_owner(tsdn, &b);
+
+ witness_unlock(tsdn, &a);
+ witness_unlock(tsdn, &b);
+
+ witness_assert_lockless(tsdn);
+}
+TEST_END
+
+TEST_BEGIN(test_witness_comp)
+{
+ witness_t a, b, c, d;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, witness_comp);
+ witness_assert_not_owner(tsdn, &a);
+ witness_lock(tsdn, &a);
+ witness_assert_owner(tsdn, &a);
+
+ witness_init(&b, "b", 1, witness_comp);
+ witness_assert_not_owner(tsdn, &b);
+ witness_lock(tsdn, &b);
+ witness_assert_owner(tsdn, &b);
+ witness_unlock(tsdn, &b);
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ witness_init(&c, "c", 1, witness_comp_reverse);
+ witness_assert_not_owner(tsdn, &c);
+ assert_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(tsdn, &c);
+ assert_true(saw_lock_error, "Expected witness lock error");
+ witness_unlock(tsdn, &c);
+
+ saw_lock_error = false;
+
+ witness_init(&d, "d", 1, NULL);
+ witness_assert_not_owner(tsdn, &d);
+ assert_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(tsdn, &d);
+ assert_true(saw_lock_error, "Expected witness lock error");
+ witness_unlock(tsdn, &d);
+
+ witness_unlock(tsdn, &a);
+
+ witness_assert_lockless(tsdn);
+
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_reversal)
+{
+ witness_t a, b;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+ witness_init(&b, "b", 2, NULL);
+
+ witness_lock(tsdn, &b);
+ assert_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(tsdn, &a);
+ assert_true(saw_lock_error, "Expected witness lock error");
+
+ witness_unlock(tsdn, &a);
+ witness_unlock(tsdn, &b);
+
+ witness_assert_lockless(tsdn);
+
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_recursive)
+{
+ witness_t a;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_not_owner_error_orig = witness_not_owner_error;
+ witness_not_owner_error = witness_not_owner_error_intercept;
+ saw_not_owner_error = false;
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+
+ witness_lock(tsdn, &a);
+ assert_false(saw_lock_error, "Unexpected witness lock error");
+ assert_false(saw_not_owner_error, "Unexpected witness not owner error");
+ witness_lock(tsdn, &a);
+ assert_true(saw_lock_error, "Expected witness lock error");
+ assert_true(saw_not_owner_error, "Expected witness not owner error");
+
+ witness_unlock(tsdn, &a);
+
+ witness_assert_lockless(tsdn);
+
+ witness_not_owner_error = witness_not_owner_error_orig;
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_unlock_not_owned)
+{
+ witness_t a;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_owner_error_orig = witness_owner_error;
+ witness_owner_error = witness_owner_error_intercept;
+ saw_owner_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+
+ assert_false(saw_owner_error, "Unexpected owner error");
+ witness_unlock(tsdn, &a);
+ assert_true(saw_owner_error, "Expected owner error");
+
+ witness_assert_lockless(tsdn);
+
+ witness_owner_error = witness_owner_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_lockful)
+{
+ witness_t a;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_lockless_error_orig = witness_lockless_error;
+ witness_lockless_error = witness_lockless_error_intercept;
+ saw_lockless_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+
+ assert_false(saw_lockless_error, "Unexpected lockless error");
+ witness_assert_lockless(tsdn);
+
+ witness_lock(tsdn, &a);
+ witness_assert_lockless(tsdn);
+ assert_true(saw_lockless_error, "Expected lockless error");
+
+ witness_unlock(tsdn, &a);
+
+ witness_assert_lockless(tsdn);
+
+ witness_lockless_error = witness_lockless_error_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_witness,
+ test_witness_comp,
+ test_witness_reversal,
+ test_witness_recursive,
+ test_witness_unlock_not_owned,
+ test_witness_lockful));
+}
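
For reference, a minimal sketch of the locking discipline these tests exercise, using the same internal witness API (witness_init/witness_lock/witness_unlock) as the test cases above; witnesses must be acquired in increasing rank order, or the witness_lock_error hook intercepted above fires. This is an illustration only, not part of the patch.

    /* Sketch only: the rank discipline verified by the tests above. */
    static void
    locking_discipline_sketch(void)
    {
        witness_t outer, inner;
        tsdn_t *tsdn = tsdn_fetch();

        witness_init(&outer, "outer", 1, NULL);
        witness_init(&inner, "inner", 2, NULL);

        witness_lock(tsdn, &outer);     /* rank 1 first... */
        witness_lock(tsdn, &inner);     /* ...then rank 2: allowed */

        witness_unlock(tsdn, &inner);
        witness_unlock(tsdn, &outer);

        witness_assert_lockless(tsdn);  /* nothing is held anymore */
    }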
diff --git a/memory/jemalloc/src/test/unit/zero.c b/memory/jemalloc/src/test/unit/zero.c
new file mode 100644
index 000000000..30ebe37a4
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/zero.c
@@ -0,0 +1,80 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf =
+ "abort:false,junk:false,zero:true,redzone:false,quarantine:0";
+#endif
+
+static void
+test_zero(size_t sz_min, size_t sz_max)
+{
+ uint8_t *s;
+ size_t sz_prev, sz, i;
+#define MAGIC ((uint8_t)0x61)
+
+ sz_prev = 0;
+ s = (uint8_t *)mallocx(sz_min, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
+ for (sz = sallocx(s, 0); sz <= sz_max;
+ sz_prev = sz, sz = sallocx(s, 0)) {
+ if (sz_prev > 0) {
+ assert_u_eq(s[0], MAGIC,
+ "Previously allocated byte %zu/%zu is corrupted",
+ ZU(0), sz_prev);
+ assert_u_eq(s[sz_prev-1], MAGIC,
+ "Previously allocated byte %zu/%zu is corrupted",
+ sz_prev-1, sz_prev);
+ }
+
+ for (i = sz_prev; i < sz; i++) {
+ assert_u_eq(s[i], 0x0,
+ "Newly allocated byte %zu/%zu isn't zero-filled",
+ i, sz);
+ s[i] = MAGIC;
+ }
+
+ if (xallocx(s, sz+1, 0, 0) == sz) {
+ s = (uint8_t *)rallocx(s, sz+1, 0);
+ assert_ptr_not_null((void *)s,
+ "Unexpected rallocx() failure");
+ }
+ }
+
+ dallocx(s, 0);
+#undef MAGIC
+}
+
+TEST_BEGIN(test_zero_small)
+{
+
+ test_skip_if(!config_fill);
+ test_zero(1, SMALL_MAXCLASS-1);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_large)
+{
+
+ test_skip_if(!config_fill);
+ test_zero(SMALL_MAXCLASS+1, large_maxclass);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+
+ test_skip_if(!config_fill);
+ test_zero(large_maxclass+1, chunksize*2);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_zero_small,
+ test_zero_large,
+ test_zero_huge));
+}
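
The test above relies on jemalloc reading the application-provided malloc_conf string at startup. A minimal sketch of how a standalone program could request the same zero-filling behavior; this assumes a jemalloc built with fill support (the config_fill the tests skip on), and the sizes are arbitrary.

    #include <assert.h>
    #include <stdlib.h>

    /* Read by jemalloc at startup, same mechanism as the test above. */
    extern "C" const char *malloc_conf = "zero:true,junk:false";

    int
    main(void)
    {
        unsigned char *p = (unsigned char *)malloc(64);
        assert(p != NULL);
        /* With zero:true and fill support compiled in, fresh bytes are 0. */
        for (size_t i = 0; i < 64; i++)
            assert(p[i] == 0);
        free(p);
        return 0;
    }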
diff --git a/memory/jemalloc/update.sh b/memory/jemalloc/update.sh
new file mode 100755
index 000000000..bd126ec8a
--- /dev/null
+++ b/memory/jemalloc/update.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -e
+
+cd `dirname $0`
+
+source upstream.info
+
+rm -rf src
+git clone "$UPSTREAM_REPO" src
+cd src
+git checkout "$UPSTREAM_COMMIT"
+autoconf
+git describe --long --abbrev=40 > VERSION
+rm -rf .git .gitignore .gitattributes autom4te.cache .autom4te.cfg
+
+cd ..
+hg addremove -q src
+
+echo "jemalloc has now been updated. Don't forget to run hg commit!"
diff --git a/memory/jemalloc/upstream.info b/memory/jemalloc/upstream.info
new file mode 100644
index 000000000..ba561cc54
--- /dev/null
+++ b/memory/jemalloc/upstream.info
@@ -0,0 +1,2 @@
+UPSTREAM_REPO=https://github.com/jemalloc/jemalloc
+UPSTREAM_COMMIT=4.3.1
diff --git a/memory/moz.build b/memory/moz.build
new file mode 100644
index 000000000..cf8bbc0f3
--- /dev/null
+++ b/memory/moz.build
@@ -0,0 +1,28 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIRS += [
+ 'mozalloc',
+ 'fallible',
+]
+
+if not CONFIG['JS_STANDALONE']:
+ DIRS += ['volatile']
+
+if CONFIG['MOZ_MEMORY']:
+ # NB: gtest dir is included in toolkit/toolkit.build due to its dependency
+ # on libxul.
+ DIRS += [
+ 'build',
+ 'mozjemalloc',
+ ]
+
+ if CONFIG['MOZ_JEMALLOC4'] or CONFIG['MOZ_REPLACE_MALLOC']:
+ if not CONFIG['MOZ_SYSTEM_JEMALLOC']:
+ DIRS += ['jemalloc']
+
+ if CONFIG['MOZ_REPLACE_MALLOC']:
+ DIRS += ['replace']
diff --git a/memory/mozalloc/moz.build b/memory/mozalloc/moz.build
new file mode 100644
index 000000000..56d4b8d80
--- /dev/null
+++ b/memory/mozalloc/moz.build
@@ -0,0 +1,57 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+NO_VISIBILITY_FLAGS = True
+
+EXPORTS.mozilla += [
+ 'mozalloc.h',
+ 'mozalloc_abort.h',
+ 'mozalloc_oom.h',
+]
+
+if CONFIG['WRAP_STL_INCLUDES']:
+ if CONFIG['GNU_CXX']:
+ EXPORTS.mozilla += ['throw_gcc.h']
+ elif CONFIG['_MSC_VER']:
+ DEFINES['_HAS_EXCEPTIONS'] = 0
+ if CONFIG['MOZ_MSVC_STL_WRAP_RAISE']:
+ EXPORTS.mozilla += [
+ 'msvc_raise_wrappers.h',
+ 'throw_msvc.h',
+ ]
+ SOURCES += [
+ 'msvc_raise_wrappers.cpp',
+ ]
+
+if CONFIG['OS_TARGET'] == 'WINNT':
+ # Keep this file separate to avoid #include'ing windows.h everywhere.
+ SOURCES += [
+ 'winheap.cpp',
+ ]
+
+UNIFIED_SOURCES += [
+ 'mozalloc.cpp',
+ 'mozalloc_abort.cpp',
+ 'mozalloc_oom.cpp',
+]
+
+FINAL_LIBRARY = 'mozglue'
+
+# The strndup declaration in string.h is in an ifdef __USE_GNU section
+DEFINES['_GNU_SOURCE'] = True
+
+DISABLE_STL_WRAPPING = True
+
+DEFINES['IMPL_MFBT'] = True
+
+if CONFIG['_MSC_VER']:
+ DIRS += ['staticruntime']
+
+LOCAL_INCLUDES += [
+ '!/xpcom',
+ '/memory/build',
+]
+
+DIST_INSTALL = True
diff --git a/memory/mozalloc/mozalloc.cpp b/memory/mozalloc/mozalloc.cpp
new file mode 100644
index 000000000..55d36d80b
--- /dev/null
+++ b/memory/mozalloc/mozalloc.cpp
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stddef.h> // for size_t
+
+// Building with USE_STATIC_LIBS = True sets -MT instead of -MD. -MT sets _MT,
+// while -MD sets _MT and _DLL.
+#if defined(_MT) && !defined(_DLL)
+#define MOZ_STATIC_RUNTIME
+#endif
+
+#if defined(MOZ_MEMORY) && !defined(MOZ_STATIC_RUNTIME)
+// mozalloc.cpp is part of the same library as mozmemory, thus MOZ_MEMORY_IMPL
+// is needed.
+#define MOZ_MEMORY_IMPL
+#include "mozmemory_wrap.h"
+
+#if defined(XP_DARWIN)
+#include <malloc/malloc.h> // for malloc_size
+#endif
+
+// See mozmemory_wrap.h for more details. This file is part of libmozglue, so
+// it needs to use _impl suffixes. However, with libmozglue growing, this is
+// becoming cumbersome, so we will likely use a malloc.h wrapper of some sort
+// and allow the use of the functions without a _impl suffix.
+#define MALLOC_DECL(name, return_type, ...) \
+ extern "C" MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
+#include "malloc_decls.h"
+
+extern "C" MOZ_MEMORY_API char *strdup_impl(const char *);
+extern "C" MOZ_MEMORY_API char *strndup_impl(const char *, size_t);
+
+#else
+// When jemalloc is disabled, or when building the static runtime variant,
+// the _impl suffixes must not be used.
+
+#if defined(MALLOC_H)
+# include MALLOC_H // for memalign, valloc, malloc_size, malloc_usable_size
+#endif // if defined(MALLOC_H)
+#include <stdlib.h> // for malloc, free
+#if defined(XP_UNIX)
+# include <unistd.h> // for valloc on *BSD
+#endif //if defined(XP_UNIX)
+
+#define malloc_impl malloc
+#define posix_memalign_impl posix_memalign
+#define calloc_impl calloc
+#define realloc_impl realloc
+#define free_impl free
+#define memalign_impl memalign
+#define valloc_impl valloc
+#define malloc_usable_size_impl malloc_usable_size
+#define strdup_impl strdup
+#define strndup_impl strndup
+
+#endif
+
+#include <errno.h>
+#include <new> // for std::bad_alloc
+#include <string.h>
+
+#include <sys/types.h>
+
+#include "mozilla/mozalloc.h"
+#include "mozilla/mozalloc_oom.h" // for mozalloc_handle_oom
+
+#ifdef __GNUC__
+#define LIKELY(x) (__builtin_expect(!!(x), 1))
+#define UNLIKELY(x) (__builtin_expect(!!(x), 0))
+#else
+#define LIKELY(x) (x)
+#define UNLIKELY(x) (x)
+#endif
+
+void*
+moz_xmalloc(size_t size)
+{
+ void* ptr = malloc_impl(size);
+ if (UNLIKELY(!ptr && size)) {
+ mozalloc_handle_oom(size);
+ return moz_xmalloc(size);
+ }
+ return ptr;
+}
+
+void*
+moz_xcalloc(size_t nmemb, size_t size)
+{
+ void* ptr = calloc_impl(nmemb, size);
+ if (UNLIKELY(!ptr && nmemb && size)) {
+ mozalloc_handle_oom(size);
+ return moz_xcalloc(nmemb, size);
+ }
+ return ptr;
+}
+
+void*
+moz_xrealloc(void* ptr, size_t size)
+{
+ void* newptr = realloc_impl(ptr, size);
+ if (UNLIKELY(!newptr && size)) {
+ mozalloc_handle_oom(size);
+ return moz_xrealloc(ptr, size);
+ }
+ return newptr;
+}
+
+char*
+moz_xstrdup(const char* str)
+{
+ char* dup = strdup_impl(str);
+ if (UNLIKELY(!dup)) {
+ mozalloc_handle_oom(0);
+ return moz_xstrdup(str);
+ }
+ return dup;
+}
+
+#if defined(HAVE_STRNDUP)
+char*
+moz_xstrndup(const char* str, size_t strsize)
+{
+ char* dup = strndup_impl(str, strsize);
+ if (UNLIKELY(!dup)) {
+ mozalloc_handle_oom(strsize);
+ return moz_xstrndup(str, strsize);
+ }
+ return dup;
+}
+#endif // if defined(HAVE_STRNDUP)
+
+#if defined(HAVE_POSIX_MEMALIGN)
+int
+moz_xposix_memalign(void **ptr, size_t alignment, size_t size)
+{
+ int err = posix_memalign_impl(ptr, alignment, size);
+ if (UNLIKELY(err && ENOMEM == err)) {
+ mozalloc_handle_oom(size);
+ return moz_xposix_memalign(ptr, alignment, size);
+ }
+ // else: (0 == err) or (EINVAL == err)
+ return err;
+}
+int
+moz_posix_memalign(void **ptr, size_t alignment, size_t size)
+{
+ int code = posix_memalign_impl(ptr, alignment, size);
+ if (code)
+ return code;
+
+#if defined(XP_DARWIN)
+    // Work around faulty OSX posix_memalign, which sometimes returns memory
+    // with incorrect alignment yet still returns 0 as if nothing were wrong.
+ size_t mask = alignment - 1;
+ if (((size_t)(*ptr) & mask) != 0) {
+ void* old = *ptr;
+ code = moz_posix_memalign(ptr, alignment, size);
+ free(old);
+ }
+#endif
+
+ return code;
+
+}
+#endif // if defined(HAVE_POSIX_MEMALIGN)
+
+#if defined(HAVE_MEMALIGN)
+void*
+moz_xmemalign(size_t boundary, size_t size)
+{
+ void* ptr = memalign_impl(boundary, size);
+ if (UNLIKELY(!ptr && EINVAL != errno)) {
+ mozalloc_handle_oom(size);
+ return moz_xmemalign(boundary, size);
+ }
+ // non-NULL ptr or errno == EINVAL
+ return ptr;
+}
+#endif // if defined(HAVE_MEMALIGN)
+
+#if defined(HAVE_VALLOC)
+void*
+moz_xvalloc(size_t size)
+{
+ void* ptr = valloc_impl(size);
+ if (UNLIKELY(!ptr)) {
+ mozalloc_handle_oom(size);
+ return moz_xvalloc(size);
+ }
+ return ptr;
+}
+#endif // if defined(HAVE_VALLOC)
+
+#ifndef MOZ_STATIC_RUNTIME
+size_t
+moz_malloc_usable_size(void *ptr)
+{
+ if (!ptr)
+ return 0;
+
+#if defined(XP_DARWIN)
+ return malloc_size(ptr);
+#elif defined(HAVE_MALLOC_USABLE_SIZE) || defined(MOZ_MEMORY)
+ return malloc_usable_size_impl(ptr);
+#elif defined(XP_WIN)
+ return _msize(ptr);
+#else
+ return 0;
+#endif
+}
+
+size_t moz_malloc_size_of(const void *ptr)
+{
+ return moz_malloc_usable_size((void *)ptr);
+}
+#endif
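
A hypothetical caller-side sketch of the contract implemented above: the moz_x* functions never return null (failures are funneled through mozalloc_handle_oom), and moz_malloc_size_of reports the allocator's usable size, e.g. for memory reporters. The Node type and function names here are illustrative only.

    #include "mozilla/mozalloc.h"

    struct Node { int value; Node* next; };

    Node*
    NewNode(int aValue)
    {
        // No null check needed: moz_xmalloc aborts on OOM instead of
        // returning null.
        Node* n = static_cast<Node*>(moz_xmalloc(sizeof(Node)));
        n->value = aValue;
        n->next = nullptr;
        return n;
    }

    size_t
    NodeHeapSize(const Node* aNode)
    {
        // Usable size as seen by the allocator (0 for a null pointer).
        return moz_malloc_size_of(aNode);
    }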
diff --git a/memory/mozalloc/mozalloc.h b/memory/mozalloc/mozalloc.h
new file mode 100644
index 000000000..f7ddb7e6d
--- /dev/null
+++ b/memory/mozalloc/mozalloc.h
@@ -0,0 +1,361 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_mozalloc_h
+#define mozilla_mozalloc_h
+
+/*
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=427099
+ */
+
+#if defined(__cplusplus)
+# include <new>
+// Since libstdc++ 6, including the C headers (e.g. stdlib.h) instead of the
+// corresponding C++ header (e.g. cstdlib) can cause confusion in C++ code
+// using things defined there. Specifically, with stdlib.h, the use of abs()
+// in gfx/graphite2/src/inc/UtfCodec.h somehow ends up picking the wrong abs()
+# include <cstdlib>
+# include <cstring>
+#else
+# include <stdlib.h>
+# include <string.h>
+#endif
+
+#if defined(__cplusplus)
+#include "mozilla/fallible.h"
+#include "mozilla/mozalloc_abort.h"
+#include "mozilla/TemplateLib.h"
+#endif
+#include "mozilla/Attributes.h"
+#include "mozilla/Types.h"
+
+#define MOZALLOC_HAVE_XMALLOC
+
+#if defined(MOZ_ALWAYS_INLINE_EVEN_DEBUG)
+# define MOZALLOC_INLINE MOZ_ALWAYS_INLINE_EVEN_DEBUG
+#elif defined(HAVE_FORCEINLINE)
+# define MOZALLOC_INLINE __forceinline
+#else
+# define MOZALLOC_INLINE inline
+#endif
+
+/* Workaround build problem with Sun Studio 12 */
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+# undef MOZ_MUST_USE
+# define MOZ_MUST_USE
+# undef MOZ_ALLOCATOR
+# define MOZ_ALLOCATOR
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* ifdef __cplusplus */
+
+/*
+ * We need to use malloc_impl and free_impl in this file when they are
+ * defined, because of how mozglue.dll is linked on Windows, where using
+ * malloc/free would end up using the symbols from the MSVCRT instead of
+ * ours.
+ */
+#ifndef free_impl
+#define free_impl free
+#define free_impl_
+#endif
+#ifndef malloc_impl
+#define malloc_impl malloc
+#define malloc_impl_
+#endif
+
+/*
+ * Each declaration below is analogous to a "standard" allocation
+ * function, except that the out-of-memory handling is made explicit.
+ * The |moz_x| versions will never return a NULL pointer; if memory
+ * is exhausted, they abort. The |moz_| versions may return NULL
+ * pointers if memory is exhausted: their return value must be checked.
+ *
+ * All these allocation functions are *guaranteed* to return a pointer
+ * to memory allocated in such a way that that memory can be freed by
+ * passing that pointer to |free()|.
+ */
+
+MFBT_API void* moz_xmalloc(size_t size)
+ MOZ_ALLOCATOR;
+
+MFBT_API void* moz_xcalloc(size_t nmemb, size_t size)
+ MOZ_ALLOCATOR;
+
+MFBT_API void* moz_xrealloc(void* ptr, size_t size)
+ MOZ_ALLOCATOR;
+
+MFBT_API char* moz_xstrdup(const char* str)
+ MOZ_ALLOCATOR;
+
+MFBT_API size_t moz_malloc_usable_size(void *ptr);
+
+MFBT_API size_t moz_malloc_size_of(const void *ptr);
+
+#if defined(HAVE_STRNDUP)
+MFBT_API char* moz_xstrndup(const char* str, size_t strsize)
+ MOZ_ALLOCATOR;
+#endif /* if defined(HAVE_STRNDUP) */
+
+
+#if defined(HAVE_POSIX_MEMALIGN)
+MFBT_API MOZ_MUST_USE
+int moz_xposix_memalign(void **ptr, size_t alignment, size_t size);
+
+MFBT_API MOZ_MUST_USE
+int moz_posix_memalign(void **ptr, size_t alignment, size_t size);
+#endif /* if defined(HAVE_POSIX_MEMALIGN) */
+
+
+#if defined(HAVE_MEMALIGN)
+MFBT_API void* moz_xmemalign(size_t boundary, size_t size)
+ MOZ_ALLOCATOR;
+#endif /* if defined(HAVE_MEMALIGN) */
+
+
+#if defined(HAVE_VALLOC)
+MFBT_API void* moz_xvalloc(size_t size)
+ MOZ_ALLOCATOR;
+#endif /* if defined(HAVE_VALLOC) */
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* ifdef __cplusplus */
+
+
+#ifdef __cplusplus
+
+/*
+ * We implement the default operators new/delete as part of
+ * libmozalloc, replacing their definitions in libstdc++. The
+ * operator new* definitions in libmozalloc will never return a NULL
+ * pointer.
+ *
+ * Each operator new immediately below returns a pointer to memory
+ * that can be delete'd by any of
+ *
+ * (1) the matching infallible operator delete immediately below
+ * (2) the matching "fallible" operator delete further below
+ * (3) the matching system |operator delete(void*, std::nothrow)|
+ * (4) the matching system |operator delete(void*) throw(std::bad_alloc)|
+ *
+ * NB: these are declared |throw(std::bad_alloc)|, though they will never
+ * throw that exception. This declaration is consistent with the rule
+ * that |::operator new() throw(std::bad_alloc)| will never return NULL.
+ */
+
+/* NB: This is defined just to silence vacuous warnings about symbol
+ * visibility on OS X/gcc. These symbols are force-inline and not
+ * exported. */
+#if defined(XP_MACOSX)
+# define MOZALLOC_EXPORT_NEW MFBT_API
+#else
+# define MOZALLOC_EXPORT_NEW
+#endif
+
+#if defined(ANDROID)
+/*
+ * It's important to always specify 'throw()' in GCC because it's used to tell
+ * GCC that 'new' may return null. That makes GCC null-check the result before
+ * potentially initializing the memory to zero.
+ * Also, the Android minimalistic headers don't include std::bad_alloc.
+ */
+#define MOZALLOC_THROW_IF_HAS_EXCEPTIONS throw()
+#define MOZALLOC_THROW_BAD_ALLOC_IF_HAS_EXCEPTIONS
+#elif defined(_MSC_VER)
+/*
+ * Suppress build warning spam (bug 578546).
+ */
+#define MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+#define MOZALLOC_THROW_BAD_ALLOC_IF_HAS_EXCEPTIONS
+#else
+#define MOZALLOC_THROW_IF_HAS_EXCEPTIONS throw()
+#define MOZALLOC_THROW_BAD_ALLOC_IF_HAS_EXCEPTIONS throw(std::bad_alloc)
+#endif
+
+#define MOZALLOC_THROW_BAD_ALLOC MOZALLOC_THROW_BAD_ALLOC_IF_HAS_EXCEPTIONS
+
+MOZALLOC_EXPORT_NEW
+#if defined(__GNUC__) && !defined(__clang__) && defined(__SANITIZE_ADDRESS__)
+/* gcc's asan somehow doesn't like always_inline on this function. */
+__attribute__((gnu_inline)) inline
+#else
+MOZALLOC_INLINE
+#endif
+void* operator new(size_t size) MOZALLOC_THROW_BAD_ALLOC
+{
+ return moz_xmalloc(size);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void* operator new(size_t size, const std::nothrow_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return malloc_impl(size);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void* operator new[](size_t size) MOZALLOC_THROW_BAD_ALLOC
+{
+ return moz_xmalloc(size);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void* operator new[](size_t size, const std::nothrow_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return malloc_impl(size);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void operator delete(void* ptr) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return free_impl(ptr);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void operator delete(void* ptr, const std::nothrow_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return free_impl(ptr);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void operator delete[](void* ptr) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return free_impl(ptr);
+}
+
+MOZALLOC_EXPORT_NEW MOZALLOC_INLINE
+void operator delete[](void* ptr, const std::nothrow_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return free_impl(ptr);
+}
+
+
+/*
+ * We also add a new allocator variant: "fallible operator new."
+ * Unlike libmozalloc's implementations of the standard nofail
+ * allocators, this allocator is allowed to return NULL. It can be used
+ * as follows
+ *
+ * Foo* f = new (mozilla::fallible) Foo(...);
+ *
+ * operator delete(fallible) is defined for completeness only.
+ *
+ * Each operator new below returns a pointer to memory that can be
+ * delete'd by any of
+ *
+ * (1) the matching "fallible" operator delete below
+ * (2) the matching infallible operator delete above
+ * (3) the matching system |operator delete(void*, std::nothrow)|
+ * (4) the matching system |operator delete(void*) throw(std::bad_alloc)|
+ */
+
+MOZALLOC_INLINE
+void* operator new(size_t size, const mozilla::fallible_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return malloc_impl(size);
+}
+
+MOZALLOC_INLINE
+void* operator new[](size_t size, const mozilla::fallible_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ return malloc_impl(size);
+}
+
+MOZALLOC_INLINE
+void operator delete(void* ptr, const mozilla::fallible_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ free_impl(ptr);
+}
+
+MOZALLOC_INLINE
+void operator delete[](void* ptr, const mozilla::fallible_t&) MOZALLOC_THROW_IF_HAS_EXCEPTIONS
+{
+ free_impl(ptr);
+}
+
+
+/*
+ * This policy is identical to MallocAllocPolicy, except it uses
+ * moz_xmalloc/moz_xcalloc/moz_xrealloc instead of
+ * malloc/calloc/realloc.
+ */
+class InfallibleAllocPolicy
+{
+public:
+ template <typename T>
+ T* maybe_pod_malloc(size_t aNumElems)
+ {
+ return pod_malloc<T>(aNumElems);
+ }
+
+ template <typename T>
+ T* maybe_pod_calloc(size_t aNumElems)
+ {
+ return pod_calloc<T>(aNumElems);
+ }
+
+ template <typename T>
+ T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+ {
+ return pod_realloc<T>(aPtr, aOldSize, aNewSize);
+ }
+
+ template <typename T>
+ T* pod_malloc(size_t aNumElems)
+ {
+ if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+ reportAllocOverflow();
+ }
+ return static_cast<T*>(moz_xmalloc(aNumElems * sizeof(T)));
+ }
+
+ template <typename T>
+ T* pod_calloc(size_t aNumElems)
+ {
+ return static_cast<T*>(moz_xcalloc(aNumElems, sizeof(T)));
+ }
+
+ template <typename T>
+ T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+ {
+ if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
+ reportAllocOverflow();
+ }
+ return static_cast<T*>(moz_xrealloc(aPtr, aNewSize * sizeof(T)));
+ }
+
+ void free_(void* aPtr)
+ {
+ free_impl(aPtr);
+ }
+
+ void reportAllocOverflow() const
+ {
+ mozalloc_abort("alloc overflow");
+ }
+
+ bool checkSimulatedOOM() const
+ {
+ return true;
+ }
+};
+
+#endif /* ifdef __cplusplus */
+
+#ifdef malloc_impl_
+#undef malloc_impl_
+#undef malloc_impl
+#endif
+#ifdef free_impl_
+#undef free_impl_
+#undef free_impl
+#endif
+
+#endif /* ifndef mozilla_mozalloc_h */
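
A hypothetical usage sketch contrasting the infallible and fallible operators declared above; the sizes are arbitrary and only serve to show where a null check is required.

    #include "mozilla/mozalloc.h"

    void
    Example()
    {
        // Infallible: aborts on OOM, so the result is never null.
        char* header = new char[64];

        // Fallible: a very large array allocation may return null, so the
        // caller must check before using it.
        char* big = new (mozilla::fallible) char[1u << 30];
        if (!big) {
            // Handle the allocation failure gracefully.
        }

        delete[] header;
        delete[] big;  // deleting a null pointer is a no-op
    }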
diff --git a/memory/mozalloc/mozalloc_abort.cpp b/memory/mozalloc/mozalloc_abort.cpp
new file mode 100644
index 000000000..a998d8164
--- /dev/null
+++ b/memory/mozalloc/mozalloc_abort.cpp
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/mozalloc_abort.h"
+
+#ifdef ANDROID
+# include <android/log.h>
+#endif
+#ifdef MOZ_WIDGET_ANDROID
+# include "APKOpen.h"
+# include "dlfcn.h"
+#endif
+#include <stdio.h>
+
+#include "mozilla/Assertions.h"
+
+void
+mozalloc_abort(const char* const msg)
+{
+#ifndef ANDROID
+ fputs(msg, stderr);
+ fputs("\n", stderr);
+#else
+ __android_log_print(ANDROID_LOG_ERROR, "Gecko", "mozalloc_abort: %s", msg);
+#endif
+#ifdef MOZ_WIDGET_ANDROID
+ abortThroughJava(msg);
+#endif
+ MOZ_CRASH();
+}
+
+#ifdef MOZ_WIDGET_ANDROID
+template <size_t N>
+void fillAbortMessage(char (&msg)[N], uintptr_t retAddress) {
+ /*
+ * On Android, we often don't have reliable backtrace when crashing inside
+ * abort(). Therefore, we try to find out who is calling abort() and add
+ * that to the message.
+ */
+ Dl_info info = {};
+ dladdr(reinterpret_cast<void*>(retAddress), &info);
+
+ const char* const module = info.dli_fname ? info.dli_fname : "";
+ const char* const base_module = strrchr(module, '/');
+ const void* const module_offset =
+ reinterpret_cast<void*>(retAddress - uintptr_t(info.dli_fbase));
+ const char* const sym = info.dli_sname ? info.dli_sname : "";
+
+ snprintf(msg, sizeof(msg), "abort() called from %s:%p (%s)",
+ base_module ? base_module + 1 : module, module_offset, sym);
+}
+#endif
+
+#if defined(XP_UNIX) && !defined(MOZ_ASAN)
+// Define abort() here, so that it is used instead of the system abort(). This
+// lets us control the behavior when aborting, in order to get better results
+// on *NIX platforms. See mozalloc_abort for details.
+//
+// For AddressSanitizer, we must not redefine system abort because the ASan
+// option "abort_on_error=1" calls abort() and therefore causes the following
+// call chain with our redefined abort:
+//
+// ASan -> abort() -> mozalloc_abort() -> MOZ_CRASH() -> Segmentation fault
+//
+// That segmentation fault will be interpreted as another bug by ASan and as a
+// result, ASan will just exit(1) instead of aborting.
+void abort(void)
+{
+#ifdef MOZ_WIDGET_ANDROID
+ char msg[64] = {};
+ fillAbortMessage(msg, uintptr_t(__builtin_return_address(0)));
+#else
+ const char* const msg = "Redirecting call to abort() to mozalloc_abort\n";
+#endif
+
+ mozalloc_abort(msg);
+
+ // We won't reach here because mozalloc_abort() is MOZ_NORETURN. But that
+ // annotation isn't used on ARM (see mozalloc_abort.h for why) so we add a
+ // redundant MOZ_CRASH() here to avoid a "'noreturn' function does return"
+ // warning.
+ MOZ_CRASH();
+}
+#endif
+
diff --git a/memory/mozalloc/mozalloc_abort.h b/memory/mozalloc/mozalloc_abort.h
new file mode 100644
index 000000000..065cebcb3
--- /dev/null
+++ b/memory/mozalloc/mozalloc_abort.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_mozalloc_abort_h
+#define mozilla_mozalloc_abort_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Types.h"
+
+/**
+ * Terminate this process in such a way that breakpad is triggered, if
+ * at all possible.
+ *
+ * Note: MOZ_NORETURN seems to break crash stacks on ARM, so we don't
+ * use that annotation there.
+ */
+MFBT_API
+#if !defined(__arm__)
+ MOZ_NORETURN
+#endif
+ void mozalloc_abort(const char* const msg);
+
+
+#endif /* ifndef mozilla_mozalloc_abort_h */
diff --git a/memory/mozalloc/mozalloc_oom.cpp b/memory/mozalloc/mozalloc_oom.cpp
new file mode 100644
index 000000000..820888cdb
--- /dev/null
+++ b/memory/mozalloc/mozalloc_oom.cpp
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/mozalloc_abort.h"
+#include "mozilla/mozalloc_oom.h"
+#include "mozilla/Assertions.h"
+
+static mozalloc_oom_abort_handler gAbortHandler;
+
+#define OOM_MSG_LEADER "out of memory: 0x"
+#define OOM_MSG_DIGITS "0000000000000000" // large enough for 2^64
+#define OOM_MSG_TRAILER " bytes requested"
+#define OOM_MSG_FIRST_DIGIT_OFFSET sizeof(OOM_MSG_LEADER) - 1
+#define OOM_MSG_LAST_DIGIT_OFFSET sizeof(OOM_MSG_LEADER) + \
+ sizeof(OOM_MSG_DIGITS) - 3
+
+static const char *hex = "0123456789ABCDEF";
+
+void
+mozalloc_handle_oom(size_t size)
+{
+ char oomMsg[] = OOM_MSG_LEADER OOM_MSG_DIGITS OOM_MSG_TRAILER;
+ size_t i;
+
+    // NB: this is handle_oom() stage 1, which simply aborts on OOM. We might
+    // later proceed to a stage 2 in which an attempt is made to reclaim
+    // memory.
+
+ if (gAbortHandler)
+ gAbortHandler(size);
+
+ static_assert(OOM_MSG_FIRST_DIGIT_OFFSET > 0,
+ "Loop below will never terminate (i can't go below 0)");
+
+ // Insert size into the diagnostic message using only primitive operations
+ for (i = OOM_MSG_LAST_DIGIT_OFFSET;
+ size && i >= OOM_MSG_FIRST_DIGIT_OFFSET; i--) {
+ oomMsg[i] = hex[size % 16];
+ size /= 16;
+ }
+
+ mozalloc_abort(oomMsg);
+}
+
+void
+mozalloc_set_oom_abort_handler(mozalloc_oom_abort_handler handler)
+{
+ gAbortHandler = handler;
+}
diff --git a/memory/mozalloc/mozalloc_oom.h b/memory/mozalloc/mozalloc_oom.h
new file mode 100644
index 000000000..35bb9acc8
--- /dev/null
+++ b/memory/mozalloc/mozalloc_oom.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_mozalloc_oom_h
+#define mozilla_mozalloc_oom_h
+
+#include "mozalloc.h"
+
+/**
+ * Called when memory is critically low. Returns iff it was able to
+ * remedy the critical memory situation; if not, it will abort().
+ */
+MFBT_API void mozalloc_handle_oom(size_t requestedSize);
+
+/**
+ * Registered by embedders (specifically Mozilla breakpad) that want to be
+ * notified of an intentional abort, so they can annotate any crash report
+ * with the size of the allocation on which we aborted.
+ */
+typedef void (*mozalloc_oom_abort_handler)(size_t size);
+MFBT_API void mozalloc_set_oom_abort_handler(mozalloc_oom_abort_handler handler);
+
+/* TODO: functions to query system memory usage and register
+ * critical-memory handlers. */
+
+
+#endif /* ifndef mozilla_mozalloc_oom_h */
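
A hedged sketch of the embedder hook described above: a handler registered through mozalloc_set_oom_abort_handler runs just before the abort and can stash the failed request size for a crash annotation. The global variable and function names here are stand-ins, not a real crash-reporter API.

    #include "mozilla/mozalloc_oom.h"

    // Stand-in for wherever the crash reporter keeps its annotations.
    static size_t gLastOOMRequestSize = 0;

    static void
    AnnotateOOMSize(size_t aSize)
    {
        gLastOOMRequestSize = aSize;
    }

    void
    InstallOOMAnnotator()
    {
        // mozalloc_handle_oom() will call AnnotateOOMSize(size) before
        // aborting the process.
        mozalloc_set_oom_abort_handler(AnnotateOOMSize);
    }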
diff --git a/memory/mozalloc/msvc_raise_wrappers.cpp b/memory/mozalloc/msvc_raise_wrappers.cpp
new file mode 100644
index 000000000..820663f63
--- /dev/null
+++ b/memory/mozalloc/msvc_raise_wrappers.cpp
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdio.h>
+
+#include "mozalloc_abort.h"
+
+__declspec(noreturn) static void abort_from_exception(const char* const which,
+ const char* const what);
+static void
+abort_from_exception(const char* const which, const char* const what)
+{
+ fprintf(stderr, "fatal: STL threw %s: ", which);
+ mozalloc_abort(what);
+}
+
+namespace std {
+
+// NB: user code is not supposed to touch the std:: namespace. We're
+// doing this after careful review because we want to define our own
+// exception throwing semantics. Don't try this at home!
+
+MFBT_API __declspec(noreturn) void
+moz_Xinvalid_argument(const char* what)
+{
+ abort_from_exception("invalid_argument", what);
+}
+
+MFBT_API __declspec(noreturn) void
+moz_Xlength_error(const char* what)
+{
+ abort_from_exception("length_error", what);
+}
+
+MFBT_API __declspec(noreturn) void
+moz_Xout_of_range(const char* what)
+{
+ abort_from_exception("out_of_range", what);
+}
+
+MFBT_API __declspec(noreturn) void
+moz_Xoverflow_error(const char* what)
+{
+ abort_from_exception("overflow_error", what);
+}
+
+MFBT_API __declspec(noreturn) void
+moz_Xruntime_error(const char* what)
+{
+ abort_from_exception("runtime_error", what);
+}
+
+MFBT_API __declspec(noreturn) void
+moz_Xbad_function_call()
+{
+ abort_from_exception("bad_function_call", "bad function call");
+}
+
+} // namespace std
diff --git a/memory/mozalloc/msvc_raise_wrappers.h b/memory/mozalloc/msvc_raise_wrappers.h
new file mode 100644
index 000000000..91b77f453
--- /dev/null
+++ b/memory/mozalloc/msvc_raise_wrappers.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_msvc_raise_wrappers_h
+#define mozilla_msvc_raise_wrappers_h
+
+#ifdef _XSTDDEF_
+# error "Unable to wrap _RAISE(); CRT _RAISE() already defined"
+#endif
+#ifdef _XUTILITY_
+# error "Unable to wrap _X[exception](); CRT versions already declared"
+#endif
+#ifdef _FUNCTIONAL_
+# error "Unable to wrap _Xbad_function_call(); CRT version already declared"
+#endif
+
+#include "mozilla/mozalloc_abort.h"
+
+// xutility will declare the following functions in the std namespace.
+// We #define them to be named differently so we can ensure the exception
+// throwing semantics of these functions work exactly the way we want, by
+// defining our own versions in msvc_raise_wrappers.cpp.
+# define _Xinvalid_argument moz_Xinvalid_argument
+# define _Xlength_error moz_Xlength_error
+# define _Xout_of_range moz_Xout_of_range
+# define _Xoverflow_error moz_Xoverflow_error
+# define _Xruntime_error moz_Xruntime_error
+// used by <functional>
+# define _Xbad_function_call moz_Xbad_function_call
+
+# include <xstddef>
+# include <xutility>
+
+# undef _RAISE
+# define _RAISE(x) mozalloc_abort((x).what())
+
+#endif // ifndef mozilla_msvc_raise_wrappers_h
diff --git a/memory/mozalloc/staticruntime/moz.build b/memory/mozalloc/staticruntime/moz.build
new file mode 100644
index 000000000..55c71907d
--- /dev/null
+++ b/memory/mozalloc/staticruntime/moz.build
@@ -0,0 +1,35 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+NO_VISIBILITY_FLAGS = True
+
+if CONFIG['WRAP_STL_INCLUDES']:
+ DEFINES['_HAS_EXCEPTIONS'] = 0
+ if CONFIG['MOZ_MSVC_STL_WRAP_RAISE']:
+ SOURCES += [
+ '../msvc_raise_wrappers.cpp',
+ ]
+
+UNIFIED_SOURCES += [
+ '../mozalloc.cpp',
+ '../mozalloc_abort.cpp',
+ '../mozalloc_oom.cpp',
+]
+
+# Keep this file separate to avoid #include'ing windows.h everywhere.
+SOURCES += [
+ '../winheap.cpp',
+]
+
+LOCAL_INCLUDES += ['!/xpcom']
+
+DISABLE_STL_WRAPPING = True
+
+DEFINES['IMPL_MFBT'] = True
+
+USE_STATIC_LIBS = True
+
+Library('mozalloc_staticruntime')
diff --git a/memory/mozalloc/throw_gcc.h b/memory/mozalloc/throw_gcc.h
new file mode 100644
index 000000000..4264df63d
--- /dev/null
+++ b/memory/mozalloc/throw_gcc.h
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_throw_gcc_h
+#define mozilla_throw_gcc_h
+
+#include "mozilla/Attributes.h"
+
+#include <stdio.h> // snprintf
+#include <string.h> // strerror
+
+// For gcc, we define these inline to abort so that we're absolutely
+// certain that (i) no exceptions are thrown from Gecko; (ii) these
+// errors are always terminal and caught by breakpad.
+
+#include "mozilla/mozalloc_abort.h"
+
+// libc++ 4.0.0 and higher use C++11 [[noreturn]] attributes for the functions
+// below, and since clang does not allow mixing __attribute__((noreturn)) and
+// [[noreturn]], we have to explicitly use the latter here. See bug 1329520.
+#if defined(__clang__)
+# if __has_feature(cxx_attributes) && \
+ defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 4000
+# define MOZ_THROW_NORETURN [[noreturn]]
+# endif
+#endif
+#ifndef MOZ_THROW_NORETURN
+# define MOZ_THROW_NORETURN MOZ_NORETURN
+#endif
+
+namespace std {
+
+// NB: user code is not supposed to touch the std:: namespace. We're
+// doing this after careful review because we want to define our own
+// exception throwing semantics. Don't try this at home!
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_bad_exception(void)
+{
+ mozalloc_abort("fatal: STL threw bad_exception");
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_bad_alloc(void)
+{
+ mozalloc_abort("fatal: STL threw bad_alloc");
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_bad_cast(void)
+{
+ mozalloc_abort("fatal: STL threw bad_cast");
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_bad_typeid(void)
+{
+ mozalloc_abort("fatal: STL threw bad_typeid");
+}
+
+// used by <functional>
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_bad_function_call(void)
+{
+ mozalloc_abort("fatal: STL threw bad_function_call");
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_logic_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_domain_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_invalid_argument(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_length_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_out_of_range(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_runtime_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_range_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_overflow_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_underflow_error(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_ios_failure(const char* msg)
+{
+ mozalloc_abort(msg);
+}
+
+MOZ_THROW_NORETURN MOZ_EXPORT MOZ_ALWAYS_INLINE void
+__throw_system_error(int err)
+{
+ char error[128];
+ snprintf(error, sizeof(error)-1,
+ "fatal: STL threw system_error: %s (%d)", strerror(err), err);
+ mozalloc_abort(error);
+}
+
+} // namespace std
+
+#undef MOZ_THROW_NORETURN
+
+#endif // mozilla_throw_gcc_h
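
Because these inline definitions preempt the libstdc++ helpers that would normally raise exceptions, standard-library failure paths terminate through mozalloc_abort instead of throwing. A rough illustration; the behavior assumes a GCC/libstdc++ build where this wrapper header is in effect, and the exact __throw_* helper reached varies with the libstdc++ version.

    #include <vector>

    void
    Example()
    {
        std::vector<int> v;
        // libstdc++ reports an impossible reservation through
        // std::__throw_length_error(); with the definition above, that call
        // aborts via mozalloc_abort() rather than throwing std::length_error.
        v.reserve(v.max_size() + 1);
    }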
diff --git a/memory/mozalloc/throw_msvc.h b/memory/mozalloc/throw_msvc.h
new file mode 100644
index 000000000..e6ebf46dc
--- /dev/null
+++ b/memory/mozalloc/throw_msvc.h
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_throw_msvc_h
+#define mozilla_throw_msvc_h
+
+#if defined(MOZ_MSVC_STL_WRAP_RAISE)
+# include "msvc_raise_wrappers.h"
+#else
+# error "Unknown STL wrapper tactic"
+#endif
+
+#endif // mozilla_throw_msvc_h
diff --git a/memory/mozalloc/winheap.cpp b/memory/mozalloc/winheap.cpp
new file mode 100644
index 000000000..79ff35fff
--- /dev/null
+++ b/memory/mozalloc/winheap.cpp
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: sw=4 ts=4 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Types.h"
+#include <windows.h>
+
+// Building with USE_STATIC_LIBS = True sets -MT instead of -MD. -MT sets _MT,
+// while -MD sets _MT and _DLL.
+#if defined(_MT) && !defined(_DLL)
+#define MOZ_STATIC_RUNTIME
+#endif
+
+#if defined(MOZ_MEMORY) && !defined(MOZ_STATIC_RUNTIME)
+// mozalloc.cpp is part of the same library as mozmemory, thus MOZ_MEMORY_IMPL
+// is needed.
+#define MOZ_MEMORY_IMPL
+#include "mozmemory_wrap.h"
+
+// See mozmemory_wrap.h for more details. This file is part of libmozglue, so
+// it needs to use _impl suffixes. However, with libmozglue growing, this is
+// becoming cumbersome, so we will likely use a malloc.h wrapper of some sort
+// and allow the use of the functions without a _impl suffix.
+#define MALLOC_DECL(name, return_type, ...) \
+ extern "C" MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
+#include "malloc_decls.h"
+
+// Warning: C4273: 'HeapAlloc': inconsistent dll linkage
+// The Windows headers define HeapAlloc as dllimport, but we define it as
+// dllexport, which is a voluntary inconsistency.
+#pragma warning(disable: 4273)
+
+MFBT_API
+LPVOID WINAPI HeapAlloc(_In_ HANDLE hHeap, _In_ DWORD dwFlags,
+ _In_ SIZE_T dwBytes)
+{
+ if (dwFlags & HEAP_ZERO_MEMORY) {
+ return calloc_impl(1, dwBytes);
+ }
+ return malloc_impl(dwBytes);
+}
+
+MFBT_API
+LPVOID WINAPI HeapReAlloc(_In_ HANDLE hHeap, _In_ DWORD dwFlags,
+ _In_ LPVOID lpMem, _In_ SIZE_T dwBytes)
+{
+ // The HeapReAlloc contract is that failures preserve the existing
+ // allocation. We can't try to realloc in-place without possibly
+ // freeing the original allocation, breaking the contract.
+ // We also can't guarantee we zero all the memory from the end of
+ // the original allocation to the end of the new one because of the
+ // difference between the originally requested size and what
+ // malloc_usable_size would return us.
+ // So for both cases, just tell the caller we can't do what they
+ // requested.
+ if (dwFlags & (HEAP_REALLOC_IN_PLACE_ONLY | HEAP_ZERO_MEMORY)) {
+ return NULL;
+ }
+ return realloc_impl(lpMem, dwBytes);
+}
+
+MFBT_API
+BOOL WINAPI HeapFree(_In_ HANDLE hHeap, _In_ DWORD dwFlags,
+ _In_ LPVOID lpMem)
+{
+ free_impl(lpMem);
+ return true;
+}
+
+#endif
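
An illustrative Windows caller, assuming mozglue's exported overrides above are the ones linked in: a HEAP_ZERO_MEMORY allocation is serviced by calloc_impl, and any HeapReAlloc call using the refused flags must be prepared for a NULL result, exactly as the comment above explains.

    #include <windows.h>

    void
    Example()
    {
        HANDLE heap = GetProcessHeap();  // ignored by the overrides above

        // Zero-initialized, backed by calloc_impl(1, 256).
        void* p = HeapAlloc(heap, HEAP_ZERO_MEMORY, 256);
        if (!p)
            return;

        // HEAP_ZERO_MEMORY (and HEAP_REALLOC_IN_PLACE_ONLY) are refused by
        // the override, so a NULL return must be handled by the caller.
        void* q = HeapReAlloc(heap, HEAP_ZERO_MEMORY, p, 512);

        HeapFree(heap, 0, q ? q : p);
    }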
diff --git a/memory/mozjemalloc/Makefile.in b/memory/mozjemalloc/Makefile.in
new file mode 100644
index 000000000..6c47e1fa2
--- /dev/null
+++ b/memory/mozjemalloc/Makefile.in
@@ -0,0 +1,13 @@
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ifndef MOZ_JEMALLOC4
+# Force optimization of mozjemalloc on --disable-optimize builds.
+# This works around the Android NDK's definition of ffs being broken when
+# compiling without optimization, while avoiding yet another configure test.
+MOZ_OPTIMIZE = 1
+endif
+
diff --git a/memory/mozjemalloc/jemalloc.c b/memory/mozjemalloc/jemalloc.c
new file mode 100644
index 000000000..5d4d83a24
--- /dev/null
+++ b/memory/mozjemalloc/jemalloc.c
@@ -0,0 +1,7188 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8; indent-tabs-mode: t -*- */
+/* vim:set softtabstop=8 shiftwidth=8 noet: */
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *******************************************************************************
+ *
+ * This allocator implementation is designed to provide scalable performance
+ * for multi-threaded programs on multi-processor systems. The following
+ * features are included for this purpose:
+ *
+ * + Multiple arenas are used if there are multiple CPUs, which reduces lock
+ * contention and cache sloshing.
+ *
+ * + Cache line sharing between arenas is avoided for internal data
+ * structures.
+ *
+ * + Memory is managed in chunks and runs (chunks can be split into runs),
+ * rather than as individual pages. This provides a constant-time
+ * mechanism for associating allocations with particular arenas.
+ *
+ * Allocation requests are rounded up to the nearest size class, and no record
+ * of the original request size is maintained. Allocations are broken into
+ * categories according to size class. Assuming runtime defaults, 4 kB pages
+ * and a 16 byte quantum on a 32-bit system, the size classes in each category
+ * are as follows:
+ *
+ * |=====================================|
+ * | Category | Subcategory | Size |
+ * |=====================================|
+ * | Small | Tiny | 2 |
+ * | | | 4 |
+ * | | | 8 |
+ * | |----------------+---------|
+ * | | Quantum-spaced | 16 |
+ * | | | 32 |
+ * | | | 48 |
+ * | | | ... |
+ * | | | 480 |
+ * | | | 496 |
+ * | | | 512 |
+ * | |----------------+---------|
+ * | | Sub-page | 1 kB |
+ * | | | 2 kB |
+ * |=====================================|
+ * | Large | 4 kB |
+ * | | 8 kB |
+ * | | 12 kB |
+ * | | ... |
+ * | | 1012 kB |
+ * | | 1016 kB |
+ * | | 1020 kB |
+ * |=====================================|
+ * | Huge | 1 MB |
+ * | | 2 MB |
+ * | | 3 MB |
+ * | | ... |
+ * |=====================================|
+ *
+ * NOTE: Due to Mozilla bug 691003, we cannot reserve less than one word for an
+ * allocation on Linux or Mac. So on 32-bit *nix, the smallest bucket size is
+ * 4 bytes, and on 64-bit, the smallest bucket size is 8 bytes.
+ *
+ * A different mechanism is used for each category:
+ *
+ * Small : Each size class is segregated into its own set of runs. Each run
+ * maintains a bitmap of which regions are free/allocated.
+ *
+ * Large : Each allocation is backed by a dedicated run. Metadata are stored
+ * in the associated arena chunk header maps.
+ *
+ * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
+ * Metadata are stored in a separate red-black tree.
+ *
+ *******************************************************************************
+ */
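
A worked illustration of the rounding described in the table above, assuming the 4 kB page / 16-byte quantum defaults; the exact size classes are configuration-dependent.

    #include <stdlib.h>

    static void
    size_class_examples(void)
    {
        free(malloc(3));        /* tiny: rounded up to 4 bytes (8 on 64-bit, per the NOTE) */
        free(malloc(100));      /* quantum-spaced: rounded up to 112, the next multiple of 16 */
        free(malloc(1500));     /* sub-page: rounded up to 2 kB */
        free(malloc(5000));     /* large: rounded up to 8 kB, the next multiple of 4 kB */
        free(malloc(2500000));  /* huge: rounded up to 3 MB, the next multiple of the 1 MB chunk */
    }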
+
+#ifdef MOZ_MEMORY_ANDROID
+#define NO_TLS
+#define _pthread_self() pthread_self()
+#endif
+
+/*
+ * On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
+ * operating system. If we release 1MB of live pages with MADV_DONTNEED, our
+ * RSS will decrease by 1MB (almost) immediately.
+ *
+ * On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
+ * on Mac doesn't cause the OS to release the specified pages immediately; the
+ * OS keeps them in our process until the machine comes under memory pressure.
+ *
+ * It's therefore difficult to measure the process's RSS on Mac, since, in the
+ * absence of memory pressure, the contribution from the heap to RSS will not
+ * decrease due to our madvise calls.
+ *
+ * We therefore define MALLOC_DOUBLE_PURGE on Mac. This causes jemalloc to
+ * track which pages have been MADV_FREE'd. You can then call
+ * jemalloc_purge_freed_pages(), which will force the OS to release those
+ * MADV_FREE'd pages, making the process's RSS reflect its true memory usage.
+ *
+ * The jemalloc_purge_freed_pages definition in memory/build/mozmemory.h needs
+ * to be adjusted if MALLOC_DOUBLE_PURGE is ever enabled on Linux.
+ */
+#ifdef MOZ_MEMORY_DARWIN
+#define MALLOC_DOUBLE_PURGE
+#endif
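
Given the MALLOC_DOUBLE_PURGE behavior described above, a measurement helper on Mac would typically purge MADV_FREE'd pages before sampling RSS. A rough sketch: jemalloc_purge_freed_pages() is the entry point the comment refers to (declared in memory/build/mozmemory.h), while the RSS-reading helper is hypothetical.

    #include "mozmemory.h"

    size_t ReadProcessRSS();  // hypothetical platform-specific helper

    size_t
    MeasureHeapRSS()
    {
        // Force pages that were only MADV_FREE'd back to the OS so that the
        // subsequent RSS reading reflects live heap usage.
        jemalloc_purge_freed_pages();
        return ReadProcessRSS();
    }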
+
+/*
+ * MALLOC_PRODUCTION disables assertions and statistics gathering. It also
+ * defaults the A and J runtime options to off. These settings are appropriate
+ * for production systems.
+ */
+#ifndef MOZ_MEMORY_DEBUG
+# define MALLOC_PRODUCTION
+#endif
+
+/*
+ * Use only one arena by default. Mozilla does not currently make extensive
+ * use of concurrent allocation, so the increased fragmentation associated with
+ * multiple arenas is not warranted.
+ *
+ * When using the Servo style system, we do indeed make use of significant
+ * concurrent allocation, and the overhead matters. Bug 1291355 tracks
+ * investigating the fragmentation overhead of turning this on for users.
+ */
+#ifndef MOZ_STYLO
+#define MOZ_MEMORY_NARENAS_DEFAULT_ONE
+#endif
+
+/*
+ * Pass this set of options to jemalloc as its default. It does not override
+ * the options passed via the MALLOC_OPTIONS environment variable but is
+ * applied in addition to them.
+ */
+#ifdef MOZ_WIDGET_GONK
+ /* Reduce the amount of unused dirty pages to 1MiB on B2G */
+# define MOZ_MALLOC_OPTIONS "ff"
+#else
+# define MOZ_MALLOC_OPTIONS ""
+#endif
+
+/*
+ * MALLOC_STATS enables statistics calculation, and is required for
+ * jemalloc_stats().
+ */
+#define MALLOC_STATS
+
+/* Memory filling (junk/poison/zero). */
+#define MALLOC_FILL
+
+#ifndef MALLOC_PRODUCTION
+ /*
+ * MALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+# define MALLOC_DEBUG
+
+ /* Allocation tracing. */
+# ifndef MOZ_MEMORY_WINDOWS
+# define MALLOC_UTRACE
+# endif
+
+ /* Support optional abort() on OOM. */
+# define MALLOC_XMALLOC
+
+ /* Support SYSV semantics. */
+# define MALLOC_SYSV
+#endif
+
+/*
+ * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
+ * validation. There are many possible errors that validation does not even
+ * attempt to detect.
+ */
+#define MALLOC_VALIDATE
+
+/*
+ * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
+ * re-balances arena load if exponentially averaged contention exceeds a
+ * certain threshold.
+ */
+/* #define MALLOC_BALANCE */
+
+#if defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
+#define _GNU_SOURCE /* For mremap(2). */
+#if 0 /* Enable in order to test decommit code on Linux. */
+# define MALLOC_DECOMMIT
+#endif
+#endif
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifdef MOZ_MEMORY_WINDOWS
+
+/* Some defines from the CRT internal headers that we need here. */
+#define _CRT_SPINCOUNT 5000
+#define __crtInitCritSecAndSpinCount InitializeCriticalSectionAndSpinCount
+#include <io.h>
+#include <windows.h>
+#include <intrin.h>
+
+#pragma warning( disable: 4267 4996 4146 )
+
+#define bool BOOL
+#define false FALSE
+#define true TRUE
+#define inline __inline
+#define SIZE_T_MAX SIZE_MAX
+#define STDERR_FILENO 2
+#define PATH_MAX MAX_PATH
+#define vsnprintf _vsnprintf
+
+#ifndef NO_TLS
+static unsigned long tlsIndex = 0xffffffff;
+#endif
+
+#define __thread
+#define _pthread_self() __threadid()
+
+/* use MSVC intrinsics */
+#pragma intrinsic(_BitScanForward)
+static __forceinline int
+ffs(int x)
+{
+ unsigned long i;
+
+ if (_BitScanForward(&i, x) != 0)
+ return (i + 1);
+
+ return (0);
+}
+
+/* Implement getenv without using malloc */
+static char mozillaMallocOptionsBuf[64];
+
+#define getenv xgetenv
+static char *
+getenv(const char *name)
+{
+
+ if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
+ sizeof(mozillaMallocOptionsBuf)) > 0)
+ return (mozillaMallocOptionsBuf);
+
+ return (NULL);
+}
+
+typedef unsigned char uint8_t;
+typedef unsigned uint32_t;
+typedef unsigned long long uint64_t;
+typedef unsigned long long uintmax_t;
+#if defined(_WIN64)
+typedef long long ssize_t;
+#else
+typedef long ssize_t;
+#endif
+
+#define MALLOC_DECOMMIT
+#endif
+
+/*
+ * Allow unmapping pages on all platforms. Note that if this is disabled,
+ * jemalloc will never unmap anything, instead recycling pages for later use.
+ */
+#define JEMALLOC_MUNMAP
+
+/*
+ * Enable limited chunk recycling on all platforms. Note that when
+ * JEMALLOC_MUNMAP is not defined, all chunks will be recycled unconditionally.
+ */
+#define JEMALLOC_RECYCLE
+
+#ifndef MOZ_MEMORY_WINDOWS
+#ifndef MOZ_MEMORY_SOLARIS
+#include <sys/cdefs.h>
+#endif
+#ifndef __DECONST
+# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+#ifndef MOZ_MEMORY
+__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
+#include "libc_private.h"
+#ifdef MALLOC_DEBUG
+# define _LOCK_DEBUG
+#endif
+#include "spinlock.h"
+#include "namespace.h"
+#endif
+#include <sys/mman.h>
+#ifndef MADV_FREE
+# define MADV_FREE MADV_DONTNEED
+#endif
+#ifndef MAP_NOSYNC
+# define MAP_NOSYNC 0
+#endif
+#include <sys/param.h>
+#ifndef MOZ_MEMORY
+#include <sys/stddef.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h>
+#if !defined(MOZ_MEMORY_SOLARIS) && !defined(MOZ_MEMORY_ANDROID)
+#include <sys/sysctl.h>
+#endif
+#include <sys/uio.h>
+#ifndef MOZ_MEMORY
+#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
+
+#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/vmparam.h>
+#endif
+
+#include <errno.h>
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <pthread.h>
+#ifdef MOZ_MEMORY_DARWIN
+#define _pthread_self pthread_self
+#define _pthread_mutex_init pthread_mutex_init
+#define _pthread_mutex_trylock pthread_mutex_trylock
+#define _pthread_mutex_lock pthread_mutex_lock
+#define _pthread_mutex_unlock pthread_mutex_unlock
+#endif
+#include <sched.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef MOZ_MEMORY_DARWIN
+#include <strings.h>
+#endif
+#include <unistd.h>
+
+#ifdef MOZ_MEMORY_DARWIN
+#include <libkern/OSAtomic.h>
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <malloc/malloc.h>
+#endif
+
+#ifndef MOZ_MEMORY
+#include "un-namespace.h"
+#endif
+
+#endif
+
+#include "jemalloc_types.h"
+#include "linkedlist.h"
+#include "mozmemory_wrap.h"
+
+/* Some tools, such as /dev/dsp wrappers and LD_PRELOAD libraries, override
+ * mmap() and call dlsym() from their overridden mmap(). The problem is that
+ * dlsym() calls malloc(), which ends up in a deadlock in jemalloc. On such
+ * systems we prefer to issue the mmap system call directly; we do so on
+ * Linux and on kFreeBSD with a GNU userland. Note that sanity checks
+ * (alignment of the offset, ...) are not performed, because jemalloc's uses
+ * of mmap are quite limited.
+ *
+ * On Alpha, glibc has a bug that prevents syscall() from working for system
+ * calls with 6 arguments.
+ */
+#if (defined(MOZ_MEMORY_LINUX) && !defined(__alpha__)) || \
+ (defined(MOZ_MEMORY_BSD) && defined(__GLIBC__))
+#include <sys/syscall.h>
+#if defined(SYS_mmap) || defined(SYS_mmap2)
+static inline
+void *_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, off_t offset)
+{
+/* S390 only passes one argument to the mmap system call, which is a
+ * pointer to a structure containing the arguments */
+#ifdef __s390__
+ struct {
+ void *addr;
+ size_t length;
+ long prot;
+ long flags;
+ long fd;
+ off_t offset;
+ } args = { addr, length, prot, flags, fd, offset };
+ return (void *) syscall(SYS_mmap, &args);
+#else
+#ifdef SYS_mmap2
+ return (void *) syscall(SYS_mmap2, addr, length, prot, flags,
+ fd, offset >> 12);
+#else
+ return (void *) syscall(SYS_mmap, addr, length, prot, flags,
+ fd, offset);
+#endif
+#endif
+}
+#define mmap _mmap
+#define munmap(a, l) syscall(SYS_munmap, a, l)
+#endif
+#endif
+
+#ifdef MOZ_MEMORY_DARWIN
+static const bool isthreaded = true;
+#endif
+
+#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
+#define JEMALLOC_USES_MAP_ALIGN /* Required on Solaris 10. Might improve performance elsewhere. */
+#endif
+
+#ifndef __DECONST
+#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+
+#ifdef MOZ_MEMORY_WINDOWS
+ /* MSVC++ does not support C99 variable-length arrays. */
+# define RB_NO_C99_VARARRAYS
+#endif
+#include "rb.h"
+
+#ifdef MALLOC_DEBUG
+ /* Disable inlining to make debugging easier. */
+#ifdef inline
+#undef inline
+#endif
+
+# define inline
+#endif
+
+/* Size of stack-allocated buffer passed to strerror_r(). */
+#define STRERROR_BUF 64
+
+/* Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes. */
+# define QUANTUM_2POW_MIN 4
+#if defined(_WIN64) || defined(__LP64__)
+# define SIZEOF_PTR_2POW 3
+#else
+# define SIZEOF_PTR_2POW 2
+#endif
+#define PIC
+#ifndef MOZ_MEMORY_DARWIN
+static const bool isthreaded = true;
+#else
+# define NO_TLS
+#endif
+#if 0
+#ifdef __i386__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 2
+# define CPU_SPINWAIT __asm__ volatile("pause")
+#endif
+#ifdef __ia64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+#endif
+#ifdef __alpha__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define NO_TLS
+#endif
+#ifdef __sparc64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define NO_TLS
+#endif
+#ifdef __amd64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define CPU_SPINWAIT __asm__ volatile("pause")
+#endif
+#ifdef __arm__
+# define QUANTUM_2POW_MIN 3
+# define SIZEOF_PTR_2POW 2
+# define NO_TLS
+#endif
+#ifdef __mips__
+# define QUANTUM_2POW_MIN 3
+# define SIZEOF_PTR_2POW 2
+# define NO_TLS
+#endif
+#ifdef __powerpc__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 2
+#endif
+#endif
+
+#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
+
+/* sizeof(int) == (1U << SIZEOF_INT_2POW). */
+#ifndef SIZEOF_INT_2POW
+# define SIZEOF_INT_2POW 2
+#endif
+
+/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
+#if (!defined(PIC) && !defined(NO_TLS))
+# define NO_TLS
+#endif
+
+#ifdef NO_TLS
+ /* MALLOC_BALANCE requires TLS. */
+# ifdef MALLOC_BALANCE
+# undef MALLOC_BALANCE
+# endif
+#endif
+
+/*
+ * Size and alignment of memory chunks that are allocated by the OS's virtual
+ * memory system.
+ */
+#define CHUNK_2POW_DEFAULT 20
+/* Maximum number of dirty pages per arena. */
+#define DIRTY_MAX_DEFAULT (1U << 8)
+
+/*
+ * Maximum size of L1 cache line. This is used to avoid cache line aliasing,
+ * so over-estimates are okay (up to a point), but under-estimates will
+ * negatively affect performance.
+ */
+#define CACHELINE_2POW 6
+#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
+
+/*
+ * Smallest size class to support. On Windows the smallest allocation size
+ * must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
+ * malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
+ */
+#ifdef MOZ_MEMORY_WINDOWS
+#define TINY_MIN_2POW (sizeof(void*) == 8 ? 4 : 3)
+#else
+#define TINY_MIN_2POW (sizeof(void*) == 8 ? 3 : 2)
+#endif
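+/*
+ * Concretely: on 64-bit Windows TINY_MIN_2POW is 4 (16-byte minimum), on
+ * 32-bit Windows it is 3 (8 bytes); elsewhere it is 3 on 64-bit (8 bytes)
+ * and 2 on 32-bit (4 bytes).
+ */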
+
+/*
+ * Maximum size class that is a multiple of the quantum, but not (necessarily)
+ * a power of 2. Above this size, allocations are rounded up to the nearest
+ * power of 2.
+ */
+#define SMALL_MAX_2POW_DEFAULT 9
+#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
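+/*
+ * With the default of 512 bytes, for example, a 520-byte request is above
+ * small_max and is therefore rounded up to the next power of two
+ * (1024 bytes).
+ */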
+
+/*
+ * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
+ * as small as possible such that this setting is still honored, without
+ * violating other constraints. The goal is to make runs as small as possible
+ * without exceeding a per run external fragmentation threshold.
+ *
+ * We use binary fixed point math for overhead computations, where the binary
+ * point is implicitly RUN_BFP bits to the left.
+ *
+ * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+ * honored for some/all object sizes, since there is one bit of header overhead
+ * per object (plus a constant). This constraint is relaxed (ignored) for runs
+ * that are so small that the per-region overhead is greater than:
+ *
+ * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
+ */
+#define RUN_BFP 12
+/* \/ Implicit binary fixed point. */
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
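+/*
+ * Reading the fixed-point values above: with RUN_BFP = 12 a value v stands
+ * for v / 2^12, so RUN_MAX_OVRHD = 0x3d (61) corresponds to a desired
+ * maximum header overhead of roughly 61/4096, i.e. about 1.5% per run.
+ */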
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU. If no such instruction is defined
+ * above, make CPU_SPINWAIT a no-op.
+ */
+#ifndef CPU_SPINWAIT
+# define CPU_SPINWAIT
+#endif
+
+/*
+ * Adaptive spinning must eventually switch to blocking, in order to avoid the
+ * potential for priority inversion deadlock. Backing off past a certain point
+ * can actually waste time.
+ */
+#define SPIN_LIMIT_2POW 11
+
+/*
+ * Conversion from spinning to blocking is expensive; we use (1U <<
+ * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
+ * worst-case spinning.
+ */
+#define BLOCK_COST_2POW 4
+
+#ifdef MALLOC_BALANCE
+ /*
+ * We use an exponential moving average to track recent lock contention,
+ * where the size of the history window is N, and alpha=2/(N+1).
+ *
+ * Due to integer math rounding, very small values here can cause
+ * substantial degradation in accuracy, thus making the moving average decay
+ * faster than it would with precise calculation.
+ */
+# define BALANCE_ALPHA_INV_2POW 9
+
+ /*
+ * Threshold value for the exponential moving contention average at which to
+ * re-assign a thread.
+ */
+# define BALANCE_THRESHOLD_DEFAULT (1U << (SPIN_LIMIT_2POW-4))
+#endif
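+/*
+ * To put the MALLOC_BALANCE constants in concrete terms:
+ * BALANCE_ALPHA_INV_2POW = 9 gives alpha = 1/512, i.e. an effective history
+ * window of N = 2/alpha - 1 = 1023 lock acquisitions, and
+ * BALANCE_THRESHOLD_DEFAULT evaluates to 1U << 7 = 128.
+ */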
+
+/******************************************************************************/
+
+/* MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive. */
+#if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
+#error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
+#endif
+
+/*
+ * Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
+ * places, because they require malloc()ed memory, which causes bootstrapping
+ * issues in some cases.
+ */
+#if defined(MOZ_MEMORY_WINDOWS)
+#define malloc_mutex_t CRITICAL_SECTION
+#define malloc_spinlock_t CRITICAL_SECTION
+#elif defined(MOZ_MEMORY_DARWIN)
+typedef struct {
+ OSSpinLock lock;
+} malloc_mutex_t;
+typedef struct {
+ OSSpinLock lock;
+} malloc_spinlock_t;
+#elif defined(MOZ_MEMORY)
+typedef pthread_mutex_t malloc_mutex_t;
+typedef pthread_mutex_t malloc_spinlock_t;
+#else
+/* XXX these should be #ifdef'd for freebsd (and linux?) only */
+typedef struct {
+ spinlock_t lock;
+} malloc_mutex_t;
+typedef malloc_mutex_t malloc_spinlock_t;
+#endif
+
+/* Set to true once the allocator has been initialized. */
+static bool malloc_initialized = false;
+
+#if defined(MOZ_MEMORY_WINDOWS)
+/* No init lock for Windows. */
+#elif defined(MOZ_MEMORY_DARWIN)
+static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
+#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
+static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
+#elif defined(MOZ_MEMORY)
+static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+#else
+static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
+#endif
+
+/******************************************************************************/
+/*
+ * Statistics data structures.
+ */
+
+#ifdef MALLOC_STATS
+
+typedef struct malloc_bin_stats_s malloc_bin_stats_t;
+struct malloc_bin_stats_s {
+ /*
+ * Number of allocation requests that corresponded to the size of this
+ * bin.
+ */
+ uint64_t nrequests;
+
+ /* Total number of runs created for this bin's size class. */
+ uint64_t nruns;
+
+ /*
+ * Total number of runs reused by extracting them from the runs tree for
+ * this bin's size class.
+ */
+ uint64_t reruns;
+
+ /* High-water mark for this bin. */
+ unsigned long highruns;
+
+ /* Current number of runs in this bin. */
+ unsigned long curruns;
+};
+
+typedef struct arena_stats_s arena_stats_t;
+struct arena_stats_s {
+ /* Number of bytes currently mapped. */
+ size_t mapped;
+
+ /*
+ * Total number of purge sweeps, total number of madvise calls made,
+ * and total pages purged in order to keep dirty unused memory under
+ * control.
+ */
+ uint64_t npurge;
+ uint64_t nmadvise;
+ uint64_t purged;
+#ifdef MALLOC_DECOMMIT
+ /*
+ * Total number of decommit/commit operations, and total number of
+ * pages decommitted.
+ */
+ uint64_t ndecommit;
+ uint64_t ncommit;
+ uint64_t decommitted;
+#endif
+
+ /* Current number of committed pages. */
+ size_t committed;
+
+ /* Per-size-category statistics. */
+ size_t allocated_small;
+ uint64_t nmalloc_small;
+ uint64_t ndalloc_small;
+
+ size_t allocated_large;
+ uint64_t nmalloc_large;
+ uint64_t ndalloc_large;
+
+#ifdef MALLOC_BALANCE
+ /* Number of times this arena reassigned a thread due to contention. */
+ uint64_t nbalance;
+#endif
+};
+
+#endif /* #ifdef MALLOC_STATS */
+
+/******************************************************************************/
+/*
+ * Extent data structures.
+ */
+
+/* Tree of extents. */
+typedef struct extent_node_s extent_node_t;
+struct extent_node_s {
+ /* Linkage for the size/address-ordered tree. */
+ rb_node(extent_node_t) link_szad;
+
+ /* Linkage for the address-ordered tree. */
+ rb_node(extent_node_t) link_ad;
+
+ /* Pointer to the extent that this tree node is responsible for. */
+ void *addr;
+
+ /* Total region size. */
+ size_t size;
+
+ /* True if zero-filled; used by chunk recycling code. */
+ bool zeroed;
+};
+typedef rb_tree(extent_node_t) extent_tree_t;
+
+/******************************************************************************/
+/*
+ * Radix tree data structures.
+ */
+
+#ifdef MALLOC_VALIDATE
+ /*
+ * Size of each radix tree node (must be a power of 2). This impacts tree
+ * depth.
+ */
+# if (SIZEOF_PTR == 4)
+# define MALLOC_RTREE_NODESIZE (1U << 14)
+# else
+# define MALLOC_RTREE_NODESIZE CACHELINE
+# endif
+
+typedef struct malloc_rtree_s malloc_rtree_t;
+struct malloc_rtree_s {
+ malloc_spinlock_t lock;
+ void **root;
+ unsigned height;
+ unsigned level2bits[1]; /* Dynamically sized. */
+};
+#endif
+
+/******************************************************************************/
+/*
+ * Arena data structures.
+ */
+
+typedef struct arena_s arena_t;
+typedef struct arena_bin_s arena_bin_t;
+
+/* Each element of the chunk map corresponds to one page within the chunk. */
+typedef struct arena_chunk_map_s arena_chunk_map_t;
+struct arena_chunk_map_s {
+ /*
+ * Linkage for run trees. There are two disjoint uses:
+ *
+ * 1) arena_t's runs_avail tree.
+ * 2) arena_run_t conceptually uses this linkage for in-use non-full
+ * runs, rather than directly embedding linkage.
+ */
+ rb_node(arena_chunk_map_t) link;
+
+ /*
+ * Run address (or size) and various flags are stored together. The bit
+ * layout looks like (assuming 32-bit system):
+ *
+ * ???????? ???????? ????---- -mckdzla
+ *
+ * ? : Unallocated: Run address for first/last pages, unset for internal
+ * pages.
+ * Small: Run address.
+ * Large: Run size for first page, unset for trailing pages.
+ * - : Unused.
+ * m : MADV_FREE/MADV_DONTNEED'ed?
+ * c : decommitted?
+ * k : key?
+ * d : dirty?
+ * z : zeroed?
+ * l : large?
+ * a : allocated?
+ *
+ * Following are example bit patterns for the three types of runs.
+ *
+ * r : run address
+ * s : run size
+ * x : don't care
+ * - : 0
+ * [cdzla] : bit set
+ *
+ * Unallocated:
+ * ssssssss ssssssss ssss---- --c-----
+ * xxxxxxxx xxxxxxxx xxxx---- ----d---
+ * ssssssss ssssssss ssss---- -----z--
+ *
+ * Small:
+ * rrrrrrrr rrrrrrrr rrrr---- -------a
+ * rrrrrrrr rrrrrrrr rrrr---- -------a
+ * rrrrrrrr rrrrrrrr rrrr---- -------a
+ *
+ * Large:
+ * ssssssss ssssssss ssss---- ------la
+ * -------- -------- -------- ------la
+ * -------- -------- -------- ------la
+ */
+ size_t bits;
+
+/* Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
+ * MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
+ *
+ * If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
+ * re-committed with pages_commit() before it may be touched. If
+ * MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
+ *
+ * If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
+ * are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
+ * CHUNK_MAP_MADVISED.
+ *
+ * Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
+ * defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
+ * When it's finally freed with jemalloc_purge_freed_pages, the page is marked
+ * as CHUNK_MAP_DECOMMITTED.
+ */
+#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS) || defined(MALLOC_DOUBLE_PURGE)
+#define CHUNK_MAP_MADVISED ((size_t)0x40U)
+#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
+#define CHUNK_MAP_MADVISED_OR_DECOMMITTED (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
+#endif
+#define CHUNK_MAP_KEY ((size_t)0x10U)
+#define CHUNK_MAP_DIRTY ((size_t)0x08U)
+#define CHUNK_MAP_ZEROED ((size_t)0x04U)
+#define CHUNK_MAP_LARGE ((size_t)0x02U)
+#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
+};
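+/*
+ * Putting the layout diagram and the flag bits together: every page of a
+ * small run has bits == (run address | CHUNK_MAP_ALLOCATED), while the first
+ * page of a large run has bits == (run size | CHUNK_MAP_LARGE |
+ * CHUNK_MAP_ALLOCATED) and its trailing pages carry only the flag bits.
+ */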
+typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
+
+/* Arena chunk header. */
+typedef struct arena_chunk_s arena_chunk_t;
+struct arena_chunk_s {
+ /* Arena that owns the chunk. */
+ arena_t *arena;
+
+ /* Linkage for the arena's chunks_dirty tree. */
+ rb_node(arena_chunk_t) link_dirty;
+
+#ifdef MALLOC_DOUBLE_PURGE
+ /* If we're double-purging, we maintain a linked list of chunks which
+ * have pages which have been madvise(MADV_FREE)'d but not explicitly
+ * purged.
+ *
+ * We're currently lazy and don't remove a chunk from this list when
+ * all its madvised pages are recommitted. */
+ LinkedList chunks_madvised_elem;
+#endif
+
+ /* Number of dirty pages. */
+ size_t ndirty;
+
+ /* Map of pages within chunk that keeps track of free/large/small. */
+ arena_chunk_map_t map[1]; /* Dynamically sized. */
+};
+typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
+
+typedef struct arena_run_s arena_run_t;
+struct arena_run_s {
+#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+ uint32_t magic;
+# define ARENA_RUN_MAGIC 0x384adf93
+#endif
+
+ /* Bin this run is associated with. */
+ arena_bin_t *bin;
+
+ /* Index of first element that might have a free region. */
+ unsigned regs_minelm;
+
+ /* Number of free regions in run. */
+ unsigned nfree;
+
+ /* Bitmask of in-use regions (0: in use, 1: free). */
+ unsigned regs_mask[1]; /* Dynamically sized. */
+};
+
+struct arena_bin_s {
+ /*
+ * Current run being used to service allocations of this bin's size
+ * class.
+ */
+ arena_run_t *runcur;
+
+ /*
+ * Tree of non-full runs. This tree is used when looking for an
+ * existing run when runcur is no longer usable. We choose the
+ * non-full run that is lowest in memory; this policy tends to keep
+ * objects packed well, and it can also help reduce the number of
+ * almost-empty chunks.
+ */
+ arena_run_tree_t runs;
+
+ /* Size of regions in a run for this bin's size class. */
+ size_t reg_size;
+
+ /* Total size of a run for this bin's size class. */
+ size_t run_size;
+
+ /* Total number of regions in a run for this bin's size class. */
+ uint32_t nregs;
+
+ /* Number of elements in a run's regs_mask for this bin's size class. */
+ uint32_t regs_mask_nelms;
+
+ /* Offset of first region in a run for this bin's size class. */
+ uint32_t reg0_offset;
+
+#ifdef MALLOC_STATS
+ /* Bin statistics. */
+ malloc_bin_stats_t stats;
+#endif
+};
+
+struct arena_s {
+#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+ uint32_t magic;
+# define ARENA_MAGIC 0x947d3d24
+#endif
+
+ /* All operations on this arena require that lock be locked. */
+#ifdef MOZ_MEMORY
+ malloc_spinlock_t lock;
+#else
+ pthread_mutex_t lock;
+#endif
+
+#ifdef MALLOC_STATS
+ arena_stats_t stats;
+#endif
+
+ /* Tree of dirty-page-containing chunks this arena manages. */
+ arena_chunk_tree_t chunks_dirty;
+
+#ifdef MALLOC_DOUBLE_PURGE
+ /* Head of a linked list of MADV_FREE'd-page-containing chunks this
+ * arena manages. */
+ LinkedList chunks_madvised;
+#endif
+
+ /*
+ * In order to avoid rapid chunk allocation/deallocation when an arena
+ * oscillates right on the cusp of needing a new chunk, cache the most
+ * recently freed chunk. The spare is left in the arena's chunk trees
+ * until it is deleted.
+ *
+ * There is one spare chunk per arena, rather than one spare total, in
+ * order to avoid interactions between multiple threads that could make
+ * a single spare inadequate.
+ */
+ arena_chunk_t *spare;
+
+ /*
+ * Current count of pages within unused runs that are potentially
+ * dirty, and for which madvise(... MADV_FREE) has not been called. By
+ * tracking this, we can institute a limit on how much dirty unused
+ * memory is mapped for each arena.
+ */
+ size_t ndirty;
+
+ /*
+ * Size/address-ordered tree of this arena's available runs. This tree
+ * is used for first-best-fit run allocation.
+ */
+ arena_avail_tree_t runs_avail;
+
+#ifdef MALLOC_BALANCE
+ /*
+ * The arena load balancing machinery needs to keep track of how much
+ * lock contention there is. This value is exponentially averaged.
+ */
+ uint32_t contention;
+#endif
+
+ /*
+ * bins is used to store rings of free regions of the following sizes,
+ * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
+ *
+ * bins[i] | size |
+ * --------+------+
+ * 0 | 2 |
+ * 1 | 4 |
+ * 2 | 8 |
+ * --------+------+
+ * 3 | 16 |
+ * 4 | 32 |
+ * 5 | 48 |
+ * 6 | 64 |
+ * : :
+ * : :
+ * 33 | 496 |
+ * 34 | 512 |
+ * --------+------+
+ * 35 | 1024 |
+ * 36 | 2048 |
+ * --------+------+
+ */
+ arena_bin_t bins[1]; /* Dynamically sized. */
+};
+
+/******************************************************************************/
+/*
+ * Data.
+ */
+
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+/* Number of CPUs. */
+static unsigned ncpus;
+#endif
+
+#ifdef JEMALLOC_MUNMAP
+static const bool config_munmap = true;
+#else
+static const bool config_munmap = false;
+#endif
+
+#ifdef JEMALLOC_RECYCLE
+static const bool config_recycle = true;
+#else
+static const bool config_recycle = false;
+#endif
+
+/*
+ * When MALLOC_STATIC_SIZES is defined most of the parameters
+ * controlling the malloc behavior are defined as compile-time constants
+ * for best performance and cannot be altered at runtime.
+ */
+#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && !defined(__aarch64__)
+#define MALLOC_STATIC_SIZES 1
+#endif
+
+#ifdef MALLOC_STATIC_SIZES
+
+/*
+ * VM page size. It must divide the runtime CPU page size or the code
+ * will abort.
+ * Platform specific page size conditions copied from js/public/HeapAPI.h
+ */
+#if (defined(SOLARIS) || defined(__FreeBSD__)) && \
+ (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
+#define pagesize_2pow ((size_t) 13)
+#elif defined(__powerpc64__)
+#define pagesize_2pow ((size_t) 16)
+#else
+#define pagesize_2pow ((size_t) 12)
+#endif
+#define pagesize ((size_t) 1 << pagesize_2pow)
+#define pagesize_mask (pagesize - 1)
+
+/* Various quantum-related settings. */
+
+#define QUANTUM_DEFAULT ((size_t) 1 << QUANTUM_2POW_MIN)
+static const size_t quantum = QUANTUM_DEFAULT;
+static const size_t quantum_mask = QUANTUM_DEFAULT - 1;
+
+/* Various bin-related settings. */
+
+static const size_t small_min = (QUANTUM_DEFAULT >> 1) + 1;
+static const size_t small_max = (size_t) SMALL_MAX_DEFAULT;
+
+/* Max size class for bins. */
+static const size_t bin_maxclass = pagesize >> 1;
+
+ /* Number of (2^n)-spaced tiny bins. */
+static const unsigned ntbins = (unsigned)
+ (QUANTUM_2POW_MIN - TINY_MIN_2POW);
+
+ /* Number of quantum-spaced bins. */
+static const unsigned nqbins = (unsigned)
+ (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN);
+
+/* Number of (2^n)-spaced sub-page bins. */
+static const unsigned nsbins = (unsigned)
+ (pagesize_2pow -
+ SMALL_MAX_2POW_DEFAULT - 1);
+
+#else /* !MALLOC_STATIC_SIZES */
+
+/* VM page size. */
+static size_t pagesize;
+static size_t pagesize_mask;
+static size_t pagesize_2pow;
+
+/* Various bin-related settings. */
+static size_t bin_maxclass; /* Max size class for bins. */
+static unsigned ntbins; /* Number of (2^n)-spaced tiny bins. */
+static unsigned nqbins; /* Number of quantum-spaced bins. */
+static unsigned nsbins; /* Number of (2^n)-spaced sub-page bins. */
+static size_t small_min;
+static size_t small_max;
+
+/* Various quantum-related settings. */
+static size_t quantum;
+static size_t quantum_mask; /* (quantum - 1). */
+
+#endif
+
+/* Various chunk-related settings. */
+
+/*
+ * Compute the header size such that it is large enough to contain the page map
+ * and enough nodes for the worst case: one node per non-header page plus one
+ * extra for situations where we briefly have one more node allocated than we
+ * will need.
+ */
+#define calculate_arena_header_size() \
+ (sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1))
+
+#define calculate_arena_header_pages() \
+ ((calculate_arena_header_size() >> pagesize_2pow) + \
+ ((calculate_arena_header_size() & pagesize_mask) ? 1 : 0))
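+/*
+ * For a sense of scale (not a guarantee, since the structure sizes are
+ * platform dependent): with 1 MiB chunks and 4 KiB pages, chunk_npages is
+ * 256, so the header consists of one arena_chunk_t plus 255 additional
+ * arena_chunk_map_t entries, which the macros above round up to whole pages.
+ */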
+
+/* Max size class for arenas. */
+#define calculate_arena_maxclass() \
+ (chunksize - (arena_chunk_header_npages << pagesize_2pow))
+
+/*
+ * Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
+ * 6.25% of the process address space on a 32-bit OS for later use.
+ */
+#define CHUNK_RECYCLE_LIMIT 128
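+/*
+ * That is, 128 chunks * 1 MiB = 128 MiB retained at most, which is 6.25% of
+ * a 2 GiB 32-bit user address space.
+ */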
+
+#ifdef MALLOC_STATIC_SIZES
+#define CHUNKSIZE_DEFAULT ((size_t) 1 << CHUNK_2POW_DEFAULT)
+static const size_t chunksize = CHUNKSIZE_DEFAULT;
+static const size_t chunksize_mask = CHUNKSIZE_DEFAULT - 1;
+static const size_t chunk_npages = CHUNKSIZE_DEFAULT >> pagesize_2pow;
+#define arena_chunk_header_npages calculate_arena_header_pages()
+#define arena_maxclass calculate_arena_maxclass()
+static const size_t recycle_limit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
+#else
+static size_t chunksize;
+static size_t chunksize_mask; /* (chunksize - 1). */
+static size_t chunk_npages;
+static size_t arena_chunk_header_npages;
+static size_t arena_maxclass; /* Max size class for arenas. */
+static size_t recycle_limit;
+#endif
+
+/* The current amount of recycled bytes, updated atomically. */
+static size_t recycled_size;
+
+/********/
+/*
+ * Chunks.
+ */
+
+#ifdef MALLOC_VALIDATE
+static malloc_rtree_t *chunk_rtree;
+#endif
+
+/* Protects chunk-related data structures. */
+static malloc_mutex_t chunks_mtx;
+
+/*
+ * Trees of chunks that were previously allocated (trees differ only in node
+ * ordering). These are used when allocating chunks, in an attempt to re-use
+ * address space. Depending on function, different tree orderings are needed,
+ * which is why there are two trees with the same contents.
+ */
+static extent_tree_t chunks_szad_mmap;
+static extent_tree_t chunks_ad_mmap;
+
+/* Protects huge allocation-related data structures. */
+static malloc_mutex_t huge_mtx;
+
+/* Tree of chunks that are stand-alone huge allocations. */
+static extent_tree_t huge;
+
+#ifdef MALLOC_STATS
+/* Huge allocation statistics. */
+static uint64_t huge_nmalloc;
+static uint64_t huge_ndalloc;
+static size_t huge_allocated;
+static size_t huge_mapped;
+#endif
+
+/****************************/
+/*
+ * base (internal allocation).
+ */
+
+/*
+ * Current pages that are being used for internal memory allocations. These
+ * pages are carved up in cacheline-size quanta, so that there is no chance of
+ * false cache line sharing.
+ */
+static void *base_pages;
+static void *base_next_addr;
+#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
+static void *base_next_decommitted;
+#endif
+static void *base_past_addr; /* Addr immediately past base_pages. */
+static extent_node_t *base_nodes;
+static malloc_mutex_t base_mtx;
+#ifdef MALLOC_STATS
+static size_t base_mapped;
+static size_t base_committed;
+#endif
+
+/********/
+/*
+ * Arenas.
+ */
+
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+static arena_t **arenas;
+static unsigned narenas;
+#ifndef NO_TLS
+# ifdef MALLOC_BALANCE
+static unsigned narenas_2pow;
+# else
+static unsigned next_arena;
+# endif
+#endif
+#ifdef MOZ_MEMORY
+static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
+#else
+static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
+#endif
+
+#ifndef NO_TLS
+/*
+ * Map of pthread_self() --> arenas[???], used for selecting an arena to use
+ * for allocations.
+ */
+#ifndef MOZ_MEMORY_WINDOWS
+static __thread arena_t *arenas_map;
+#endif
+#endif
+
+/*******************************/
+/*
+ * Runtime configuration options.
+ */
+MOZ_JEMALLOC_API
+const char *_malloc_options = MOZ_MALLOC_OPTIONS;
+
+#ifndef MALLOC_PRODUCTION
+static bool opt_abort = true;
+#ifdef MALLOC_FILL
+static bool opt_junk = true;
+static bool opt_poison = true;
+static bool opt_zero = false;
+#endif
+#else
+static bool opt_abort = false;
+#ifdef MALLOC_FILL
+static const bool opt_junk = false;
+static const bool opt_poison = true;
+static const bool opt_zero = false;
+#endif
+#endif
+
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+#ifdef MALLOC_BALANCE
+static uint64_t opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
+#endif
+static bool opt_print_stats = false;
+#ifdef MALLOC_STATIC_SIZES
+#define opt_quantum_2pow QUANTUM_2POW_MIN
+#define opt_small_max_2pow SMALL_MAX_2POW_DEFAULT
+#define opt_chunk_2pow CHUNK_2POW_DEFAULT
+#else
+static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
+static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
+static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+#endif
+#ifdef MALLOC_UTRACE
+static bool opt_utrace = false;
+#endif
+#ifdef MALLOC_SYSV
+static bool opt_sysv = false;
+#endif
+#ifdef MALLOC_XMALLOC
+static bool opt_xmalloc = false;
+#endif
+static int opt_narenas_lshift = 0;
+
+#ifdef MALLOC_UTRACE
+typedef struct {
+ void *p;
+ size_t s;
+ void *r;
+} malloc_utrace_t;
+
+#define UTRACE(a, b, c) \
+ if (opt_utrace) { \
+ malloc_utrace_t ut; \
+ ut.p = (a); \
+ ut.s = (b); \
+ ut.r = (c); \
+ utrace(&ut, sizeof(ut)); \
+ }
+#else
+#define UTRACE(a, b, c)
+#endif
+
+/******************************************************************************/
+/*
+ * Begin function prototypes for non-inline static functions.
+ */
+
+static char *umax2s(uintmax_t x, unsigned base, char *s);
+static bool malloc_mutex_init(malloc_mutex_t *mutex);
+static bool malloc_spin_init(malloc_spinlock_t *lock);
+static void wrtmessage(const char *p1, const char *p2, const char *p3,
+ const char *p4);
+#ifdef MALLOC_STATS
+#ifdef MOZ_MEMORY_DARWIN
+/* Avoid namespace collision with OS X's malloc APIs. */
+#define malloc_printf moz_malloc_printf
+#endif
+static void malloc_printf(const char *format, ...);
+#endif
+static bool base_pages_alloc(size_t minsize);
+static void *base_alloc(size_t size);
+static void *base_calloc(size_t number, size_t size);
+static extent_node_t *base_node_alloc(void);
+static void base_node_dealloc(extent_node_t *node);
+#ifdef MALLOC_STATS
+static void stats_print(arena_t *arena);
+#endif
+static void *pages_map(void *addr, size_t size);
+static void pages_unmap(void *addr, size_t size);
+static void *chunk_alloc_mmap(size_t size, size_t alignment);
+static void *chunk_recycle(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size,
+ size_t alignment, bool base, bool *zero);
+static void *chunk_alloc(size_t size, size_t alignment, bool base, bool zero);
+static void chunk_record(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, void *chunk, size_t size);
+static bool chunk_dalloc_mmap(void *chunk, size_t size);
+static void chunk_dealloc(void *chunk, size_t size);
+#ifndef NO_TLS
+static arena_t *choose_arena_hard(void);
+#endif
+static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+ bool large, bool zero);
+static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
+static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
+ size_t size, bool large, bool zero);
+static void arena_purge(arena_t *arena, bool all);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize);
+static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
+static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
+static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
+static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
+#ifdef MALLOC_BALANCE
+static void arena_lock_balance_hard(arena_t *arena);
+#endif
+static void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
+static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
+ size_t alloc_size);
+static size_t arena_salloc(const void *ptr);
+static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr);
+static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t size, size_t oldsize);
+static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t size, size_t oldsize);
+static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize);
+static void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
+static bool arena_new(arena_t *arena);
+static arena_t *arenas_extend(unsigned ind);
+static void *huge_malloc(size_t size, bool zero);
+static void *huge_palloc(size_t size, size_t alignment, bool zero);
+static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
+static void huge_dalloc(void *ptr);
+static void malloc_print_stats(void);
+#ifndef MOZ_MEMORY_WINDOWS
+static
+#endif
+bool malloc_init_hard(void);
+
+static void _malloc_prefork(void);
+static void _malloc_postfork(void);
+
+#ifdef MOZ_MEMORY_DARWIN
+/*
+ * MALLOC_ZONE_T_NOTE
+ *
+ * On Darwin, we hook into the memory allocator using a malloc_zone_t struct.
+ * We must be very careful around this struct because of different behaviour on
+ * different versions of OSX.
+ *
+ * Each of OSX 10.5, 10.6 and 10.7 use different versions of the struct
+ * (with version numbers 3, 6 and 8 respectively). The binary we use on each of
+ * these platforms will not necessarily be built using the correct SDK [1].
+ * This means we need to statically know the correct struct size to use on all
+ * OSX releases, and have a fallback for unknown future versions. The struct
+ * sizes are defined in osx_zone_types.h.
+ *
+ * For OSX 10.8 and later, we may expect the malloc_zone_t struct to change
+ * again, and need to dynamically account for this. By simply leaving
+ * malloc_zone_t alone, we don't quite deal with the problem, because there
+ * remain calls to jemalloc through the mozalloc interface. We check this
+ * dynamically on each allocation, using the CHECK_DARWIN macro and
+ * osx_use_jemalloc.
+ *
+ *
+ * [1] Mozilla is built as a universal binary on Mac, supporting i386 and
+ * x86_64. The i386 target is built using the 10.5 SDK, even if it runs on
+ * 10.6. The x86_64 target is built using the 10.6 SDK, even if it runs on
+ * 10.7 or later, or 10.5.
+ *
+ * FIXME:
+ * When later versions of OSX come out (10.8 and up), we need to check their
+ * malloc_zone_t versions. If they're greater than 8, we need a new version
+ * of malloc_zone_t adapted into osx_zone_types.h.
+ */
+
+#ifndef MOZ_REPLACE_MALLOC
+#include "osx_zone_types.h"
+
+#define LEOPARD_MALLOC_ZONE_T_VERSION 3
+#define SNOW_LEOPARD_MALLOC_ZONE_T_VERSION 6
+#define LION_MALLOC_ZONE_T_VERSION 8
+
+static bool osx_use_jemalloc = false;
+
+
+static lion_malloc_zone l_szone;
+static malloc_zone_t * szone = (malloc_zone_t*)(&l_szone);
+
+static lion_malloc_introspection l_ozone_introspect;
+static malloc_introspection_t * const ozone_introspect =
+ (malloc_introspection_t*)(&l_ozone_introspect);
+static void szone2ozone(malloc_zone_t *zone, size_t size);
+static size_t zone_version_size(int version);
+#else
+static const bool osx_use_jemalloc = true;
+#endif
+
+#endif
+
+/*
+ * End function prototypes.
+ */
+/******************************************************************************/
+
+static inline size_t
+load_acquire_z(size_t *p)
+{
+ volatile size_t result = *p;
+# ifdef MOZ_MEMORY_WINDOWS
+ /*
+ * We use InterlockedExchange with a dummy value to insert a memory
+ * barrier. This has been confirmed to generate the right instruction
+ * and is also used by MinGW.
+ */
+ volatile long dummy = 0;
+ InterlockedExchange(&dummy, 1);
+# else
+ __sync_synchronize();
+# endif
+ return result;
+}
+
+/*
+ * umax2s() provides minimal integer printing functionality, which is
+ * especially useful for situations where allocation in vsnprintf() calls would
+ * potentially cause deadlock.
+ */
+#define UMAX2S_BUFSIZE 65
+char *
+umax2s(uintmax_t x, unsigned base, char *s)
+{
+ unsigned i;
+
+ i = UMAX2S_BUFSIZE - 1;
+ s[i] = '\0';
+ switch (base) {
+ case 10:
+ do {
+ i--;
+ s[i] = "0123456789"[x % 10];
+ x /= 10;
+ } while (x > 0);
+ break;
+ case 16:
+ do {
+ i--;
+ s[i] = "0123456789abcdef"[x & 0xf];
+ x >>= 4;
+ } while (x > 0);
+ break;
+ default:
+ do {
+ i--;
+ s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x % base];
+ x /= base;
+ } while (x > 0);
+ }
+
+ return (&s[i]);
+}
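+/*
+ * Example: given char buf[UMAX2S_BUFSIZE], umax2s(255, 16, buf) returns a
+ * pointer to the string "ff" *inside* buf (digits are written backwards from
+ * the end of the buffer), not a pointer to buf itself.
+ */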
+
+static void
+wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
+{
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
+#define _write write
+#endif
+ // Pretend to check _write() errors to suppress gcc warnings about
+ // warn_unused_result annotations in some versions of glibc headers.
+ if (_write(STDERR_FILENO, p1, (unsigned int) strlen(p1)) < 0)
+ return;
+ if (_write(STDERR_FILENO, p2, (unsigned int) strlen(p2)) < 0)
+ return;
+ if (_write(STDERR_FILENO, p3, (unsigned int) strlen(p3)) < 0)
+ return;
+ if (_write(STDERR_FILENO, p4, (unsigned int) strlen(p4)) < 0)
+ return;
+}
+
+MOZ_JEMALLOC_API
+void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
+ const char *p4) = wrtmessage;
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+// Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
+// instead of the one defined here; use only MozTagAnonymousMemory().
+
+#ifdef MALLOC_DEBUG
+# define assert(e) MOZ_ASSERT(e)
+#else
+# define assert(e)
+#endif
+
+#ifdef MOZ_MEMORY_ANDROID
+// Android's pthread.h does not declare pthread_atfork() until SDK 21.
+extern MOZ_EXPORT
+int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
+#endif
+
+#if defined(MOZ_JEMALLOC_HARD_ASSERTS)
+# define RELEASE_ASSERT(assertion) do { \
+ if (!(assertion)) { \
+ MOZ_CRASH_UNSAFE_OOL(#assertion); \
+ } \
+} while (0)
+#else
+# define RELEASE_ASSERT(assertion) assert(assertion)
+#endif
+
+/******************************************************************************/
+/*
+ * Begin mutex. We can't use normal pthread mutexes in all places, because
+ * they require malloc()ed memory, which causes bootstrapping issues in some
+ * cases.
+ */
+
+static bool
+malloc_mutex_init(malloc_mutex_t *mutex)
+{
+#if defined(MOZ_MEMORY_WINDOWS)
+ if (isthreaded)
+ if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
+ return (true);
+#elif defined(MOZ_MEMORY_DARWIN)
+ mutex->lock = OS_SPINLOCK_INIT;
+#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
+ pthread_mutexattr_t attr;
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ if (pthread_mutex_init(mutex, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#elif defined(MOZ_MEMORY)
+ if (pthread_mutex_init(mutex, NULL) != 0)
+ return (true);
+#else
+ static const spinlock_t lock = _SPINLOCK_INITIALIZER;
+
+ mutex->lock = lock;
+#endif
+ return (false);
+}
+
+static inline void
+malloc_mutex_lock(malloc_mutex_t *mutex)
+{
+
+#if defined(MOZ_MEMORY_WINDOWS)
+ EnterCriticalSection(mutex);
+#elif defined(MOZ_MEMORY_DARWIN)
+ OSSpinLockLock(&mutex->lock);
+#elif defined(MOZ_MEMORY)
+ pthread_mutex_lock(mutex);
+#else
+ if (isthreaded)
+ _SPINLOCK(&mutex->lock);
+#endif
+}
+
+static inline void
+malloc_mutex_unlock(malloc_mutex_t *mutex)
+{
+
+#if defined(MOZ_MEMORY_WINDOWS)
+ LeaveCriticalSection(mutex);
+#elif defined(MOZ_MEMORY_DARWIN)
+ OSSpinLockUnlock(&mutex->lock);
+#elif defined(MOZ_MEMORY)
+ pthread_mutex_unlock(mutex);
+#else
+ if (isthreaded)
+ _SPINUNLOCK(&mutex->lock);
+#endif
+}
+
+#if (defined(__GNUC__))
+__attribute__((unused))
+#endif
+static bool
+malloc_spin_init(malloc_spinlock_t *lock)
+{
+#if defined(MOZ_MEMORY_WINDOWS)
+ if (isthreaded)
+ if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
+ return (true);
+#elif defined(MOZ_MEMORY_DARWIN)
+ lock->lock = OS_SPINLOCK_INIT;
+#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
+ pthread_mutexattr_t attr;
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ if (pthread_mutex_init(lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#elif defined(MOZ_MEMORY)
+ if (pthread_mutex_init(lock, NULL) != 0)
+ return (true);
+#else
+ lock->lock = _SPINLOCK_INITIALIZER;
+#endif
+ return (false);
+}
+
+static inline void
+malloc_spin_lock(malloc_spinlock_t *lock)
+{
+
+#if defined(MOZ_MEMORY_WINDOWS)
+ EnterCriticalSection(lock);
+#elif defined(MOZ_MEMORY_DARWIN)
+ OSSpinLockLock(&lock->lock);
+#elif defined(MOZ_MEMORY)
+ pthread_mutex_lock(lock);
+#else
+ if (isthreaded)
+ _SPINLOCK(&lock->lock);
+#endif
+}
+
+static inline void
+malloc_spin_unlock(malloc_spinlock_t *lock)
+{
+#if defined(MOZ_MEMORY_WINDOWS)
+ LeaveCriticalSection(lock);
+#elif defined(MOZ_MEMORY_DARWIN)
+ OSSpinLockUnlock(&lock->lock);
+#elif defined(MOZ_MEMORY)
+ pthread_mutex_unlock(lock);
+#else
+ if (isthreaded)
+ _SPINUNLOCK(&lock->lock);
+#endif
+}
+
+/*
+ * End mutex.
+ */
+/******************************************************************************/
+/*
+ * Begin spin lock. Spin locks here are actually adaptive mutexes that block
+ * after a period of spinning, because unbounded spinning would allow for
+ * priority inversion.
+ */
+
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
+# define malloc_spin_init malloc_mutex_init
+# define malloc_spin_lock malloc_mutex_lock
+# define malloc_spin_unlock malloc_mutex_unlock
+#endif
+
+#ifndef MOZ_MEMORY
+/*
+ * We use an unpublished interface to initialize pthread mutexes with an
+ * allocation callback, in order to avoid infinite recursion.
+ */
+int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+ _pthread_mutex_init_calloc_cb);
+
+int
+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+
+ return (0);
+}
+
+static bool
+malloc_spin_init(pthread_mutex_t *lock)
+{
+
+ if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
+ return (true);
+
+ return (false);
+}
+
+static inline unsigned
+malloc_spin_lock(pthread_mutex_t *lock)
+{
+ unsigned ret = 0;
+
+ if (isthreaded) {
+ if (_pthread_mutex_trylock(lock) != 0) {
+ unsigned i;
+ volatile unsigned j;
+
+ /* Exponentially back off. */
+ for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
+ for (j = 0; j < (1U << i); j++)
+ ret++;
+
+ CPU_SPINWAIT;
+ if (_pthread_mutex_trylock(lock) == 0)
+ return (ret);
+ }
+
+ /*
+ * Spinning failed. Block until the lock becomes
+ * available, in order to avoid indefinite priority
+ * inversion.
+ */
+ _pthread_mutex_lock(lock);
+ assert((ret << BLOCK_COST_2POW) != 0);
+ return (ret << BLOCK_COST_2POW);
+ }
+ }
+
+ return (ret);
+}
+
+static inline void
+malloc_spin_unlock(pthread_mutex_t *lock)
+{
+
+ if (isthreaded)
+ _pthread_mutex_unlock(lock);
+}
+#endif
+
+/*
+ * End spin lock.
+ */
+/******************************************************************************/
+/*
+ * Begin Utility functions/macros.
+ */
+
+/* Return the chunk address for allocation address a. */
+#define CHUNK_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~chunksize_mask))
+
+/* Return the chunk offset of address a. */
+#define CHUNK_ADDR2OFFSET(a) \
+ ((size_t)((uintptr_t)(a) & chunksize_mask))
+
+/* Return the smallest chunk multiple that is >= s. */
+#define CHUNK_CEILING(s) \
+ (((s) + chunksize_mask) & ~chunksize_mask)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define CACHELINE_CEILING(s) \
+ (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
+
+/* Return the smallest quantum multiple that is >= a. */
+#define QUANTUM_CEILING(a) \
+ (((a) + quantum_mask) & ~quantum_mask)
+
+/* Return the smallest pagesize multiple that is >= s. */
+#define PAGE_CEILING(s) \
+ (((s) + pagesize_mask) & ~pagesize_mask)
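+/*
+ * Worked examples of the ceiling macros, assuming a 16-byte quantum and a
+ * 4 KiB page: QUANTUM_CEILING(17) == 32, PAGE_CEILING(4097) == 8192, and
+ * CACHELINE_CEILING(65) == 128.
+ */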
+
+/* Compute the smallest power of 2 that is >= x. */
+static inline size_t
+pow2_ceil(size_t x)
+{
+
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+#if (SIZEOF_PTR == 8)
+ x |= x >> 32;
+#endif
+ x++;
+ return (x);
+}
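+/*
+ * The shifts above smear the highest set bit of (x - 1) into all lower bit
+ * positions, so for example pow2_ceil(37) == 64 and pow2_ceil(64) == 64.
+ */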
+
+#ifdef MALLOC_BALANCE
+/*
+ * Use a simple linear congruential pseudo-random number generator:
+ *
+ * prn(x) = (a*x + c) % m
+ *
+ * where the following constants ensure maximal period:
+ *
+ * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
+ * c == Odd number (relatively prime to 2^n).
+ * m == 2^32
+ *
+ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
+ *
+ * This choice of m has the disadvantage that the quality of the bits is
+ * proportional to bit position. For example, the lowest bit has a cycle of 2,
+ * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
+ * bits.
+ */
+# define PRN_DEFINE(suffix, var, a, c) \
+static inline void \
+sprn_##suffix(uint32_t seed) \
+{ \
+ var = seed; \
+} \
+ \
+static inline uint32_t \
+prn_##suffix(uint32_t lg_range) \
+{ \
+ uint32_t ret, x; \
+ \
+ assert(lg_range > 0); \
+ assert(lg_range <= 32); \
+ \
+ x = (var * (a)) + (c); \
+ var = x; \
+ ret = x >> (32 - lg_range); \
+ \
+ return (ret); \
+}
+# define SPRN(suffix, seed) sprn_##suffix(seed)
+# define PRN(suffix, lg_range) prn_##suffix(lg_range)
+#endif
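+/*
+ * Usage sketch for the generated functions (see the "balance" instantiation
+ * below): SPRN(balance, seed) seeds the thread-local state, and
+ * PRN(balance, lg_range) returns the top lg_range bits of the next LCG
+ * output, i.e. a value in [0, 2^lg_range).
+ */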
+
+#ifdef MALLOC_BALANCE
+/* Define the PRNG used for arena assignment. */
+static __thread uint32_t balance_x;
+PRN_DEFINE(balance, balance_x, 1297, 1301)
+#endif
+
+#ifdef MALLOC_UTRACE
+static int
+utrace(const void *addr, size_t len)
+{
+ malloc_utrace_t *ut = (malloc_utrace_t *)addr;
+ char buf_a[UMAX2S_BUFSIZE];
+ char buf_b[UMAX2S_BUFSIZE];
+
+ assert(len == sizeof(malloc_utrace_t));
+
+ if (ut->p == NULL && ut->s == 0 && ut->r == NULL) {
+ _malloc_message(
+ umax2s(getpid(), 10, buf_a),
+ " x USER malloc_init()\n", "", "");
+ } else if (ut->p == NULL && ut->r != NULL) {
+ _malloc_message(
+ umax2s(getpid(), 10, buf_a),
+ " x USER 0x",
+ umax2s((uintptr_t)ut->r, 16, buf_b),
+ " = malloc(");
+ _malloc_message(
+ umax2s(ut->s, 10, buf_a),
+ ")\n", "", "");
+ } else if (ut->p != NULL && ut->r != NULL) {
+ _malloc_message(
+ umax2s(getpid(), 10, buf_a),
+ " x USER 0x",
+ umax2s((uintptr_t)ut->r, 16, buf_b),
+ " = realloc(0x");
+ _malloc_message(
+ umax2s((uintptr_t)ut->p, 16, buf_a),
+ ", ",
+ umax2s(ut->s, 10, buf_b),
+ ")\n");
+ } else {
+ _malloc_message(
+ umax2s(getpid(), 10, buf_a),
+ " x USER free(0x",
+ umax2s((uintptr_t)ut->p, 16, buf_b),
+ ")\n");
+ }
+
+ return (0);
+}
+#endif
+
+static inline const char *
+_getprogname(void)
+{
+
+ return ("<jemalloc>");
+}
+
+#ifdef MALLOC_STATS
+/*
+ * Print to stderr in such a way as to (hopefully) avoid memory allocation.
+ */
+static void
+malloc_printf(const char *format, ...)
+{
+ char buf[4096];
+ va_list ap;
+
+ va_start(ap, format);
+ vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ _malloc_message(buf, "", "", "");
+}
+#endif
+
+/******************************************************************************/
+
+static inline void
+pages_decommit(void *addr, size_t size)
+{
+
+#ifdef MOZ_MEMORY_WINDOWS
+ /*
+ * The region starting at addr may have been allocated in multiple calls
+ * to VirtualAlloc and recycled, so decommitting the entire region in one
+ * go may not be valid. However, since we allocate at least a chunk at a
+ * time, we may touch any region in chunksized increments.
+ */
+ size_t pages_size = min(size, chunksize -
+ CHUNK_ADDR2OFFSET((uintptr_t)addr));
+ while (size > 0) {
+ if (!VirtualFree(addr, pages_size, MEM_DECOMMIT))
+ abort();
+ addr = (void *)((uintptr_t)addr + pages_size);
+ size -= pages_size;
+ pages_size = min(size, chunksize);
+ }
+#else
+ if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
+ 0) == MAP_FAILED)
+ abort();
+ MozTagAnonymousMemory(addr, size, "jemalloc-decommitted");
+#endif
+}
+
+static inline void
+pages_commit(void *addr, size_t size)
+{
+
+# ifdef MOZ_MEMORY_WINDOWS
+ /*
+ * The region starting at addr may have been allocated in multiple calls
+ * to VirtualAlloc and recycled, so committing the entire region in one
+ * go may not be valid. However, since we allocate at least a chunk at a
+ * time, we may touch any region in chunksized increments.
+ */
+ size_t pages_size = min(size, chunksize -
+ CHUNK_ADDR2OFFSET((uintptr_t)addr));
+ while (size > 0) {
+ if (!VirtualAlloc(addr, pages_size, MEM_COMMIT, PAGE_READWRITE))
+ abort();
+ addr = (void *)((uintptr_t)addr + pages_size);
+ size -= pages_size;
+ pages_size = min(size, chunksize);
+ }
+# else
+ if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
+ MAP_ANON, -1, 0) == MAP_FAILED)
+ abort();
+ MozTagAnonymousMemory(addr, size, "jemalloc");
+# endif
+}
+
+static bool
+base_pages_alloc(size_t minsize)
+{
+ size_t csize;
+#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
+ size_t pminsize;
+#endif
+
+ assert(minsize != 0);
+ csize = CHUNK_CEILING(minsize);
+ base_pages = chunk_alloc(csize, chunksize, true, false);
+ if (base_pages == NULL)
+ return (true);
+ base_next_addr = base_pages;
+ base_past_addr = (void *)((uintptr_t)base_pages + csize);
+#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
+ /*
+ * Leave enough pages for minsize committed, since otherwise they would
+ * have to be immediately recommitted.
+ */
+ pminsize = PAGE_CEILING(minsize);
+ base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
+# if defined(MALLOC_DECOMMIT)
+ if (pminsize < csize)
+ pages_decommit(base_next_decommitted, csize - pminsize);
+# endif
+# ifdef MALLOC_STATS
+ base_mapped += csize;
+ base_committed += pminsize;
+# endif
+#endif
+
+ return (false);
+}
+
+static void *
+base_alloc(size_t size)
+{
+ void *ret;
+ size_t csize;
+
+ /* Round size up to nearest multiple of the cacheline size. */
+ csize = CACHELINE_CEILING(size);
+
+ malloc_mutex_lock(&base_mtx);
+ /* Make sure there's enough space for the allocation. */
+ if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+ if (base_pages_alloc(csize)) {
+ malloc_mutex_unlock(&base_mtx);
+ return (NULL);
+ }
+ }
+ /* Allocate. */
+ ret = base_next_addr;
+ base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS)
+ /* Make sure enough pages are committed for the new allocation. */
+ if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
+ void *pbase_next_addr =
+ (void *)(PAGE_CEILING((uintptr_t)base_next_addr));
+
+# ifdef MALLOC_DECOMMIT
+ pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
+ (uintptr_t)base_next_decommitted);
+# endif
+# ifdef MALLOC_STATS
+ /* Account for the newly committed range before advancing the marker. */
+ base_committed += (uintptr_t)pbase_next_addr -
+     (uintptr_t)base_next_decommitted;
+# endif
+ base_next_decommitted = pbase_next_addr;
+ }
+#endif
+ malloc_mutex_unlock(&base_mtx);
+
+ return (ret);
+}
+
+static void *
+base_calloc(size_t number, size_t size)
+{
+ void *ret;
+
+ ret = base_alloc(number * size);
+ if (ret == NULL)
+  return (NULL);
+ memset(ret, 0, number * size);
+
+ return (ret);
+}
+
+static extent_node_t *
+base_node_alloc(void)
+{
+ extent_node_t *ret;
+
+ malloc_mutex_lock(&base_mtx);
+ if (base_nodes != NULL) {
+ ret = base_nodes;
+ base_nodes = *(extent_node_t **)ret;
+ malloc_mutex_unlock(&base_mtx);
+ } else {
+ malloc_mutex_unlock(&base_mtx);
+ ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+ }
+
+ return (ret);
+}
+
+static void
+base_node_dealloc(extent_node_t *node)
+{
+
+ malloc_mutex_lock(&base_mtx);
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
+ malloc_mutex_unlock(&base_mtx);
+}
+
+/******************************************************************************/
+
+#ifdef MALLOC_STATS
+static void
+stats_print(arena_t *arena)
+{
+ unsigned i, gap_start;
+
+#ifdef MOZ_MEMORY_WINDOWS
+ malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
+ " %I64u madvise%s, %I64u page%s purged\n",
+ arena->ndirty, arena->ndirty == 1 ? "" : "s",
+ arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
+ arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
+ arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
+# ifdef MALLOC_DECOMMIT
+ malloc_printf("decommit: %I64u decommit%s, %I64u commit%s,"
+ " %I64u page%s decommitted\n",
+ arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
+ arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
+ arena->stats.decommitted,
+ (arena->stats.decommitted == 1) ? "" : "s");
+# endif
+
+ malloc_printf(" allocated nmalloc ndalloc\n");
+ malloc_printf("small: %12Iu %12I64u %12I64u\n",
+ arena->stats.allocated_small, arena->stats.nmalloc_small,
+ arena->stats.ndalloc_small);
+ malloc_printf("large: %12Iu %12I64u %12I64u\n",
+ arena->stats.allocated_large, arena->stats.nmalloc_large,
+ arena->stats.ndalloc_large);
+ malloc_printf("total: %12Iu %12I64u %12I64u\n",
+ arena->stats.allocated_small + arena->stats.allocated_large,
+ arena->stats.nmalloc_small + arena->stats.nmalloc_large,
+ arena->stats.ndalloc_small + arena->stats.ndalloc_large);
+ malloc_printf("mapped: %12Iu\n", arena->stats.mapped);
+#else
+ malloc_printf("dirty: %zu page%s dirty, %llu sweep%s,"
+ " %llu madvise%s, %llu page%s purged\n",
+ arena->ndirty, arena->ndirty == 1 ? "" : "s",
+ arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
+ arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
+ arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
+# ifdef MALLOC_DECOMMIT
+ malloc_printf("decommit: %llu decommit%s, %llu commit%s,"
+ " %llu page%s decommitted\n",
+ arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
+ arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
+ arena->stats.decommitted,
+ (arena->stats.decommitted == 1) ? "" : "s");
+# endif
+
+ malloc_printf(" allocated nmalloc ndalloc\n");
+ malloc_printf("small: %12zu %12llu %12llu\n",
+ arena->stats.allocated_small, arena->stats.nmalloc_small,
+ arena->stats.ndalloc_small);
+ malloc_printf("large: %12zu %12llu %12llu\n",
+ arena->stats.allocated_large, arena->stats.nmalloc_large,
+ arena->stats.ndalloc_large);
+ malloc_printf("total: %12zu %12llu %12llu\n",
+ arena->stats.allocated_small + arena->stats.allocated_large,
+ arena->stats.nmalloc_small + arena->stats.nmalloc_large,
+ arena->stats.ndalloc_small + arena->stats.ndalloc_large);
+ malloc_printf("mapped: %12zu\n", arena->stats.mapped);
+#endif
+ malloc_printf("bins: bin size regs pgs requests newruns"
+ " reruns maxruns curruns\n");
+ for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
+ if (arena->bins[i].stats.nrequests == 0) {
+ if (gap_start == UINT_MAX)
+ gap_start = i;
+ } else {
+ if (gap_start != UINT_MAX) {
+ if (i > gap_start + 1) {
+ /* Gap of more than one size class. */
+ malloc_printf("[%u..%u]\n",
+ gap_start, i - 1);
+ } else {
+ /* Gap of one size class. */
+ malloc_printf("[%u]\n", gap_start);
+ }
+ gap_start = UINT_MAX;
+ }
+ malloc_printf(
+#if defined(MOZ_MEMORY_WINDOWS)
+ "%13u %1s %4u %4u %3u %9I64u %9I64u"
+ " %9I64u %7u %7u\n",
+#else
+ "%13u %1s %4u %4u %3u %9llu %9llu"
+ " %9llu %7lu %7lu\n",
+#endif
+ i,
+ i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
+ arena->bins[i].reg_size,
+ arena->bins[i].nregs,
+ arena->bins[i].run_size >> pagesize_2pow,
+ arena->bins[i].stats.nrequests,
+ arena->bins[i].stats.nruns,
+ arena->bins[i].stats.reruns,
+ arena->bins[i].stats.highruns,
+ arena->bins[i].stats.curruns);
+ }
+ }
+ if (gap_start != UINT_MAX) {
+ if (i > gap_start + 1) {
+ /* Gap of more than one size class. */
+ malloc_printf("[%u..%u]\n", gap_start, i - 1);
+ } else {
+ /* Gap of one size class. */
+ malloc_printf("[%u]\n", gap_start);
+ }
+ }
+}
+#endif
+
+/*
+ * End Utility functions/macros.
+ */
+/******************************************************************************/
+/*
+ * Begin extent tree code.
+ */
+
+static inline int
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
+{
+ int ret;
+ size_t a_size = a->size;
+ size_t b_size = b->size;
+
+ ret = (a_size > b_size) - (a_size < b_size);
+ if (ret == 0) {
+ uintptr_t a_addr = (uintptr_t)a->addr;
+ uintptr_t b_addr = (uintptr_t)b->addr;
+
+ ret = (a_addr > b_addr) - (a_addr < b_addr);
+ }
+
+ return (ret);
+}
+
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
+ link_szad, extent_szad_comp)
+
+static inline int
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
+{
+ uintptr_t a_addr = (uintptr_t)a->addr;
+ uintptr_t b_addr = (uintptr_t)b->addr;
+
+ return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
+ extent_ad_comp)
+
+/*
+ * End extent tree code.
+ */
+/******************************************************************************/
+/*
+ * Begin chunk management functions.
+ */
+
+#ifdef MOZ_MEMORY_WINDOWS
+
+static void *
+pages_map(void *addr, size_t size)
+{
+ void *ret = NULL;
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+ return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in VirtualFree()\n", "", "");
+ if (opt_abort)
+ abort();
+ }
+}
+#else
+#ifdef JEMALLOC_USES_MAP_ALIGN
+static void *
+pages_map_align(size_t size, size_t alignment)
+{
+ void *ret;
+
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else
+ MozTagAnonymousMemory(ret, size, "jemalloc");
+ return (ret);
+}
+#endif
+
+static void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+#if defined(__ia64__)
+ /*
+ * The JS engine assumes that all allocated pointers have their high 17 bits clear,
+ * which ia64's mmap doesn't support directly. However, we can emulate it by passing
+ * mmap an "addr" parameter with those bits clear. The mmap will return that address,
+ * or the nearest available memory above that address, providing a near-guarantee
+ * that those bits are clear. If they are not, we return NULL below to indicate
+ * out-of-memory.
+ *
+ * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
+ * address space.
+ *
+ * See Bug 589735 for more information.
+ */
+ bool check_placement = true;
+ if (addr == NULL) {
+ addr = (void*)0x0000070000000000;
+ check_placement = false;
+ }
+#endif
+
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED) {
+ ret = NULL;
+ }
+#if defined(__ia64__)
+ /*
+ * If the allocated memory doesn't have its upper 17 bits clear, consider it
+ * as out of memory.
+ */
+ else if ((long long)ret & 0xffff800000000000) {
+ munmap(ret, size);
+ ret = NULL;
+ }
+ /* If the caller requested a specific memory location, verify that's what mmap returned. */
+ else if (check_placement && ret != addr) {
+#else
+ else if (addr != NULL && ret != addr) {
+#endif
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ if (munmap(ret, size) == -1) {
+ char buf[STRERROR_BUF];
+
+ if (strerror_r(errno, buf, sizeof(buf)) == 0) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in munmap(): ", buf, "\n");
+ }
+ if (opt_abort)
+ abort();
+ }
+ ret = NULL;
+ }
+ if (ret != NULL) {
+ MozTagAnonymousMemory(ret, size, "jemalloc");
+ }
+
+#if defined(__ia64__)
+ assert(ret == NULL || (!check_placement && ret != NULL)
+ || (check_placement && ret == addr));
+#else
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+#endif
+ return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+ if (munmap(addr, size) == -1) {
+ char buf[STRERROR_BUF];
+
+ if (strerror_r(errno, buf, sizeof(buf)) == 0) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in munmap(): ", buf, "\n");
+ }
+ if (opt_abort)
+ abort();
+ }
+}
+#endif
+
+#ifdef MOZ_MEMORY_DARWIN
+#define VM_COPY_MIN (pagesize << 5)
+static inline void
+pages_copy(void *dest, const void *src, size_t n)
+{
+
+ assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
+ assert(n >= VM_COPY_MIN);
+ assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
+
+ vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
+ (vm_address_t)dest);
+}
+#endif
+
+#ifdef MALLOC_VALIDATE
+static inline malloc_rtree_t *
+malloc_rtree_new(unsigned bits)
+{
+ malloc_rtree_t *ret;
+ unsigned bits_per_level, height, i;
+
+ bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
+ sizeof(void *)))) - 1;
+ height = bits / bits_per_level;
+ if (height * bits_per_level != bits)
+ height++;
+ RELEASE_ASSERT(height * bits_per_level >= bits);
+
+ ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) +
+ (sizeof(unsigned) * (height - 1)));
+ if (ret == NULL)
+ return (NULL);
+
+ malloc_spin_init(&ret->lock);
+ ret->height = height;
+ if (bits_per_level * height > bits)
+ ret->level2bits[0] = bits % bits_per_level;
+ else
+ ret->level2bits[0] = bits_per_level;
+ for (i = 1; i < height; i++)
+ ret->level2bits[i] = bits_per_level;
+
+ ret->root = (void**)base_calloc(1, sizeof(void *) << ret->level2bits[0]);
+ if (ret->root == NULL) {
+ /*
+ * We leak the rtree here, since there's no generic base
+ * deallocation.
+ */
+ return (NULL);
+ }
+
+ return (ret);
+}
+
+#define MALLOC_RTREE_GET_GENERATE(f) \
+/* The least significant bits of the key are ignored. */ \
+static inline void * \
+f(malloc_rtree_t *rtree, uintptr_t key) \
+{ \
+ void *ret; \
+ uintptr_t subkey; \
+ unsigned i, lshift, height, bits; \
+ void **node, **child; \
+ \
+ MALLOC_RTREE_LOCK(&rtree->lock); \
+ for (i = lshift = 0, height = rtree->height, node = rtree->root;\
+ i < height - 1; \
+ i++, lshift += bits, node = child) { \
+ bits = rtree->level2bits[i]; \
+ subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits); \
+ child = (void**)node[subkey]; \
+ if (child == NULL) { \
+ MALLOC_RTREE_UNLOCK(&rtree->lock); \
+ return (NULL); \
+ } \
+ } \
+ \
+ /* \
+ * node is a leaf, so it contains values rather than node \
+ * pointers. \
+ */ \
+ bits = rtree->level2bits[i]; \
+ subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits); \
+ ret = node[subkey]; \
+ MALLOC_RTREE_UNLOCK(&rtree->lock); \
+ \
+ MALLOC_RTREE_GET_VALIDATE \
+ return (ret); \
+}
+
+#ifdef MALLOC_DEBUG
+# define MALLOC_RTREE_LOCK(l) malloc_spin_lock(l)
+# define MALLOC_RTREE_UNLOCK(l) malloc_spin_unlock(l)
+# define MALLOC_RTREE_GET_VALIDATE
+MALLOC_RTREE_GET_GENERATE(malloc_rtree_get_locked)
+# undef MALLOC_RTREE_LOCK
+# undef MALLOC_RTREE_UNLOCK
+# undef MALLOC_RTREE_GET_VALIDATE
+#endif
+
+#define MALLOC_RTREE_LOCK(l)
+#define MALLOC_RTREE_UNLOCK(l)
+#ifdef MALLOC_DEBUG
+ /*
+ * Suppose that it were possible for a jemalloc-allocated chunk to be
+ * munmap()ped, followed by a different allocator in another thread re-using
+ * overlapping virtual memory, all without invalidating the cached rtree
+ * value. The result would be a false positive (the rtree would claim that
+ * jemalloc owns memory that it had actually discarded). I don't think this
+ * scenario is possible, but the following assertion is a prudent sanity
+ * check.
+ */
+# define MALLOC_RTREE_GET_VALIDATE \
+ assert(malloc_rtree_get_locked(rtree, key) == ret);
+#else
+# define MALLOC_RTREE_GET_VALIDATE
+#endif
+MALLOC_RTREE_GET_GENERATE(malloc_rtree_get)
+#undef MALLOC_RTREE_LOCK
+#undef MALLOC_RTREE_UNLOCK
+#undef MALLOC_RTREE_GET_VALIDATE
+
+static inline bool
+malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
+{
+ uintptr_t subkey;
+ unsigned i, lshift, height, bits;
+ void **node, **child;
+
+ malloc_spin_lock(&rtree->lock);
+ for (i = lshift = 0, height = rtree->height, node = rtree->root;
+ i < height - 1;
+ i++, lshift += bits, node = child) {
+ bits = rtree->level2bits[i];
+ subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
+ child = (void**)node[subkey];
+ if (child == NULL) {
+ child = (void**)base_calloc(1, sizeof(void *) <<
+ rtree->level2bits[i+1]);
+ if (child == NULL) {
+ malloc_spin_unlock(&rtree->lock);
+ return (true);
+ }
+ node[subkey] = child;
+ }
+ }
+
+ /* node is a leaf, so it contains values rather than node pointers. */
+ bits = rtree->level2bits[i];
+ subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
+ node[subkey] = val;
+ malloc_spin_unlock(&rtree->lock);
+
+ return (false);
+}
+#endif
+
+/* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
+ * from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define ALIGNMENT_CEILING(s, alignment) \
+ (((s) + (alignment - 1)) & (-(alignment)))
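+
+/*
+ * Worked example (a sketch, assuming 4 KiB alignment): with a == 0x1234,
+ * ALIGNMENT_ADDR2OFFSET(a, 0x1000) == 0x234 and ALIGNMENT_CEILING(0x1234,
+ * 0x1000) == 0x2000, the next 4 KiB boundary at or above 0x1234.
+ */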
+
+static void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+{
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef MOZ_MEMORY_WINDOWS
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
+ return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
+
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment)
+{
+ void *ret, *pages;
+ size_t alloc_size, leadsize;
+
+ alloc_size = size + alignment - pagesize;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
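+	/*
+	 * pages_trim() can fail on Windows (its unmap-then-remap is not
+	 * atomic, so another thread may claim the region first), hence the
+	 * retry loop below.
+	 */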
+ do {
+ pages = pages_map(NULL, alloc_size);
+ if (pages == NULL)
+ return (NULL);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+ (uintptr_t)pages;
+ ret = pages_trim(pages, alloc_size, leadsize, size);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ return (ret);
+}
+
+static void *
+chunk_alloc_mmap(size_t size, size_t alignment)
+{
+#ifdef JEMALLOC_USES_MAP_ALIGN
+ return pages_map_align(size, alignment);
+#else
+ void *ret;
+ size_t offset;
+
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in one or two calls to
+ * pages_unmap().
+ *
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
+ */
+
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ pages_unmap(ret, size);
+ return (chunk_alloc_mmap_slow(size, alignment));
+ }
+
+ assert(ret != NULL);
+ return (ret);
+#endif
+}
+
+bool
+pages_purge(void *addr, size_t length)
+{
+ bool unzeroed;
+
+#ifdef MALLOC_DECOMMIT
+ pages_decommit(addr, length);
+ unzeroed = false;
+#else
+# ifdef MOZ_MEMORY_WINDOWS
+ /*
+ * The region starting at addr may have been allocated in multiple calls
+ * to VirtualAlloc and recycled, so resetting the entire region in one
+ * go may not be valid. However, since we allocate at least a chunk at a
+ * time, we may touch any region in chunksized increments.
+ */
+ size_t pages_size = min(length, chunksize -
+ CHUNK_ADDR2OFFSET((uintptr_t)addr));
+ while (length > 0) {
+ VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
+ addr = (void *)((uintptr_t)addr + pages_size);
+ length -= pages_size;
+ pages_size = min(length, chunksize);
+ }
+ unzeroed = true;
+# else
+# ifdef MOZ_MEMORY_LINUX
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
+# else /* FreeBSD and Darwin. */
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
+# endif
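+	/*
+	 * MADV_DONTNEED on Linux guarantees that purged anonymous pages read
+	 * back as zeroes, while MADV_FREE (FreeBSD/Darwin) makes no such
+	 * guarantee; JEMALLOC_MADV_ZEROS records that difference.
+	 */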
+ int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+ unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
+# endif
+#endif
+ return (unzeroed);
+}
+
+static void *
+chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
+ size_t alignment, bool base, bool *zero)
+{
+ void *ret;
+ extent_node_t *node;
+ extent_node_t key;
+ size_t alloc_size, leadsize, trailsize;
+ bool zeroed;
+
+ if (base) {
+ /*
+ * This function may need to call base_node_{,de}alloc(), but
+ * the current chunk allocation request is on behalf of the
+ * base allocator. Avoid deadlock (and if that weren't an
+ * issue, potential for infinite recursion) by returning NULL.
+ */
+ return (NULL);
+ }
+
+ alloc_size = size + alignment - chunksize;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ key.addr = NULL;
+ key.size = alloc_size;
+ malloc_mutex_lock(&chunks_mtx);
+ node = extent_tree_szad_nsearch(chunks_szad, &key);
+ if (node == NULL) {
+ malloc_mutex_unlock(&chunks_mtx);
+ return (NULL);
+ }
+ leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
+ (uintptr_t)node->addr;
+ assert(node->size >= leadsize + size);
+ trailsize = node->size - leadsize - size;
+ ret = (void *)((uintptr_t)node->addr + leadsize);
+ zeroed = node->zeroed;
+ if (zeroed)
+ *zero = true;
+ /* Remove node from the tree. */
+ extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_ad_remove(chunks_ad, node);
+ if (leadsize != 0) {
+ /* Insert the leading space as a smaller chunk. */
+ node->size = leadsize;
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ node = NULL;
+ }
+ if (trailsize != 0) {
+ /* Insert the trailing space as a smaller chunk. */
+ if (node == NULL) {
+ /*
+ * An additional node is required, but
+ * base_node_alloc() can cause a new base chunk to be
+ * allocated. Drop chunks_mtx in order to avoid
+ * deadlock, and if node allocation fails, deallocate
+ * the result before returning an error.
+ */
+ malloc_mutex_unlock(&chunks_mtx);
+ node = base_node_alloc();
+ if (node == NULL) {
+ chunk_dealloc(ret, size);
+ return (NULL);
+ }
+ malloc_mutex_lock(&chunks_mtx);
+ }
+ node->addr = (void *)((uintptr_t)(ret) + size);
+ node->size = trailsize;
+ node->zeroed = zeroed;
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ node = NULL;
+ }
+
+ if (config_munmap && config_recycle)
+ recycled_size -= size;
+
+ malloc_mutex_unlock(&chunks_mtx);
+
+ if (node != NULL)
+ base_node_dealloc(node);
+#ifdef MALLOC_DECOMMIT
+ pages_commit(ret, size);
+#endif
+ if (*zero) {
+ if (zeroed == false)
+ memset(ret, 0, size);
+#ifdef DEBUG
+ else {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
+#endif
+ }
+ return (ret);
+}
+
+#ifdef MOZ_MEMORY_WINDOWS
+/*
+ * On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
+ * awkward to recycle allocations of varying sizes. Therefore we only allow
+ * recycling when the size equals the chunksize, unless deallocation is entirely
+ * disabled.
+ */
+#define CAN_RECYCLE(size) (size == chunksize)
+#else
+#define CAN_RECYCLE(size) true
+#endif
+
+static void *
+chunk_alloc(size_t size, size_t alignment, bool base, bool zero)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ if (!config_munmap || (config_recycle && CAN_RECYCLE(size))) {
+ ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
+ size, alignment, base, &zero);
+ if (ret != NULL)
+ goto RETURN;
+ }
+ ret = chunk_alloc_mmap(size, alignment);
+ if (ret != NULL) {
+ goto RETURN;
+ }
+
+ /* All strategies for allocation failed. */
+ ret = NULL;
+RETURN:
+
+#ifdef MALLOC_VALIDATE
+ if (ret != NULL && base == false) {
+ if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
+ chunk_dealloc(ret, size);
+ return (NULL);
+ }
+ }
+#endif
+
+ assert(CHUNK_ADDR2BASE(ret) == ret);
+ return (ret);
+}
+
+static void
+chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
+ size_t size)
+{
+ bool unzeroed;
+ extent_node_t *xnode, *node, *prev, *xprev, key;
+
+ unzeroed = pages_purge(chunk, size);
+
+ /*
+ * Allocate a node before acquiring chunks_mtx even though it might not
+ * be needed, because base_node_alloc() may cause a new base chunk to
+ * be allocated, which could cause deadlock if chunks_mtx were already
+ * held.
+ */
+ xnode = base_node_alloc();
+ /* Use xprev to implement conditional deferred deallocation of prev. */
+ xprev = NULL;
+
+ malloc_mutex_lock(&chunks_mtx);
+ key.addr = (void *)((uintptr_t)chunk + size);
+ node = extent_tree_ad_nsearch(chunks_ad, &key);
+ /* Try to coalesce forward. */
+ if (node != NULL && node->addr == key.addr) {
+ /*
+ * Coalesce chunk with the following address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert from/into chunks_szad.
+ */
+ extent_tree_szad_remove(chunks_szad, node);
+ node->addr = chunk;
+ node->size += size;
+ node->zeroed = (node->zeroed && (unzeroed == false));
+ extent_tree_szad_insert(chunks_szad, node);
+ } else {
+ /* Coalescing forward failed, so insert a new node. */
+ if (xnode == NULL) {
+ /*
+ * base_node_alloc() failed, which is an exceedingly
+ * unlikely failure. Leak chunk; its pages have
+ * already been purged, so this is only a virtual
+ * memory leak.
+ */
+ goto label_return;
+ }
+ node = xnode;
+ xnode = NULL; /* Prevent deallocation below. */
+ node->addr = chunk;
+ node->size = size;
+ node->zeroed = (unzeroed == false);
+ extent_tree_ad_insert(chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ }
+
+ /* Try to coalesce backward. */
+ prev = extent_tree_ad_prev(chunks_ad, node);
+ if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+ chunk) {
+ /*
+ * Coalesce chunk with the previous address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert node from/into chunks_szad.
+ */
+ extent_tree_szad_remove(chunks_szad, prev);
+ extent_tree_ad_remove(chunks_ad, prev);
+
+ extent_tree_szad_remove(chunks_szad, node);
+ node->addr = prev->addr;
+ node->size += prev->size;
+ node->zeroed = (node->zeroed && prev->zeroed);
+ extent_tree_szad_insert(chunks_szad, node);
+
+ xprev = prev;
+ }
+
+ if (config_munmap && config_recycle)
+ recycled_size += size;
+
+label_return:
+ malloc_mutex_unlock(&chunks_mtx);
+ /*
+ * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+ * avoid potential deadlock.
+ */
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+ if (xprev != NULL)
+ base_node_dealloc(xprev);
+}
+
+static bool
+chunk_dalloc_mmap(void *chunk, size_t size)
+{
+ if (!config_munmap || (config_recycle && CAN_RECYCLE(size) &&
+ load_acquire_z(&recycled_size) < recycle_limit))
+ return true;
+
+ pages_unmap(chunk, size);
+ return false;
+}
+
+#undef CAN_RECYCLE
+
+static void
+chunk_dealloc(void *chunk, size_t size)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+#ifdef MALLOC_VALIDATE
+ malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
+#endif
+
+ if (chunk_dalloc_mmap(chunk, size))
+ chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+}
+
+/*
+ * End chunk management functions.
+ */
+/******************************************************************************/
+/*
+ * Begin arena.
+ */
+
+/*
+ * Choose an arena based on a per-thread value (fast-path code, calls slow-path
+ * code if necessary).
+ */
+static inline arena_t *
+choose_arena(void)
+{
+ arena_t *ret;
+
+ /*
+ * We can only use TLS if this is a PIC library, since for the static
+ * library version, libc's malloc is used by TLS allocation, which
+ * introduces a bootstrapping issue.
+ */
+#ifndef NO_TLS
+ if (isthreaded == false) {
+ /* Avoid the overhead of TLS for single-threaded operation. */
+ return (arenas[0]);
+ }
+
+# ifdef MOZ_MEMORY_WINDOWS
+ ret = (arena_t*)TlsGetValue(tlsIndex);
+# else
+ ret = arenas_map;
+# endif
+
+ if (ret == NULL) {
+ ret = choose_arena_hard();
+ RELEASE_ASSERT(ret != NULL);
+ }
+#else
+ if (isthreaded && narenas > 1) {
+ unsigned long ind;
+
+ /*
+ * Hash _pthread_self() to one of the arenas. There is a prime
+ * number of arenas, so this has a reasonable chance of
+ * working. Even so, the hashing can be easily thwarted by
+ * inconvenient _pthread_self() values. Without specific
+ * knowledge of how _pthread_self() calculates values, we can't
+ * easily do much better than this.
+ */
+ ind = (unsigned long) _pthread_self() % narenas;
+
+ /*
+		 * Optimistically assume that arenas[ind] has been initialized.
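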
+ * At worst, we find out that some other thread has already
+ * done so, after acquiring the lock in preparation. Note that
+ * this lazy locking also has the effect of lazily forcing
+ * cache coherency; without the lock acquisition, there's no
+ * guarantee that modification of arenas[ind] by another thread
+ * would be seen on this CPU for an arbitrary amount of time.
+ *
+ * In general, this approach to modifying a synchronized value
+ * isn't a good idea, but in this case we only ever modify the
+ * value once, so things work out well.
+ */
+ ret = arenas[ind];
+ if (ret == NULL) {
+ /*
+ * Avoid races with another thread that may have already
+ * initialized arenas[ind].
+ */
+ malloc_spin_lock(&arenas_lock);
+ if (arenas[ind] == NULL)
+ ret = arenas_extend((unsigned)ind);
+ else
+ ret = arenas[ind];
+ malloc_spin_unlock(&arenas_lock);
+ }
+ } else
+ ret = arenas[0];
+#endif
+
+ RELEASE_ASSERT(ret != NULL);
+ return (ret);
+}
+
+#ifndef NO_TLS
+/*
+ * Choose an arena based on a per-thread value (slow-path code only, called
+ * only by choose_arena()).
+ */
+static arena_t *
+choose_arena_hard(void)
+{
+ arena_t *ret;
+
+ assert(isthreaded);
+
+#ifdef MALLOC_BALANCE
+ /* Seed the PRNG used for arena load balancing. */
+ SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
+#endif
+
+ if (narenas > 1) {
+#ifdef MALLOC_BALANCE
+ unsigned ind;
+
+ ind = PRN(balance, narenas_2pow);
+ if ((ret = arenas[ind]) == NULL) {
+ malloc_spin_lock(&arenas_lock);
+ if ((ret = arenas[ind]) == NULL)
+ ret = arenas_extend(ind);
+ malloc_spin_unlock(&arenas_lock);
+ }
+#else
+ malloc_spin_lock(&arenas_lock);
+ if ((ret = arenas[next_arena]) == NULL)
+ ret = arenas_extend(next_arena);
+ next_arena = (next_arena + 1) % narenas;
+ malloc_spin_unlock(&arenas_lock);
+#endif
+ } else
+ ret = arenas[0];
+
+#ifdef MOZ_MEMORY_WINDOWS
+ TlsSetValue(tlsIndex, ret);
+#else
+ arenas_map = ret;
+#endif
+
+ return (ret);
+}
+#endif
+
+static inline int
+arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
+{
+ uintptr_t a_chunk = (uintptr_t)a;
+ uintptr_t b_chunk = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
+}
+
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
+ arena_chunk_t, link_dirty, arena_chunk_comp)
+
+static inline int
+arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+{
+ uintptr_t a_mapelm = (uintptr_t)a;
+ uintptr_t b_mapelm = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+}
+
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
+ arena_run_comp)
+
+static inline int
+arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+{
+ int ret;
+ size_t a_size = a->bits & ~pagesize_mask;
+ size_t b_size = b->bits & ~pagesize_mask;
+
+ ret = (a_size > b_size) - (a_size < b_size);
+ if (ret == 0) {
+ uintptr_t a_mapelm, b_mapelm;
+
+ if ((a->bits & CHUNK_MAP_KEY) == 0)
+ a_mapelm = (uintptr_t)a;
+ else {
+ /*
+ * Treat keys as though they are lower than anything
+ * else.
+ */
+ a_mapelm = 0;
+ }
+ b_mapelm = (uintptr_t)b;
+
+ ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
+ }
+
+ return (ret);
+}
+
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
+ arena_avail_comp)
+
+static inline void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
+{
+ void *ret;
+ unsigned i, mask, bit, regind;
+
+ assert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->regs_minelm < bin->regs_mask_nelms);
+
+ /*
+ * Move the first check outside the loop, so that run->regs_minelm can
+ * be updated unconditionally, without the possibility of updating it
+ * multiple times.
+ */
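+	/*
+	 * Each regs_mask element tracks 1 << (SIZEOF_INT_2POW + 3) regions
+	 * (32 with 4-byte ints); a set bit marks a free region, so element i,
+	 * bit b corresponds to region index (i << (SIZEOF_INT_2POW + 3)) + b.
+	 */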
+ i = run->regs_minelm;
+ mask = run->regs_mask[i];
+ if (mask != 0) {
+ /* Usable allocation found. */
+ bit = ffs((int)mask) - 1;
+
+ regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+ assert(regind < bin->nregs);
+ ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+ + (bin->reg_size * regind));
+
+ /* Clear bit. */
+ mask ^= (1U << bit);
+ run->regs_mask[i] = mask;
+
+ return (ret);
+ }
+
+ for (i++; i < bin->regs_mask_nelms; i++) {
+ mask = run->regs_mask[i];
+ if (mask != 0) {
+ /* Usable allocation found. */
+ bit = ffs((int)mask) - 1;
+
+ regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+ assert(regind < bin->nregs);
+ ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+ + (bin->reg_size * regind));
+
+ /* Clear bit. */
+ mask ^= (1U << bit);
+ run->regs_mask[i] = mask;
+
+ /*
+ * Make a note that nothing before this element
+ * contains a free region.
+ */
+ run->regs_minelm = i; /* Low payoff: + (mask == 0); */
+
+ return (ret);
+ }
+ }
+ /* Not reached. */
+ RELEASE_ASSERT(0);
+ return (NULL);
+}
+
+static inline void
+arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
+{
+ /*
+ * To divide by a number D that is not a power of two we multiply
+ * by (2^21 / D) and then right shift by 21 positions.
+ *
+ * X / D
+ *
+ * becomes
+ *
+ * (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
+ */
+#define SIZE_INV_SHIFT 21
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
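+	/*
+	 * Worked example (a sketch, assuming QUANTUM_2POW_MIN == 4): for a
+	 * region size of 48, size_invs[(48 >> 4) - 3] == SIZE_INV(3) ==
+	 * (2^21 / 48) + 1 == 43691, so an offset diff of 96 yields
+	 * (96 * 43691) >> 21 == 2 == 96 / 48, with no hardware divide.
+	 */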
+ static const unsigned size_invs[] = {
+ SIZE_INV(3),
+ SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+ SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+ SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+ SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+ SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+ SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+ SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+#if (QUANTUM_2POW_MIN < 4)
+ ,
+ SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
+ SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
+ SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
+ SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
+ SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
+ SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
+ SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
+ SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
+#endif
+ };
+ unsigned diff, regind, elm, bit;
+
+ assert(run->magic == ARENA_RUN_MAGIC);
+ assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
+ >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
+
+ /*
+ * Avoid doing division with a variable divisor if possible. Using
+ * actual division here can reduce allocator throughput by over 20%!
+ */
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
+ if ((size & (size - 1)) == 0) {
+ /*
+ * log2_table allows fast division of a power of two in the
+ * [1..128] range.
+ *
+ * (x / divisor) becomes (x >> log2_table[divisor - 1]).
+ */
+ static const unsigned char log2_table[] = {
+ 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+ };
+
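+		/*
+		 * For example, a 16-byte region size gives log2_table[15] == 4,
+		 * so diff / 16 becomes diff >> 4.
+		 */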
+ if (size <= 128)
+ regind = (diff >> log2_table[size - 1]);
+ else if (size <= 32768)
+ regind = diff >> (8 + log2_table[(size >> 8) - 1]);
+ else {
+ /*
+ * The run size is too large for us to use the lookup
+ * table. Use real division.
+ */
+ regind = diff / size;
+ }
+ } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
+ << QUANTUM_2POW_MIN) + 2) {
+ regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
+ regind >>= SIZE_INV_SHIFT;
+ } else {
+ /*
+ * size_invs isn't large enough to handle this size class, so
+ * calculate regind using actual division. This only happens
+ * if the user increases small_max via the 'S' runtime
+ * configuration option.
+ */
+ regind = diff / size;
+	}
+ RELEASE_ASSERT(diff == regind * size);
+ RELEASE_ASSERT(regind < bin->nregs);
+
+ elm = regind >> (SIZEOF_INT_2POW + 3);
+ if (elm < run->regs_minelm)
+ run->regs_minelm = elm;
+ bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+ RELEASE_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
+ run->regs_mask[elm] |= (1U << bit);
+#undef SIZE_INV
+#undef SIZE_INV_SHIFT
+}
+
+static void
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
+ bool zero)
+{
+ arena_chunk_t *chunk;
+ size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ old_ndirty = chunk->ndirty;
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+ >> pagesize_2pow);
+ total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
+ pagesize_2pow;
+ need_pages = (size >> pagesize_2pow);
+ assert(need_pages > 0);
+ assert(need_pages <= total_pages);
+ rem_pages = total_pages - need_pages;
+
+ arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
+
+ /* Keep track of trailing unused pages for later use. */
+ if (rem_pages > 0) {
+ chunk->map[run_ind+need_pages].bits = (rem_pages <<
+ pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
+ pagesize_mask);
+ chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
+ pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
+ pagesize_mask);
+ arena_avail_tree_insert(&arena->runs_avail,
+ &chunk->map[run_ind+need_pages]);
+ }
+
+ for (i = 0; i < need_pages; i++) {
+#if defined(MALLOC_DECOMMIT) || defined(MALLOC_STATS) || defined(MALLOC_DOUBLE_PURGE)
+ /*
+ * Commit decommitted pages if necessary. If a decommitted
+ * page is encountered, commit all needed adjacent decommitted
+ * pages in one operation, in order to reduce system call
+ * overhead.
+ */
+ if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
+ size_t j;
+
+ /*
+ * Advance i+j to just past the index of the last page
+ * to commit. Clear CHUNK_MAP_DECOMMITTED and
+ * CHUNK_MAP_MADVISED along the way.
+ */
+ for (j = 0; i + j < need_pages && (chunk->map[run_ind +
+ i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
+ /* DECOMMITTED and MADVISED are mutually exclusive. */
+ assert(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
+ chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
+
+ chunk->map[run_ind + i + j].bits &=
+ ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
+ }
+
+# ifdef MALLOC_DECOMMIT
+ pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
+ << pagesize_2pow)), (j << pagesize_2pow));
+# ifdef MALLOC_STATS
+ arena->stats.ncommit++;
+# endif
+# endif
+
+# ifdef MALLOC_STATS
+ arena->stats.committed += j;
+# endif
+
+# ifndef MALLOC_DECOMMIT
+ }
+# else
+ } else /* No need to zero since commit zeros. */
+# endif
+
+#endif
+
+ /* Zero if necessary. */
+ if (zero) {
+ if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
+ == 0) {
+ memset((void *)((uintptr_t)chunk + ((run_ind
+ + i) << pagesize_2pow)), 0, pagesize);
+ /* CHUNK_MAP_ZEROED is cleared below. */
+ }
+ }
+
+ /* Update dirty page accounting. */
+ if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
+ chunk->ndirty--;
+ arena->ndirty--;
+ /* CHUNK_MAP_DIRTY is cleared below. */
+ }
+
+ /* Initialize the chunk map. */
+ if (large) {
+ chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
+ | CHUNK_MAP_ALLOCATED;
+ } else {
+ chunk->map[run_ind + i].bits = (size_t)run
+ | CHUNK_MAP_ALLOCATED;
+ }
+ }
+
+ /*
+ * Set the run size only in the first element for large runs. This is
+ * primarily a debugging aid, since the lack of size info for trailing
+ * pages only matters if the application tries to operate on an
+ * interior pointer.
+ */
+ if (large)
+ chunk->map[run_ind].bits |= size;
+
+ if (chunk->ndirty == 0 && old_ndirty > 0)
+ arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
+}
+
+static void
+arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
+{
+ arena_run_t *run;
+ size_t i;
+
+#ifdef MALLOC_STATS
+ arena->stats.mapped += chunksize;
+#endif
+
+ chunk->arena = arena;
+
+ /*
+ * Claim that no pages are in use, since the header is merely overhead.
+ */
+ chunk->ndirty = 0;
+
+ /* Initialize the map to contain one maximal free untouched run. */
+ run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+ pagesize_2pow));
+ for (i = 0; i < arena_chunk_header_npages; i++)
+ chunk->map[i].bits = 0;
+ chunk->map[i].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
+ for (i++; i < chunk_npages-1; i++) {
+ chunk->map[i].bits = CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
+ }
+ chunk->map[chunk_npages-1].bits = arena_maxclass | CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;
+
+#ifdef MALLOC_DECOMMIT
+ /*
+ * Start out decommitted, in order to force a closer correspondence
+ * between dirty pages and committed untouched pages.
+ */
+ pages_decommit(run, arena_maxclass);
+# ifdef MALLOC_STATS
+ arena->stats.ndecommit++;
+ arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
+# endif
+#endif
+#ifdef MALLOC_STATS
+ arena->stats.committed += arena_chunk_header_npages;
+#endif
+
+ /* Insert the run into the runs_avail tree. */
+ arena_avail_tree_insert(&arena->runs_avail,
+ &chunk->map[arena_chunk_header_npages]);
+
+#ifdef MALLOC_DOUBLE_PURGE
+ LinkedList_Init(&chunk->chunks_madvised_elem);
+#endif
+}
+
+static void
+arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+{
+
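+	/*
+	 * Each arena caches at most one unused chunk in arena->spare, so that
+	 * a free followed shortly by another chunk allocation does not have
+	 * to go back through chunk_dealloc()/chunk_alloc().
+	 */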
+ if (arena->spare != NULL) {
+ if (arena->spare->ndirty > 0) {
+ arena_chunk_tree_dirty_remove(
+ &chunk->arena->chunks_dirty, arena->spare);
+ arena->ndirty -= arena->spare->ndirty;
+#ifdef MALLOC_STATS
+ arena->stats.committed -= arena->spare->ndirty;
+#endif
+ }
+
+#ifdef MALLOC_DOUBLE_PURGE
+ /* This is safe to do even if arena->spare is not in the list. */
+ LinkedList_Remove(&arena->spare->chunks_madvised_elem);
+#endif
+
+ chunk_dealloc((void *)arena->spare, chunksize);
+#ifdef MALLOC_STATS
+ arena->stats.mapped -= chunksize;
+ arena->stats.committed -= arena_chunk_header_npages;
+#endif
+ }
+
+ /*
+ * Remove run from runs_avail, so that the arena does not use it.
+ * Dirty page flushing only uses the chunks_dirty tree, so leaving this
+ * chunk in the chunks_* trees is sufficient for that purpose.
+ */
+ arena_avail_tree_remove(&arena->runs_avail,
+ &chunk->map[arena_chunk_header_npages]);
+
+ arena->spare = chunk;
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
+ bool zero)
+{
+ arena_run_t *run;
+ arena_chunk_map_t *mapelm, key;
+
+ assert(size <= arena_maxclass);
+ assert((size & pagesize_mask) == 0);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ key.bits = size | CHUNK_MAP_KEY;
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *chunk =
+ (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = ((uintptr_t)mapelm -
+ (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t);
+
+ run = (arena_run_t *)((uintptr_t)chunk + (pageind
+ << pagesize_2pow));
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+
+ if (arena->spare != NULL) {
+ /* Use the spare. */
+ arena_chunk_t *chunk = arena->spare;
+ arena->spare = NULL;
+ run = (arena_run_t *)((uintptr_t)chunk +
+ (arena_chunk_header_npages << pagesize_2pow));
+ /* Insert the run into the runs_avail tree. */
+ arena_avail_tree_insert(&arena->runs_avail,
+ &chunk->map[arena_chunk_header_npages]);
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+ }
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate
+ * the run.
+ */
+ {
+ arena_chunk_t *chunk = (arena_chunk_t *)
+ chunk_alloc(chunksize, chunksize, false, true);
+ if (chunk == NULL)
+ return (NULL);
+
+ arena_chunk_init(arena, chunk);
+ run = (arena_run_t *)((uintptr_t)chunk +
+ (arena_chunk_header_npages << pagesize_2pow));
+ }
+ /* Update page map. */
+ arena_run_split(arena, run, size, large, zero);
+ return (run);
+}
+
+static void
+arena_purge(arena_t *arena, bool all)
+{
+ arena_chunk_t *chunk;
+ size_t i, npages;
+	/*
+	 * If all is set, purge all dirty pages: a dirty_max of 1 makes the
+	 * (dirty_max >> 1) threshold below evaluate to zero.
+	 */
+ size_t dirty_max = all ? 1 : opt_dirty_max;
+#ifdef MALLOC_DEBUG
+ size_t ndirty = 0;
+ rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
+ chunk) {
+ ndirty += chunk->ndirty;
+ } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
+ assert(ndirty == arena->ndirty);
+#endif
+ RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
+
+#ifdef MALLOC_STATS
+ arena->stats.npurge++;
+#endif
+
+ /*
+ * Iterate downward through chunks until enough dirty memory has been
+ * purged. Terminate as soon as possible in order to minimize the
+ * number of system calls, even if a chunk has only been partially
+ * purged.
+ */
+ while (arena->ndirty > (dirty_max >> 1)) {
+#ifdef MALLOC_DOUBLE_PURGE
+ bool madvised = false;
+#endif
+ chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
+ RELEASE_ASSERT(chunk != NULL);
+
+ for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
+ RELEASE_ASSERT(i >= arena_chunk_header_npages);
+
+ if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
+#ifdef MALLOC_DECOMMIT
+ const size_t free_operation = CHUNK_MAP_DECOMMITTED;
+#else
+ const size_t free_operation = CHUNK_MAP_MADVISED;
+#endif
+ assert((chunk->map[i].bits &
+ CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+ chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
+ /* Find adjacent dirty run(s). */
+ for (npages = 1;
+ i > arena_chunk_header_npages &&
+ (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
+ npages++) {
+ i--;
+ assert((chunk->map[i].bits &
+ CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+ chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
+ }
+ chunk->ndirty -= npages;
+ arena->ndirty -= npages;
+
+#ifdef MALLOC_DECOMMIT
+ pages_decommit((void *)((uintptr_t)
+ chunk + (i << pagesize_2pow)),
+ (npages << pagesize_2pow));
+# ifdef MALLOC_STATS
+ arena->stats.ndecommit++;
+ arena->stats.decommitted += npages;
+# endif
+#endif
+#ifdef MALLOC_STATS
+ arena->stats.committed -= npages;
+#endif
+
+#ifndef MALLOC_DECOMMIT
+ madvise((void *)((uintptr_t)chunk + (i <<
+ pagesize_2pow)), (npages << pagesize_2pow),
+ MADV_FREE);
+# ifdef MALLOC_DOUBLE_PURGE
+ madvised = true;
+# endif
+#endif
+#ifdef MALLOC_STATS
+ arena->stats.nmadvise++;
+ arena->stats.purged += npages;
+#endif
+ if (arena->ndirty <= (dirty_max >> 1))
+ break;
+ }
+ }
+
+ if (chunk->ndirty == 0) {
+ arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
+ chunk);
+ }
+#ifdef MALLOC_DOUBLE_PURGE
+ if (madvised) {
+ /* The chunk might already be in the list, but this
+ * makes sure it's at the front. */
+ LinkedList_Remove(&chunk->chunks_madvised_elem);
+ LinkedList_InsertHead(&arena->chunks_madvised, &chunk->chunks_madvised_elem);
+ }
+#endif
+ }
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+{
+ arena_chunk_t *chunk;
+ size_t size, run_ind, run_pages;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
+ >> pagesize_2pow);
+ RELEASE_ASSERT(run_ind >= arena_chunk_header_npages);
+ RELEASE_ASSERT(run_ind < chunk_npages);
+ if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
+ size = chunk->map[run_ind].bits & ~pagesize_mask;
+ else
+ size = run->bin->run_size;
+ run_pages = (size >> pagesize_2pow);
+
+ /* Mark pages as unallocated in the chunk map. */
+ if (dirty) {
+ size_t i;
+
+ for (i = 0; i < run_pages; i++) {
+ RELEASE_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+ == 0);
+ chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
+ }
+
+ if (chunk->ndirty == 0) {
+ arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
+ chunk);
+ }
+ chunk->ndirty += run_pages;
+ arena->ndirty += run_pages;
+ } else {
+ size_t i;
+
+ for (i = 0; i < run_pages; i++) {
+ chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED);
+ }
+ }
+ chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+ pagesize_mask);
+ chunk->map[run_ind+run_pages-1].bits = size |
+ (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+
+ /* Try to coalesce forward. */
+ if (run_ind + run_pages < chunk_npages &&
+ (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
+ size_t nrun_size = chunk->map[run_ind+run_pages].bits &
+ ~pagesize_mask;
+
+ /*
+ * Remove successor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ arena_avail_tree_remove(&arena->runs_avail,
+ &chunk->map[run_ind+run_pages]);
+
+ size += nrun_size;
+ run_pages = size >> pagesize_2pow;
+
+ RELEASE_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+ == nrun_size);
+ chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+ pagesize_mask);
+ chunk->map[run_ind+run_pages-1].bits = size |
+ (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+ }
+
+ /* Try to coalesce backward. */
+ if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
+ CHUNK_MAP_ALLOCATED) == 0) {
+ size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
+
+ run_ind -= prun_size >> pagesize_2pow;
+
+ /*
+ * Remove predecessor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ arena_avail_tree_remove(&arena->runs_avail,
+ &chunk->map[run_ind]);
+
+ size += prun_size;
+ run_pages = size >> pagesize_2pow;
+
+ RELEASE_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+ prun_size);
+ chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+ pagesize_mask);
+ chunk->map[run_ind+run_pages-1].bits = size |
+ (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+ }
+
+ /* Insert into runs_avail, now that coalescing is complete. */
+ arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
+
+ /* Deallocate chunk if it is now completely unused. */
+ if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
+ CHUNK_MAP_ALLOCATED)) == arena_maxclass)
+ arena_chunk_dealloc(arena, chunk);
+
+ /* Enforce opt_dirty_max. */
+ if (arena->ndirty > opt_dirty_max)
+ arena_purge(arena, false);
+}
+
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize)
+{
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
+ size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * leading run as separately allocated.
+ */
+ chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+
+ arena_run_dalloc(arena, run, false);
+}
+
+static void
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize, bool dirty)
+{
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
+ size_t npages = newsize >> pagesize_2pow;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * trailing run as separately allocated.
+ */
+ chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
+ | CHUNK_MAP_ALLOCATED;
+
+ arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
+ dirty);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+{
+ arena_chunk_map_t *mapelm;
+ arena_run_t *run;
+ unsigned i, remainder;
+
+ /* Look for a usable run. */
+ mapelm = arena_run_tree_first(&bin->runs);
+ if (mapelm != NULL) {
+ /* run is guaranteed to have available space. */
+ arena_run_tree_remove(&bin->runs, mapelm);
+ run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
+#ifdef MALLOC_STATS
+ bin->stats.reruns++;
+#endif
+ return (run);
+ }
+ /* No existing runs have any space available. */
+
+ /* Allocate a new run. */
+ run = arena_run_alloc(arena, bin, bin->run_size, false, false);
+ if (run == NULL)
+ return (NULL);
+ /*
+ * Don't initialize if a race in arena_run_alloc() allowed an existing
+ * run to become usable.
+ */
+ if (run == bin->runcur)
+ return (run);
+
+ /* Initialize run internals. */
+ run->bin = bin;
+
+ for (i = 0; i < bin->regs_mask_nelms - 1; i++)
+ run->regs_mask[i] = UINT_MAX;
+ remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+ if (remainder == 0)
+ run->regs_mask[i] = UINT_MAX;
+ else {
+ /* The last element has spare bits that need to be unset. */
+ run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
+ - remainder));
+ }
+
+ run->regs_minelm = 0;
+
+ run->nfree = bin->nregs;
+#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+ run->magic = ARENA_RUN_MAGIC;
+#endif
+
+#ifdef MALLOC_STATS
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ if (bin->stats.curruns > bin->stats.highruns)
+ bin->stats.highruns = bin->stats.curruns;
+#endif
+ return (run);
+}
+
+/* bin->runcur must have space available before this function is called. */
+static inline void *
+arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
+{
+ void *ret;
+
+ RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ RELEASE_ASSERT(run->nfree > 0);
+
+ ret = arena_run_reg_alloc(run, bin);
+ RELEASE_ASSERT(ret != NULL);
+ run->nfree--;
+
+ return (ret);
+}
+
+/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
+static void *
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+{
+
+ bin->runcur = arena_bin_nonfull_run_get(arena, bin);
+ if (bin->runcur == NULL)
+ return (NULL);
+ RELEASE_ASSERT(bin->runcur->magic == ARENA_RUN_MAGIC);
+ RELEASE_ASSERT(bin->runcur->nfree > 0);
+
+ return (arena_bin_malloc_easy(arena, bin, bin->runcur));
+}
+
+/*
+ * Calculate bin->run_size such that it meets the following constraints:
+ *
+ * *) bin->run_size >= min_run_size
+ * *) bin->run_size <= arena_maxclass
+ * *) bin->run_size <= RUN_MAX_SMALL
+ * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ *
+ * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
+ * also calculated here, since these settings are all interdependent.
+ */
+static size_t
+arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
+{
+ size_t try_run_size, good_run_size;
+ unsigned good_nregs, good_mask_nelms, good_reg0_offset;
+ unsigned try_nregs, try_mask_nelms, try_reg0_offset;
+
+ assert(min_run_size >= pagesize);
+ assert(min_run_size <= arena_maxclass);
+
+ /*
+ * Calculate known-valid settings before entering the run_size
+ * expansion loop, so that the first part of the loop always copies
+ * valid settings.
+ *
+ * The do..while loop iteratively reduces the number of regions until
+ * the run header and the regions no longer overlap. A closed formula
+ * would be quite messy, since there is an interdependency between the
+ * header's mask length and the number of regions.
+ */
+ try_run_size = min_run_size;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
+ + 1; /* Counter-act try_nregs-- in loop. */
+ do {
+ try_nregs--;
+ try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+ ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+ try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
+ } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
+ > try_reg0_offset);
+
+ /* run_size expansion loop. */
+ do {
+ /*
+ * Copy valid settings before trying more aggressive settings.
+ */
+ good_run_size = try_run_size;
+ good_nregs = try_nregs;
+ good_mask_nelms = try_mask_nelms;
+ good_reg0_offset = try_reg0_offset;
+
+ /* Try more aggressive settings. */
+ try_run_size += pagesize;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+ bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
+ do {
+ try_nregs--;
+ try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+ ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
+ 1 : 0);
+ try_reg0_offset = try_run_size - (try_nregs *
+ bin->reg_size);
+ } while (sizeof(arena_run_t) + (sizeof(unsigned) *
+ (try_mask_nelms - 1)) > try_reg0_offset);
+ } while (try_run_size <= arena_maxclass
+ && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
+ && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
+
+ assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
+ <= good_reg0_offset);
+ assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+
+ /* Copy final settings. */
+ bin->run_size = good_run_size;
+ bin->nregs = good_nregs;
+ bin->regs_mask_nelms = good_mask_nelms;
+ bin->reg0_offset = good_reg0_offset;
+
+ return (good_run_size);
+}
+
+#ifdef MALLOC_BALANCE
+static inline void
+arena_lock_balance(arena_t *arena)
+{
+ unsigned contention;
+
+ contention = malloc_spin_lock(&arena->lock);
+ if (narenas > 1) {
+ /*
+ * Calculate the exponentially averaged contention for this
+ * arena. Due to integer math always rounding down, this value
+		 * decays somewhat faster than normal.
+ */
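+		/*
+		 * That is, an exponential moving average giving the newest
+		 * sample a weight of 1 / 2^BALANCE_ALPHA_INV_2POW.
+		 */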
+ arena->contention = (((uint64_t)arena->contention
+ * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW)-1))
+ + (uint64_t)contention) >> BALANCE_ALPHA_INV_2POW;
+ if (arena->contention >= opt_balance_threshold)
+ arena_lock_balance_hard(arena);
+ }
+}
+
+static void
+arena_lock_balance_hard(arena_t *arena)
+{
+ uint32_t ind;
+
+ arena->contention = 0;
+#ifdef MALLOC_STATS
+ arena->stats.nbalance++;
+#endif
+ ind = PRN(balance, narenas_2pow);
+ if (arenas[ind] != NULL) {
+#ifdef MOZ_MEMORY_WINDOWS
+ TlsSetValue(tlsIndex, arenas[ind]);
+#else
+ arenas_map = arenas[ind];
+#endif
+ } else {
+ malloc_spin_lock(&arenas_lock);
+ if (arenas[ind] != NULL) {
+#ifdef MOZ_MEMORY_WINDOWS
+ TlsSetValue(tlsIndex, arenas[ind]);
+#else
+ arenas_map = arenas[ind];
+#endif
+ } else {
+#ifdef MOZ_MEMORY_WINDOWS
+ TlsSetValue(tlsIndex, arenas_extend(ind));
+#else
+ arenas_map = arenas_extend(ind);
+#endif
+ }
+ malloc_spin_unlock(&arenas_lock);
+ }
+}
+#endif
+
+static inline void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
+{
+ void *ret;
+ arena_bin_t *bin;
+ arena_run_t *run;
+
+ if (size < small_min) {
+ /* Tiny. */
+ size = pow2_ceil(size);
+ bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
+ 1)))];
+#if (!defined(NDEBUG) || defined(MALLOC_STATS))
+ /*
+ * Bin calculation is always correct, but we may need
+ * to fix size for the purposes of assertions and/or
+ * stats accuracy.
+ */
+ if (size < (1U << TINY_MIN_2POW))
+ size = (1U << TINY_MIN_2POW);
+#endif
+ } else if (size <= small_max) {
+ /* Quantum-spaced. */
+ size = QUANTUM_CEILING(size);
+ bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
+ - 1];
+ } else {
+ /* Sub-page. */
+ size = pow2_ceil(size);
+ bin = &arena->bins[ntbins + nqbins
+ + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
+ }
+ RELEASE_ASSERT(size == bin->reg_size);
+
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ret = arena_bin_malloc_easy(arena, bin, run);
+ else
+ ret = arena_bin_malloc_hard(arena, bin);
+
+ if (ret == NULL) {
+ malloc_spin_unlock(&arena->lock);
+ return (NULL);
+ }
+
+#ifdef MALLOC_STATS
+ bin->stats.nrequests++;
+ arena->stats.nmalloc_small++;
+ arena->stats.allocated_small += size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+
+ if (zero == false) {
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xe4, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+#endif
+ } else
+ memset(ret, 0, size);
+
+ return (ret);
+}
+
+static void *
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
+{
+ void *ret;
+
+ /* Large allocation. */
+ size = PAGE_CEILING(size);
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ ret = (void *)arena_run_alloc(arena, NULL, size, true, zero);
+ if (ret == NULL) {
+ malloc_spin_unlock(&arena->lock);
+ return (NULL);
+ }
+#ifdef MALLOC_STATS
+ arena->stats.nmalloc_large++;
+ arena->stats.allocated_large += size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+
+ if (zero == false) {
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xe4, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+#endif
+ }
+
+ return (ret);
+}
+
+static inline void *
+arena_malloc(arena_t *arena, size_t size, bool zero)
+{
+
+ assert(arena != NULL);
+ RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
+ assert(size != 0);
+ assert(QUANTUM_CEILING(size) <= arena_maxclass);
+
+ if (size <= bin_maxclass) {
+ return (arena_malloc_small(arena, size, zero));
+ } else
+ return (arena_malloc_large(arena, size, zero));
+}
+
+static inline void *
+imalloc(size_t size)
+{
+
+ assert(size != 0);
+
+ if (size <= arena_maxclass)
+ return (arena_malloc(choose_arena(), size, false));
+ else
+ return (huge_malloc(size, false));
+}
+
+static inline void *
+icalloc(size_t size)
+{
+
+ if (size <= arena_maxclass)
+ return (arena_malloc(choose_arena(), size, true));
+ else
+ return (huge_malloc(size, true));
+}
+
+/* Only handles large allocations that require more than page alignment. */
+static void *
+arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
+{
+ void *ret;
+ size_t offset;
+ arena_chunk_t *chunk;
+
+ assert((size & pagesize_mask) == 0);
+ assert((alignment & pagesize_mask) == 0);
+
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ ret = (void *)arena_run_alloc(arena, NULL, alloc_size, true, false);
+ if (ret == NULL) {
+ malloc_spin_unlock(&arena->lock);
+ return (NULL);
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+
+ offset = (uintptr_t)ret & (alignment - 1);
+ assert((offset & pagesize_mask) == 0);
+ assert(offset < alloc_size);
+ if (offset == 0)
+ arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
+ else {
+ size_t leadsize, trailsize;
+
+ leadsize = alignment - offset;
+ if (leadsize > 0) {
+ arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
+ alloc_size - leadsize);
+ ret = (void *)((uintptr_t)ret + leadsize);
+ }
+
+ trailsize = alloc_size - leadsize - size;
+ if (trailsize != 0) {
+ /* Trim trailing space. */
+ assert(trailsize < alloc_size);
+ arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
+ size, false);
+ }
+ }
+
+#ifdef MALLOC_STATS
+ arena->stats.nmalloc_large++;
+ arena->stats.allocated_large += size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xe4, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+#endif
+ return (ret);
+}
+
+static inline void *
+ipalloc(size_t alignment, size_t size)
+{
+ void *ret;
+ size_t ceil_size;
+
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each small
+ * size class, every object is aligned at the smallest power of two
+ * that is non-zero in the base two representation of the size. For
+ * example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ *
+ * Depending on runtime settings, it is possible that arena_malloc()
+ * will further round up to a power of two, but that never causes
+ * correctness issues.
+ */
+ ceil_size = (size + (alignment - 1)) & (-alignment);
+ /*
+ * (ceil_size < size) protects against the combination of maximal
+ * alignment and size greater than maximal alignment.
+ */
+ if (ceil_size < size) {
+ /* size_t overflow. */
+ return (NULL);
+ }
+
+ if (ceil_size <= pagesize || (alignment <= pagesize
+ && ceil_size <= arena_maxclass))
+ ret = arena_malloc(choose_arena(), ceil_size, false);
+ else {
+ size_t run_size;
+
+ /*
+ * We can't achieve sub-page alignment, so round up alignment
+ * permanently; it makes later calculations simpler.
+ */
+ alignment = PAGE_CEILING(alignment);
+ ceil_size = PAGE_CEILING(size);
+ /*
+ * (ceil_size < size) protects against very large sizes within
+ * pagesize of SIZE_T_MAX.
+ *
+ * (ceil_size + alignment < ceil_size) protects against the
+ * combination of maximal alignment and ceil_size large enough
+ * to cause overflow. This is similar to the first overflow
+ * check above, but it needs to be repeated due to the new
+ * ceil_size value, which may now be *equal* to maximal
+ * alignment, whereas before we only detected overflow if the
+ * original size was *greater* than maximal alignment.
+ */
+ if (ceil_size < size || ceil_size + alignment < ceil_size) {
+ /* size_t overflow. */
+ return (NULL);
+ }
+
+ /*
+ * Calculate the size of the over-size run that arena_palloc()
+ * would need to allocate in order to guarantee the alignment.
+ */
+ if (ceil_size >= alignment)
+ run_size = ceil_size + alignment - pagesize;
+ else {
+ /*
+ * It is possible that (alignment << 1) will cause
+ * overflow, but it doesn't matter because we also
+ * subtract pagesize, which in the case of overflow
+ * leaves us with a very large run_size. That causes
+ * the first conditional below to fail, which means
+ * that the bogus run_size value never gets used for
+ * anything important.
+ */
+ run_size = (alignment << 1) - pagesize;
+ }
+
+ if (run_size <= arena_maxclass) {
+ ret = arena_palloc(choose_arena(), alignment, ceil_size,
+ run_size);
+ } else if (alignment <= chunksize)
+ ret = huge_malloc(ceil_size, false);
+ else
+ ret = huge_palloc(ceil_size, alignment, false);
+ }
+
+ assert(((uintptr_t)ret & (alignment - 1)) == 0);
+ return (ret);
+}
+
+/* Return the size of the allocation pointed to by ptr. */
+static size_t
+arena_salloc(const void *ptr)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
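+	/*
+	 * For small allocations the page map stores the owning run's address
+	 * above pagesize_mask; for large allocations it stores the run size
+	 * there instead (see arena_run_split()).
+	 */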
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+ mapbits = chunk->map[pageind].bits;
+ RELEASE_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
+ RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ ret = run->bin->reg_size;
+ } else {
+ ret = mapbits & ~pagesize_mask;
+ RELEASE_ASSERT(ret != 0);
+ }
+
+ return (ret);
+}
+
+#if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
+/*
+ * Validate ptr before assuming that it points to an allocation. Currently,
+ * the following validation is performed:
+ *
+ * + Check that ptr is not NULL.
+ *
+ * + Check that ptr lies within a mapped chunk.
+ */
+static inline size_t
+isalloc_validate(const void *ptr)
+{
+ arena_chunk_t *chunk;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk == NULL)
+ return (0);
+
+ if (malloc_rtree_get(chunk_rtree, (uintptr_t)chunk) == NULL)
+ return (0);
+
+ if (chunk != ptr) {
+ RELEASE_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+ return (arena_salloc(ptr));
+ } else {
+ size_t ret;
+ extent_node_t *node;
+ extent_node_t key;
+
+ /* Chunk. */
+ key.addr = (void *)chunk;
+ malloc_mutex_lock(&huge_mtx);
+ node = extent_tree_ad_search(&huge, &key);
+ if (node != NULL)
+ ret = node->size;
+ else
+ ret = 0;
+ malloc_mutex_unlock(&huge_mtx);
+ return (ret);
+ }
+}
+#endif
+
+static inline size_t
+isalloc(const void *ptr)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr) {
+ /* Region. */
+ assert(chunk->arena->magic == ARENA_MAGIC);
+
+ ret = arena_salloc(ptr);
+ } else {
+ extent_node_t *node, key;
+
+ /* Chunk (huge allocation). */
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ RELEASE_ASSERT(node != NULL);
+
+ ret = node->size;
+
+ malloc_mutex_unlock(&huge_mtx);
+ }
+
+ return (ret);
+}
+
+static inline void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_t *mapelm)
+{
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t size;
+
+ run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
+ RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ bin = run->bin;
+ size = bin->reg_size;
+
+#ifdef MALLOC_FILL
+ if (opt_poison)
+ memset(ptr, 0xe5, size);
+#endif
+
+ arena_run_reg_dalloc(run, bin, ptr, size);
+ run->nfree++;
+
+ if (run->nfree == bin->nregs) {
+ /* Deallocate run. */
+ if (run == bin->runcur)
+ bin->runcur = NULL;
+ else if (bin->nregs != 1) {
+ size_t run_pageind = (((uintptr_t)run -
+ (uintptr_t)chunk)) >> pagesize_2pow;
+ arena_chunk_map_t *run_mapelm =
+ &chunk->map[run_pageind];
+ /*
+ * This block's conditional is necessary because if the
+ * run only contains one region, then it never gets
+ * inserted into the non-full runs tree.
+ */
+ RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
+ run_mapelm);
+ arena_run_tree_remove(&bin->runs, run_mapelm);
+ }
+#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+ run->magic = 0;
+#endif
+ arena_run_dalloc(arena, run, true);
+#ifdef MALLOC_STATS
+ bin->stats.curruns--;
+#endif
+ } else if (run->nfree == 1 && run != bin->runcur) {
+ /*
+ * Make sure that bin->runcur always refers to the lowest
+ * non-full run, if one exists.
+ */
+ if (bin->runcur == NULL)
+ bin->runcur = run;
+ else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ /* Switch runcur. */
+ if (bin->runcur->nfree > 0) {
+ arena_chunk_t *runcur_chunk =
+ (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
+ size_t runcur_pageind =
+ (((uintptr_t)bin->runcur -
+ (uintptr_t)runcur_chunk)) >> pagesize_2pow;
+ arena_chunk_map_t *runcur_mapelm =
+ &runcur_chunk->map[runcur_pageind];
+
+ /* Insert runcur. */
+ RELEASE_ASSERT(arena_run_tree_search(&bin->runs,
+ runcur_mapelm) == NULL);
+ arena_run_tree_insert(&bin->runs,
+ runcur_mapelm);
+ }
+ bin->runcur = run;
+ } else {
+ size_t run_pageind = (((uintptr_t)run -
+ (uintptr_t)chunk)) >> pagesize_2pow;
+ arena_chunk_map_t *run_mapelm =
+ &chunk->map[run_pageind];
+
+ RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
+ NULL);
+ arena_run_tree_insert(&bin->runs, run_mapelm);
+ }
+ }
+#ifdef MALLOC_STATS
+ arena->stats.allocated_small -= size;
+ arena->stats.ndalloc_small++;
+#endif
+}
+
+static void
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+ /* Large allocation. */
+ malloc_spin_lock(&arena->lock);
+
+#ifdef MALLOC_FILL
+#ifndef MALLOC_STATS
+ if (opt_poison)
+#endif
+#endif
+ {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ pagesize_2pow;
+ size_t size = chunk->map[pageind].bits & ~pagesize_mask;
+
+#ifdef MALLOC_FILL
+#ifdef MALLOC_STATS
+ if (opt_poison)
+#endif
+ memset(ptr, 0xe5, size);
+#endif
+#ifdef MALLOC_STATS
+ arena->stats.allocated_large -= size;
+#endif
+ }
+#ifdef MALLOC_STATS
+ arena->stats.ndalloc_large++;
+#endif
+
+ arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+ malloc_spin_unlock(&arena->lock);
+}
+
+static inline void
+arena_dalloc(void *ptr, size_t offset)
+{
+ arena_chunk_t *chunk;
+ arena_t *arena;
+ size_t pageind;
+ arena_chunk_map_t *mapelm;
+
+ assert(ptr != NULL);
+ assert(offset != 0);
+ assert(CHUNK_ADDR2OFFSET(ptr) == offset);
+
+ chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
+ arena = chunk->arena;
+ assert(arena != NULL);
+ RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
+
+ pageind = offset >> pagesize_2pow;
+ mapelm = &chunk->map[pageind];
+ RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
+ /* Small allocation. */
+ malloc_spin_lock(&arena->lock);
+ arena_dalloc_small(arena, chunk, ptr, mapelm);
+ malloc_spin_unlock(&arena->lock);
+ } else
+ arena_dalloc_large(arena, chunk, ptr);
+}
+
+static inline void
+idalloc(void *ptr)
+{
+ size_t offset;
+
+ assert(ptr != NULL);
+
+ offset = CHUNK_ADDR2OFFSET(ptr);
+ if (offset != 0)
+ arena_dalloc(ptr, offset);
+ else
+ huge_dalloc(ptr);
+}
+
+static void
+arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t size, size_t oldsize)
+{
+
+ assert(size < oldsize);
+
+ /*
+ * Shrink the run, and make trailing pages available for other
+ * allocations.
+ */
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
+ true);
+#ifdef MALLOC_STATS
+ arena->stats.allocated_large -= oldsize - size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+}
+
+static bool
+arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t size, size_t oldsize)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
+ size_t npages = oldsize >> pagesize_2pow;
+
+ RELEASE_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
+
+ /* Try to extend the run. */
+ assert(size > oldsize);
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
+ & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
+ ~pagesize_mask) >= size - oldsize) {
+ /*
+ * The next run is available and sufficiently large. Split the
+ * following run, then merge the first part with the existing
+ * allocation.
+ */
+ arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
+ ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
+ false);
+
+ chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+ chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED;
+
+#ifdef MALLOC_STATS
+ arena->stats.allocated_large += size - oldsize;
+#endif
+ malloc_spin_unlock(&arena->lock);
+ return (false);
+ }
+ malloc_spin_unlock(&arena->lock);
+
+ return (true);
+}
+
+/*
+ * Try to resize a large allocation, in order to avoid copying. This will
+ * always fail when growing an object if the following run is already in use.
+ */
+static bool
+arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
+{
+ size_t psize;
+
+ psize = PAGE_CEILING(size);
+ if (psize == oldsize) {
+ /* Same size class. */
+#ifdef MALLOC_FILL
+ if (opt_poison && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize -
+ size);
+ }
+#endif
+ return (false);
+ } else {
+ arena_chunk_t *chunk;
+ arena_t *arena;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = chunk->arena;
+ RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
+
+ if (psize < oldsize) {
+#ifdef MALLOC_FILL
+ /* Fill before shrinking in order to avoid a race. */
+ if (opt_poison) {
+ memset((void *)((uintptr_t)ptr + size), 0xe5,
+ oldsize - size);
+ }
+#endif
+ arena_ralloc_large_shrink(arena, chunk, ptr, psize,
+ oldsize);
+ return (false);
+ } else {
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
+ psize, oldsize);
+#ifdef MALLOC_FILL
+ if (ret == false && opt_zero) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ size - oldsize);
+ }
+#endif
+ return (ret);
+ }
+ }
+}
+
+static void *
+arena_ralloc(void *ptr, size_t size, size_t oldsize)
+{
+ void *ret;
+ size_t copysize;
+
+ /* Try to avoid moving the allocation. */
+ if (size < small_min) {
+ if (oldsize < small_min &&
+ ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
+ == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
+ goto IN_PLACE; /* Same size class. */
+ } else if (size <= small_max) {
+ if (oldsize >= small_min && oldsize <= small_max &&
+ (QUANTUM_CEILING(size) >> opt_quantum_2pow)
+ == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
+ goto IN_PLACE; /* Same size class. */
+ } else if (size <= bin_maxclass) {
+ if (oldsize > small_max && oldsize <= bin_maxclass &&
+ pow2_ceil(size) == pow2_ceil(oldsize))
+ goto IN_PLACE; /* Same size class. */
+ } else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
+ assert(size > bin_maxclass);
+ if (arena_ralloc_large(ptr, size, oldsize) == false)
+ return (ptr);
+ }
+
+ /*
+ * If we get here, then size and oldsize are different enough that we
+ * need to move the object. In that case, fall back to allocating new
+ * space and copying.
+ */
+ ret = arena_malloc(choose_arena(), size, false);
+ if (ret == NULL)
+ return (NULL);
+
+ /* Junk/zero-filling were already done by arena_malloc(). */
+ copysize = (size < oldsize) ? size : oldsize;
+#ifdef VM_COPY_MIN
+ if (copysize >= VM_COPY_MIN)
+ pages_copy(ret, ptr, copysize);
+ else
+#endif
+ memcpy(ret, ptr, copysize);
+ idalloc(ptr);
+ return (ret);
+IN_PLACE:
+#ifdef MALLOC_FILL
+ if (opt_poison && size < oldsize)
+ memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize - size);
+ else if (opt_zero && size > oldsize)
+ memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
+#endif
+ return (ptr);
+}
+
+static inline void *
+iralloc(void *ptr, size_t size)
+{
+ size_t oldsize;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ oldsize = isalloc(ptr);
+
+ if (size <= arena_maxclass)
+ return (arena_ralloc(ptr, size, oldsize));
+ else
+ return (huge_ralloc(ptr, size, oldsize));
+}
+
+static bool
+arena_new(arena_t *arena)
+{
+ unsigned i;
+ arena_bin_t *bin;
+ size_t pow2_size, prev_run_size;
+
+ if (malloc_spin_init(&arena->lock))
+ return (true);
+
+#ifdef MALLOC_STATS
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+#endif
+
+ /* Initialize chunks. */
+ arena_chunk_tree_dirty_new(&arena->chunks_dirty);
+#ifdef MALLOC_DOUBLE_PURGE
+ LinkedList_Init(&arena->chunks_madvised);
+#endif
+ arena->spare = NULL;
+
+ arena->ndirty = 0;
+
+ arena_avail_tree_new(&arena->runs_avail);
+
+#ifdef MALLOC_BALANCE
+ arena->contention = 0;
+#endif
+
+ /* Initialize bins. */
+ prev_run_size = pagesize;
+
+ /* (2^n)-spaced tiny bins. */
+ for (i = 0; i < ntbins; i++) {
+ bin = &arena->bins[i];
+ bin->runcur = NULL;
+ arena_run_tree_new(&bin->runs);
+
+ bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
+
+ prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+ }
+
+ /* Quantum-spaced bins. */
+ for (; i < ntbins + nqbins; i++) {
+ bin = &arena->bins[i];
+ bin->runcur = NULL;
+ arena_run_tree_new(&bin->runs);
+
+ bin->reg_size = quantum * (i - ntbins + 1);
+
+ pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
+ prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+ }
+
+ /* (2^n)-spaced sub-page bins. */
+ for (; i < ntbins + nqbins + nsbins; i++) {
+ bin = &arena->bins[i];
+ bin->runcur = NULL;
+ arena_run_tree_new(&bin->runs);
+
+ bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
+
+ prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+ }
+
+#if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
+ arena->magic = ARENA_MAGIC;
+#endif
+
+ return (false);
+}
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arenas_extend(unsigned ind)
+{
+ arena_t *ret;
+
+ /* Allocate enough space for trailing bins. */
+ ret = (arena_t *)base_alloc(sizeof(arena_t)
+ + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+ if (ret != NULL && arena_new(ret) == false) {
+ arenas[ind] = ret;
+ return (ret);
+ }
+ /* Only reached if there is an OOM error. */
+
+ /*
+ * OOM here is quite inconvenient to propagate, since dealing with it
+ * would require a check for failure in the fast path. Instead, punt
+ * by using arenas[0]. In practice, this is an extremely unlikely
+ * failure.
+ */
+ _malloc_message(_getprogname(),
+ ": (malloc) Error initializing arena\n", "", "");
+ if (opt_abort)
+ abort();
+
+ return (arenas[0]);
+}
+
+/*
+ * End arena.
+ */
+/******************************************************************************/
+/*
+ * Begin general internal functions.
+ */
+
+static void *
+huge_malloc(size_t size, bool zero)
+{
+ return huge_palloc(size, chunksize, zero);
+}
+
+static void *
+huge_palloc(size_t size, size_t alignment, bool zero)
+{
+ void *ret;
+ size_t csize;
+ size_t psize;
+ extent_node_t *node;
+
+ /* Allocate one or more contiguous chunks for this request. */
+
+ csize = CHUNK_CEILING(size);
+ if (csize == 0) {
+ /* size is large enough to cause size_t wrap-around. */
+ return (NULL);
+ }
+
+ /* Allocate an extent node with which to track the chunk. */
+ node = base_node_alloc();
+ if (node == NULL)
+ return (NULL);
+
+ ret = chunk_alloc(csize, alignment, false, zero);
+ if (ret == NULL) {
+ base_node_dealloc(node);
+ return (NULL);
+ }
+
+ /* Insert node into huge. */
+ node->addr = ret;
+ psize = PAGE_CEILING(size);
+ node->size = psize;
+
+ malloc_mutex_lock(&huge_mtx);
+ extent_tree_ad_insert(&huge, node);
+#ifdef MALLOC_STATS
+ huge_nmalloc++;
+
+ /* Although we allocated space for csize bytes, we indicate that we've
+ * allocated only psize bytes.
+ *
+ * If DECOMMIT is defined, this is a reasonable thing to do, since
+ * we'll explicitly decommit the bytes in excess of psize.
+ *
+ * If DECOMMIT is not defined, then we're relying on the OS to be lazy
+ * about how it allocates physical pages to mappings. If we never
+ * touch the pages in excess of psize, the OS won't allocate a physical
+ * page, and we won't use more than psize bytes of physical memory.
+ *
+ * A correct program will only touch memory in excess of how much it
+ * requested if it first calls malloc_usable_size and finds out how
+ * much space it has to play with. But because we set node->size =
+ * psize above, malloc_usable_size will return psize, not csize, and
+ * the program will (hopefully) never touch bytes in excess of psize.
+ * Thus those bytes won't take up space in physical memory, and we can
+ * reasonably claim we never "allocated" them in the first place. */
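+ /*
+ * Illustrative example (hypothetical sizes, assuming 4 KiB pages and
+ * 1 MiB chunks): a request for 1 MiB + 1 byte yields csize = 2 MiB and
+ * psize = 1 MiB + 4 KiB, so huge_mapped grows by 2 MiB while
+ * huge_allocated grows by only 1 MiB + 4 KiB.
+ */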
+ huge_allocated += psize;
+ huge_mapped += csize;
+#endif
+ malloc_mutex_unlock(&huge_mtx);
+
+#ifdef MALLOC_DECOMMIT
+ if (csize - psize > 0)
+ pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
+#endif
+
+#ifdef MALLOC_FILL
+ if (zero == false) {
+ if (opt_junk)
+# ifdef MALLOC_DECOMMIT
+ memset(ret, 0xe4, psize);
+# else
+ memset(ret, 0xe4, csize);
+# endif
+ else if (opt_zero)
+# ifdef MALLOC_DECOMMIT
+ memset(ret, 0, psize);
+# else
+ memset(ret, 0, csize);
+# endif
+ }
+#endif
+
+ return (ret);
+}
+
+static void *
+huge_ralloc(void *ptr, size_t size, size_t oldsize)
+{
+ void *ret;
+ size_t copysize;
+
+ /* Avoid moving the allocation if the size class would not change. */
+
+ if (oldsize > arena_maxclass &&
+ CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
+ size_t psize = PAGE_CEILING(size);
+#ifdef MALLOC_FILL
+ if (opt_poison && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize
+ - size);
+ }
+#endif
+#ifdef MALLOC_DECOMMIT
+ if (psize < oldsize) {
+ extent_node_t *node, key;
+
+ pages_decommit((void *)((uintptr_t)ptr + psize),
+ oldsize - psize);
+
+ /* Update recorded size. */
+ malloc_mutex_lock(&huge_mtx);
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+ assert(node->size == oldsize);
+# ifdef MALLOC_STATS
+ huge_allocated -= oldsize - psize;
+ /* No need to change huge_mapped, because we didn't
+ * (un)map anything. */
+# endif
+ node->size = psize;
+ malloc_mutex_unlock(&huge_mtx);
+ } else if (psize > oldsize) {
+ pages_commit((void *)((uintptr_t)ptr + oldsize),
+ psize - oldsize);
+ }
+#endif
+
+ /* Although we don't have to commit or decommit anything if
+ * DECOMMIT is not defined and the size class didn't change, we
+ * do need to update the recorded size if the size increased,
+ * so malloc_usable_size doesn't return a value smaller than
+ * what was requested via realloc(). */
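+ /*
+ * Worked example (hypothetical sizes, assuming 4 KiB pages and 1 MiB
+ * chunks): growing from oldsize = 1 MiB + 4 KiB to a 1 MiB + 64 KiB
+ * request keeps the same 2 MiB chunk ceiling, so in that configuration
+ * only node->size is bumped, to the new psize of 1 MiB + 64 KiB.
+ */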
+
+ if (psize > oldsize) {
+ /* Update recorded size. */
+ extent_node_t *node, key;
+ malloc_mutex_lock(&huge_mtx);
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+ assert(node->size == oldsize);
+# ifdef MALLOC_STATS
+ huge_allocated += psize - oldsize;
+ /* No need to change huge_mapped, because we didn't
+ * (un)map anything. */
+# endif
+ node->size = psize;
+ malloc_mutex_unlock(&huge_mtx);
+ }
+
+#ifdef MALLOC_FILL
+ if (opt_zero && size > oldsize) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0, size
+ - oldsize);
+ }
+#endif
+ return (ptr);
+ }
+
+ /*
+ * If we get here, then size and oldsize are different enough that we
+ * need to use a different size class. In that case, fall back to
+ * allocating new space and copying.
+ */
+ ret = huge_malloc(size, false);
+ if (ret == NULL)
+ return (NULL);
+
+ copysize = (size < oldsize) ? size : oldsize;
+#ifdef VM_COPY_MIN
+ if (copysize >= VM_COPY_MIN)
+ pages_copy(ret, ptr, copysize);
+ else
+#endif
+ memcpy(ret, ptr, copysize);
+ idalloc(ptr);
+ return (ret);
+}
+
+static void
+huge_dalloc(void *ptr)
+{
+ extent_node_t *node, key;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = ptr;
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+ assert(node->addr == ptr);
+ extent_tree_ad_remove(&huge, node);
+
+#ifdef MALLOC_STATS
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+ huge_mapped -= CHUNK_CEILING(node->size);
+#endif
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ /* Unmap chunk. */
+ chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
+
+ base_node_dealloc(node);
+}
+
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+#ifdef MOZ_MEMORY_BSD
+static inline unsigned
+malloc_ncpus(void)
+{
+ unsigned ret;
+ int mib[2];
+ size_t len;
+
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+ len = sizeof(ret);
+ if (sysctl(mib, 2, &ret, &len, (void *) 0, 0) == -1) {
+ /* Error. */
+ return (1);
+ }
+
+ return (ret);
+}
+#elif (defined(MOZ_MEMORY_LINUX))
+#include <fcntl.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ unsigned ret;
+ int fd, nread, column;
+ char buf[1024];
+ static const char matchstr[] = "processor\t:";
+ int i;
+
+ /*
+ * sysconf(3) would be the preferred method for determining the number
+ * of CPUs, but it uses malloc internally, which causes untenable
+ * recursion during malloc initialization.
+ */
+ fd = open("/proc/cpuinfo", O_RDONLY);
+ if (fd == -1)
+ return (1); /* Error. */
+ /*
+ * Count the number of occurrences of matchstr at the beginnings of
+ * lines. This treats hyperthreaded CPUs as multiple processors.
+ */
+ column = 0;
+ ret = 0;
+ while (true) {
+ nread = read(fd, &buf, sizeof(buf));
+ if (nread <= 0)
+ break; /* EOF or error. */
+ for (i = 0;i < nread;i++) {
+ char c = buf[i];
+ if (c == '\n')
+ column = 0;
+ else if (column != -1) {
+ if (c == matchstr[column]) {
+ column++;
+ if (column == sizeof(matchstr) - 1) {
+ column = -1;
+ ret++;
+ }
+ } else
+ column = -1;
+ }
+ }
+ }
+
+ if (ret == 0)
+ ret = 1; /* Something went wrong in the parser. */
+ close(fd);
+
+ return (ret);
+}
+#elif (defined(MOZ_MEMORY_DARWIN))
+#include <mach/mach_init.h>
+#include <mach/mach_host.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ kern_return_t error;
+ natural_t n;
+ processor_info_array_t pinfo;
+ mach_msg_type_number_t pinfocnt;
+
+ error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
+ &n, &pinfo, &pinfocnt);
+ if (error != KERN_SUCCESS)
+ return (1); /* Error. */
+ else
+ return (n);
+}
+#elif (defined(MOZ_MEMORY_SOLARIS))
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+#else
+static inline unsigned
+malloc_ncpus(void)
+{
+
+ /*
+ * We lack a way to determine the number of CPUs on this platform, so
+ * assume 1 CPU.
+ */
+ return (1);
+}
+#endif
+#endif
+
+static void
+malloc_print_stats(void)
+{
+
+ if (opt_print_stats) {
+ char s[UMAX2S_BUFSIZE];
+ _malloc_message("___ Begin malloc statistics ___\n", "", "",
+ "");
+ _malloc_message("Assertions ",
+#ifdef NDEBUG
+ "disabled",
+#else
+ "enabled",
+#endif
+ "\n", "");
+ _malloc_message("Boolean MALLOC_OPTIONS: ",
+ opt_abort ? "A" : "a", "", "");
+#ifdef MALLOC_FILL
+ _malloc_message(opt_poison ? "C" : "c", "", "", "");
+ _malloc_message(opt_junk ? "J" : "j", "", "", "");
+#endif
+ _malloc_message("P", "", "", "");
+#ifdef MALLOC_UTRACE
+ _malloc_message(opt_utrace ? "U" : "u", "", "", "");
+#endif
+#ifdef MALLOC_SYSV
+ _malloc_message(opt_sysv ? "V" : "v", "", "", "");
+#endif
+#ifdef MALLOC_XMALLOC
+ _malloc_message(opt_xmalloc ? "X" : "x", "", "", "");
+#endif
+#ifdef MALLOC_FILL
+ _malloc_message(opt_zero ? "Z" : "z", "", "", "");
+#endif
+ _malloc_message("\n", "", "", "");
+
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+ _malloc_message("CPUs: ", umax2s(ncpus, 10, s), "\n", "");
+#endif
+ _malloc_message("Max arenas: ", umax2s(narenas, 10, s), "\n",
+ "");
+#ifdef MALLOC_BALANCE
+ _malloc_message("Arena balance threshold: ",
+ umax2s(opt_balance_threshold, 10, s), "\n", "");
+#endif
+ _malloc_message("Pointer size: ", umax2s(sizeof(void *), 10, s),
+ "\n", "");
+ _malloc_message("Quantum size: ", umax2s(quantum, 10, s), "\n",
+ "");
+ _malloc_message("Max small size: ", umax2s(small_max, 10, s),
+ "\n", "");
+ _malloc_message("Max dirty pages per arena: ",
+ umax2s(opt_dirty_max, 10, s), "\n", "");
+
+ _malloc_message("Chunk size: ", umax2s(chunksize, 10, s), "",
+ "");
+ _malloc_message(" (2^", umax2s(opt_chunk_2pow, 10, s), ")\n",
+ "");
+
+#ifdef MALLOC_STATS
+ {
+ size_t allocated, mapped = 0;
+#ifdef MALLOC_BALANCE
+ uint64_t nbalance = 0;
+#endif
+ unsigned i;
+ arena_t *arena;
+
+ /* Calculate and print allocated/mapped stats. */
+
+ /* arenas. */
+ for (i = 0, allocated = 0; i < narenas; i++) {
+ if (arenas[i] != NULL) {
+ malloc_spin_lock(&arenas[i]->lock);
+ allocated +=
+ arenas[i]->stats.allocated_small;
+ allocated +=
+ arenas[i]->stats.allocated_large;
+ mapped += arenas[i]->stats.mapped;
+#ifdef MALLOC_BALANCE
+ nbalance += arenas[i]->stats.nbalance;
+#endif
+ malloc_spin_unlock(&arenas[i]->lock);
+ }
+ }
+
+ /* huge/base. */
+ malloc_mutex_lock(&huge_mtx);
+ allocated += huge_allocated;
+ mapped += huge_mapped;
+ malloc_mutex_unlock(&huge_mtx);
+
+ malloc_mutex_lock(&base_mtx);
+ mapped += base_mapped;
+ malloc_mutex_unlock(&base_mtx);
+
+#ifdef MOZ_MEMORY_WINDOWS
+ malloc_printf("Allocated: %lu, mapped: %lu\n",
+ allocated, mapped);
+#else
+ malloc_printf("Allocated: %zu, mapped: %zu\n",
+ allocated, mapped);
+#endif
+
+#ifdef MALLOC_BALANCE
+ malloc_printf("Arena balance reassignments: %llu\n",
+ nbalance);
+#endif
+
+ /* Print chunk stats. */
+ malloc_printf(
+ "huge: nmalloc ndalloc allocated\n");
+#ifdef MOZ_MEMORY_WINDOWS
+ malloc_printf(" %12llu %12llu %12lu\n",
+ huge_nmalloc, huge_ndalloc, huge_allocated);
+#else
+ malloc_printf(" %12llu %12llu %12zu\n",
+ huge_nmalloc, huge_ndalloc, huge_allocated);
+#endif
+ /* Print stats for each arena. */
+ for (i = 0; i < narenas; i++) {
+ arena = arenas[i];
+ if (arena != NULL) {
+ malloc_printf(
+ "\narenas[%u]:\n", i);
+ malloc_spin_lock(&arena->lock);
+ stats_print(arena);
+ malloc_spin_unlock(&arena->lock);
+ }
+ }
+ }
+#endif /* #ifdef MALLOC_STATS */
+ _malloc_message("--- End malloc statistics ---\n", "", "", "");
+ }
+}
+
+/*
+ * FreeBSD's pthreads implementation calls malloc(3), so the malloc
+ * implementation has to take pains to avoid infinite recursion during
+ * initialization.
+ */
+#if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN))
+#define malloc_init() false
+#else
+static inline bool
+malloc_init(void)
+{
+
+ if (malloc_initialized == false)
+ return (malloc_init_hard());
+
+ return (false);
+}
+#endif
+
+#if !defined(MOZ_MEMORY_WINDOWS)
+static
+#endif
+bool
+malloc_init_hard(void)
+{
+ unsigned i;
+ char buf[PATH_MAX + 1];
+ const char *opts;
+ long result;
+#ifndef MOZ_MEMORY_WINDOWS
+ int linklen;
+#endif
+#ifdef MOZ_MEMORY_DARWIN
+ malloc_zone_t* default_zone;
+#endif
+
+#ifndef MOZ_MEMORY_WINDOWS
+ malloc_mutex_lock(&init_lock);
+#endif
+
+ if (malloc_initialized) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock.
+ */
+#ifndef MOZ_MEMORY_WINDOWS
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (false);
+ }
+
+#ifdef MOZ_MEMORY_WINDOWS
+ /* get a thread local storage index */
+ tlsIndex = TlsAlloc();
+#endif
+
+ /* Get page size and number of CPUs */
+#ifdef MOZ_MEMORY_WINDOWS
+ {
+ SYSTEM_INFO info;
+
+ GetSystemInfo(&info);
+ result = info.dwPageSize;
+
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+ ncpus = info.dwNumberOfProcessors;
+#endif
+ }
+#else
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+ ncpus = malloc_ncpus();
+#endif
+
+ result = sysconf(_SC_PAGESIZE);
+ assert(result != -1);
+#endif
+
+ /* We assume that the page size is a power of 2. */
+ assert(((result - 1) & result) == 0);
+#ifdef MALLOC_STATIC_SIZES
+ if (pagesize % (size_t) result) {
+ _malloc_message(_getprogname(),
+ "Compile-time page size does not divide the runtime one.\n",
+ "", "");
+ abort();
+ }
+#else
+ pagesize = (size_t) result;
+ pagesize_mask = (size_t) result - 1;
+ pagesize_2pow = ffs((int)result) - 1;
+#endif
+
+ for (i = 0; i < 3; i++) {
+ unsigned j;
+
+ /* Get runtime configuration. */
+ switch (i) {
+ case 0:
+#ifndef MOZ_MEMORY_WINDOWS
+ if ((linklen = readlink("/etc/malloc.conf", buf,
+ sizeof(buf) - 1)) != -1) {
+ /*
+ * Use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
+ */
+ buf[linklen] = '\0';
+ opts = buf;
+ } else
+#endif
+ {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ case 1:
+ if ((opts = getenv("MALLOC_OPTIONS")) != NULL) {
+ /*
+ * Do nothing; opts is already initialized to
+ * the value of the MALLOC_OPTIONS environment
+ * variable.
+ */
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ case 2:
+ if (_malloc_options != NULL) {
+ /*
+ * Use options that were compiled into the
+ * program.
+ */
+ opts = _malloc_options;
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ default:
+ /* NOTREACHED */
+ buf[0] = '\0';
+ opts = buf;
+ assert(false);
+ }
+
+ for (j = 0; opts[j] != '\0'; j++) {
+ unsigned k, nreps;
+ bool nseen;
+
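+ /*
+ * Illustrative example (hypothetical option string): with
+ * MALLOC_OPTIONS="2NF", the digit prefix yields nreps = 2, so 'N'
+ * bumps opt_narenas_lshift twice, and the following 'F' then
+ * doubles opt_dirty_max once.
+ */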
+ /* Parse repetition count, if any. */
+ for (nreps = 0, nseen = false;; j++, nseen = true) {
+ switch (opts[j]) {
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7':
+ case '8': case '9':
+ nreps *= 10;
+ nreps += opts[j] - '0';
+ break;
+ default:
+ goto MALLOC_OUT;
+ }
+ }
+MALLOC_OUT:
+ if (nseen == false)
+ nreps = 1;
+
+ for (k = 0; k < nreps; k++) {
+ switch (opts[j]) {
+ case 'a':
+ opt_abort = false;
+ break;
+ case 'A':
+ opt_abort = true;
+ break;
+ case 'b':
+#ifdef MALLOC_BALANCE
+ opt_balance_threshold >>= 1;
+#endif
+ break;
+ case 'B':
+#ifdef MALLOC_BALANCE
+ if (opt_balance_threshold == 0)
+ opt_balance_threshold = 1;
+ else if ((opt_balance_threshold << 1)
+ > opt_balance_threshold)
+ opt_balance_threshold <<= 1;
+#endif
+ break;
+#ifdef MALLOC_FILL
+#ifndef MALLOC_PRODUCTION
+ case 'c':
+ opt_poison = false;
+ break;
+ case 'C':
+ opt_poison = true;
+ break;
+#endif
+#endif
+ case 'f':
+ opt_dirty_max >>= 1;
+ break;
+ case 'F':
+ if (opt_dirty_max == 0)
+ opt_dirty_max = 1;
+ else if ((opt_dirty_max << 1) != 0)
+ opt_dirty_max <<= 1;
+ break;
+#ifdef MALLOC_FILL
+#ifndef MALLOC_PRODUCTION
+ case 'j':
+ opt_junk = false;
+ break;
+ case 'J':
+ opt_junk = true;
+ break;
+#endif
+#endif
+#ifndef MALLOC_STATIC_SIZES
+ case 'k':
+ /*
+ * Chunks always require at least one
+ * header page, so chunks can never be
+ * smaller than two pages.
+ */
+ if (opt_chunk_2pow > pagesize_2pow + 1)
+ opt_chunk_2pow--;
+ break;
+ case 'K':
+ if (opt_chunk_2pow + 1 <
+ (sizeof(size_t) << 3))
+ opt_chunk_2pow++;
+ break;
+#endif
+ case 'n':
+ opt_narenas_lshift--;
+ break;
+ case 'N':
+ opt_narenas_lshift++;
+ break;
+ case 'p':
+ opt_print_stats = false;
+ break;
+ case 'P':
+ opt_print_stats = true;
+ break;
+#ifndef MALLOC_STATIC_SIZES
+ case 'q':
+ if (opt_quantum_2pow > QUANTUM_2POW_MIN)
+ opt_quantum_2pow--;
+ break;
+ case 'Q':
+ if (opt_quantum_2pow < pagesize_2pow -
+ 1)
+ opt_quantum_2pow++;
+ break;
+ case 's':
+ if (opt_small_max_2pow >
+ QUANTUM_2POW_MIN)
+ opt_small_max_2pow--;
+ break;
+ case 'S':
+ if (opt_small_max_2pow < pagesize_2pow
+ - 1)
+ opt_small_max_2pow++;
+ break;
+#endif
+#ifdef MALLOC_UTRACE
+ case 'u':
+ opt_utrace = false;
+ break;
+ case 'U':
+ opt_utrace = true;
+ break;
+#endif
+#ifdef MALLOC_SYSV
+ case 'v':
+ opt_sysv = false;
+ break;
+ case 'V':
+ opt_sysv = true;
+ break;
+#endif
+#ifdef MALLOC_XMALLOC
+ case 'x':
+ opt_xmalloc = false;
+ break;
+ case 'X':
+ opt_xmalloc = true;
+ break;
+#endif
+#ifdef MALLOC_FILL
+#ifndef MALLOC_PRODUCTION
+ case 'z':
+ opt_zero = false;
+ break;
+ case 'Z':
+ opt_zero = true;
+ break;
+#endif
+#endif
+ default: {
+ char cbuf[2];
+
+ cbuf[0] = opts[j];
+ cbuf[1] = '\0';
+ _malloc_message(_getprogname(),
+ ": (malloc) Unsupported character "
+ "in malloc options: '", cbuf,
+ "'\n");
+ }
+ }
+ }
+ }
+ }
+
+ /* Take care to call atexit() only once. */
+ if (opt_print_stats) {
+#ifndef MOZ_MEMORY_WINDOWS
+ /* Print statistics at exit. */
+ atexit(malloc_print_stats);
+#endif
+ }
+
+#ifndef MALLOC_STATIC_SIZES
+ /* Set variables according to the value of opt_small_max_2pow. */
+ if (opt_small_max_2pow < opt_quantum_2pow)
+ opt_small_max_2pow = opt_quantum_2pow;
+ small_max = (1U << opt_small_max_2pow);
+
+ /* Set bin-related variables. */
+ bin_maxclass = (pagesize >> 1);
+ assert(opt_quantum_2pow >= TINY_MIN_2POW);
+ ntbins = opt_quantum_2pow - TINY_MIN_2POW;
+ assert(ntbins <= opt_quantum_2pow);
+ nqbins = (small_max >> opt_quantum_2pow);
+ nsbins = pagesize_2pow - opt_small_max_2pow - 1;
+
+ /* Set variables according to the value of opt_quantum_2pow. */
+ quantum = (1U << opt_quantum_2pow);
+ quantum_mask = quantum - 1;
+ if (ntbins > 0)
+ small_min = (quantum >> 1) + 1;
+ else
+ small_min = 1;
+ assert(small_min <= quantum);
+
+ /* Set variables according to the value of opt_chunk_2pow. */
+ chunksize = (1LU << opt_chunk_2pow);
+ chunksize_mask = chunksize - 1;
+ chunk_npages = (chunksize >> pagesize_2pow);
+
+ arena_chunk_header_npages = calculate_arena_header_pages();
+ arena_maxclass = calculate_arena_maxclass();
+
+ recycle_limit = CHUNK_RECYCLE_LIMIT * chunksize;
+#endif
+
+ recycled_size = 0;
+
+#ifdef JEMALLOC_USES_MAP_ALIGN
+ /*
+ * When using MAP_ALIGN, the alignment parameter must be a power of two
+ * multiple of the system pagesize, or mmap will fail.
+ */
+ assert((chunksize % pagesize) == 0);
+ assert((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
+#endif
+
+ UTRACE(0, 0, 0);
+
+ /* Various sanity checks that regard configuration. */
+ assert(quantum >= sizeof(void *));
+ assert(quantum <= pagesize);
+ assert(chunksize >= pagesize);
+ assert(quantum * 4 <= chunksize);
+
+ /* Initialize chunks data. */
+ malloc_mutex_init(&chunks_mtx);
+ extent_tree_szad_new(&chunks_szad_mmap);
+ extent_tree_ad_new(&chunks_ad_mmap);
+
+ /* Initialize huge allocation data. */
+ malloc_mutex_init(&huge_mtx);
+ extent_tree_ad_new(&huge);
+#ifdef MALLOC_STATS
+ huge_nmalloc = 0;
+ huge_ndalloc = 0;
+ huge_allocated = 0;
+ huge_mapped = 0;
+#endif
+
+ /* Initialize base allocation data structures. */
+#ifdef MALLOC_STATS
+ base_mapped = 0;
+ base_committed = 0;
+#endif
+ base_nodes = NULL;
+ malloc_mutex_init(&base_mtx);
+
+#ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+ narenas = 1;
+#else
+ if (ncpus > 1) {
+ /*
+ * For SMP systems, create four times as many arenas as there
+ * are CPUs by default.
+ */
+ opt_narenas_lshift += 2;
+ }
+
+ /* Determine how many arenas to use. */
+ narenas = ncpus;
+#endif
+ if (opt_narenas_lshift > 0) {
+ if ((narenas << opt_narenas_lshift) > narenas)
+ narenas <<= opt_narenas_lshift;
+ /*
+ * Make sure not to exceed the limits of what base_alloc() can
+ * handle.
+ */
+ if (narenas * sizeof(arena_t *) > chunksize)
+ narenas = chunksize / sizeof(arena_t *);
+ } else if (opt_narenas_lshift < 0) {
+ if ((narenas >> -opt_narenas_lshift) < narenas)
+ narenas >>= -opt_narenas_lshift;
+ /* Make sure there is at least one arena. */
+ if (narenas == 0)
+ narenas = 1;
+ }
+#ifdef MALLOC_BALANCE
+ assert(narenas != 0);
+ for (narenas_2pow = 0;
+ (narenas >> (narenas_2pow + 1)) != 0;
+ narenas_2pow++);
+#endif
+
+#ifdef NO_TLS
+ if (narenas > 1) {
+ static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
+ 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
+ 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+ 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
+ 223, 227, 229, 233, 239, 241, 251, 257, 263};
+ unsigned nprimes, parenas;
+
+ /*
+ * Pick a prime number of hash arenas that is more than narenas
+ * so that direct hashing of pthread_self() pointers tends to
+ * spread allocations evenly among the arenas.
+ */
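+ /*
+ * Illustrative example (hypothetical CPU count): with narenas = 16,
+ * the loop below selects 17, the smallest entry in primes[] greater
+ * than 16.
+ */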
+ assert((narenas & 1) == 0); /* narenas must be even. */
+ nprimes = (sizeof(primes) >> SIZEOF_INT_2POW);
+ parenas = primes[nprimes - 1]; /* In case not enough primes. */
+ for (i = 1; i < nprimes; i++) {
+ if (primes[i] > narenas) {
+ parenas = primes[i];
+ break;
+ }
+ }
+ narenas = parenas;
+ }
+#endif
+
+#ifndef NO_TLS
+# ifndef MALLOC_BALANCE
+ next_arena = 0;
+# endif
+#endif
+
+ /* Allocate and initialize arenas. */
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+ if (arenas == NULL) {
+#ifndef MOZ_MEMORY_WINDOWS
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (true);
+ }
+ /*
+ * Zero the array. In practice, this should always be pre-zeroed,
+ * since it was just mmap()ed, but let's be sure.
+ */
+ memset(arenas, 0, sizeof(arena_t *) * narenas);
+
+ /*
+ * Initialize one arena here. The rest are lazily created in
+ * choose_arena_hard().
+ */
+ arenas_extend(0);
+ if (arenas[0] == NULL) {
+#ifndef MOZ_MEMORY_WINDOWS
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (true);
+ }
+#ifndef NO_TLS
+ /*
+ * Assign the initial arena to the initial thread, in order to avoid
+ * spurious creation of an extra arena if the application switches to
+ * threaded mode.
+ */
+#ifdef MOZ_MEMORY_WINDOWS
+ TlsSetValue(tlsIndex, arenas[0]);
+#else
+ arenas_map = arenas[0];
+#endif
+#endif
+
+ /*
+ * Seed here for the initial thread, since choose_arena_hard() is only
+ * called for other threads. The seed value doesn't really matter.
+ */
+#ifdef MALLOC_BALANCE
+ SPRN(balance, 42);
+#endif
+
+ malloc_spin_init(&arenas_lock);
+
+#ifdef MALLOC_VALIDATE
+ chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
+ if (chunk_rtree == NULL)
+ return (true);
+#endif
+
+ malloc_initialized = true;
+
+#if !defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN)
+ /* Prevent potential deadlock on malloc locks after fork. */
+ pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
+#endif
+
+#if defined(NEEDS_PTHREAD_MMAP_UNALIGNED_TSD)
+ if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
+ malloc_printf("<jemalloc>: Error in pthread_key_create()\n");
+ }
+#endif
+
+#if defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_REPLACE_MALLOC)
+ /*
+ * Overwrite the default memory allocator to use jemalloc everywhere.
+ */
+ default_zone = malloc_default_zone();
+
+ /*
+ * We only use jemalloc with MacOS 10.6 and 10.7. jemalloc is disabled
+ * on 32-bit builds (10.5 and 32-bit 10.6) due to bug 702250, an
+ * apparent MacOS bug. In fact, this code isn't even compiled on
+ * 32-bit builds.
+ *
+ * We'll have to update our code to work with newer versions, because
+ * the malloc zone layout is likely to change.
+ */
+
+ osx_use_jemalloc = (default_zone->version == SNOW_LEOPARD_MALLOC_ZONE_T_VERSION ||
+ default_zone->version == LION_MALLOC_ZONE_T_VERSION);
+
+ /* Allow us to dynamically turn off jemalloc for testing. */
+ if (getenv("NO_MAC_JEMALLOC")) {
+ osx_use_jemalloc = false;
+#ifdef __i386__
+ malloc_printf("Warning: NO_MAC_JEMALLOC has no effect on "
+ "i386 machines (such as this one).\n");
+#endif
+ }
+
+ if (osx_use_jemalloc) {
+ /*
+ * Convert the default szone to an "overlay zone" that is capable
+ * of deallocating szone-allocated objects, but allocating new
+ * objects from jemalloc.
+ */
+ size_t size = zone_version_size(default_zone->version);
+ szone2ozone(default_zone, size);
+ }
+ else {
+ szone = default_zone;
+ }
+#endif
+
+#ifndef MOZ_MEMORY_WINDOWS
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (false);
+}
+
+/* XXX Why not just expose malloc_print_stats()? */
+#ifdef MOZ_MEMORY_WINDOWS
+void
+malloc_shutdown()
+{
+
+ malloc_print_stats();
+}
+#endif
+
+/*
+ * End general internal functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+/*
+ * Even though we compile with MOZ_MEMORY, we may have to dynamically decide
+ * not to use jemalloc, as discussed above. However, we call jemalloc
+ * functions directly from mozalloc. Since it's pretty dangerous to mix the
+ * allocators, we need to call the OSX allocators from the functions below,
+ * when osx_use_jemalloc is not (dynamically) set.
+ *
+ * Note that we assume jemalloc is enabled on i386. This is safe because the
+ * only i386 versions of MacOS are 10.5 and 10.6, which we support. We have to
+ * do this because madvise isn't in the malloc zone struct for 10.5.
+ *
+ * This means that NO_MAC_JEMALLOC doesn't work on i386.
+ */
+#if defined(MOZ_MEMORY_DARWIN) && !defined(__i386__) && !defined(MOZ_REPLACE_MALLOC)
+#define DARWIN_ONLY(A) if (!osx_use_jemalloc) { A; }
+#else
+#define DARWIN_ONLY(A)
+#endif
+
+MOZ_MEMORY_API void *
+malloc_impl(size_t size)
+{
+ void *ret;
+
+ DARWIN_ONLY(return (szone->malloc)(szone, size));
+
+ if (malloc_init()) {
+ ret = NULL;
+ goto RETURN;
+ }
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ }
+
+ ret = imalloc(size);
+
+RETURN:
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in malloc(): out of memory\n", "",
+ "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+
+ UTRACE(0, size, ret);
+ return (ret);
+}
+
+/*
+ * In ELF systems the default visibility allows symbols to be preempted at
+ * runtime. This in turn prevents the uses of memalign in this file from being
+ * optimized. What we do in here is define two aliasing symbols (they point to
+ * the same code): memalign and memalign_internal. The internal version has
+ * hidden visibility and is used in every reference from this file.
+ *
+ * For more information on this technique, see section 2.2.7 (Avoid Using
+ * Exported Symbols) in http://www.akkadia.org/drepper/dsohowto.pdf.
+ */
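+
+/*
+ * A minimal sketch of the pattern described above, with hypothetical names
+ * (nothing here is part of this allocator):
+ *
+ *   void thing_internal(size_t n) __attribute__((visibility ("hidden")));
+ *   extern void thing(size_t n)
+ *       __attribute__((alias ("thing_internal"), visibility ("default")));
+ *
+ * Internal callers reference thing_internal directly, so they cannot be
+ * preempted at runtime, while external code still resolves the exported
+ * alias thing.
+ */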
+
+#ifndef MOZ_REPLACE_MALLOC
+#if defined(__GNUC__) && !defined(MOZ_MEMORY_DARWIN)
+#define MOZ_MEMORY_ELF
+#endif
+
+#ifdef MOZ_MEMORY_SOLARIS
+# ifdef __SUNPRO_C
+void *
+memalign_impl(size_t alignment, size_t size);
+#pragma no_inline(memalign_impl)
+# elif (defined(__GNUC__))
+__attribute__((noinline))
+# endif
+#else
+#if (defined(MOZ_MEMORY_ELF))
+__attribute__((visibility ("hidden")))
+#endif
+#endif
+#endif /* MOZ_REPLACE_MALLOC */
+
+#ifdef MOZ_MEMORY_ELF
+#define MEMALIGN memalign_internal
+#else
+#define MEMALIGN memalign_impl
+#endif
+
+#ifndef MOZ_MEMORY_ELF
+MOZ_MEMORY_API
+#endif
+void *
+MEMALIGN(size_t alignment, size_t size)
+{
+ void *ret;
+
+ DARWIN_ONLY(return (szone->memalign)(szone, alignment, size));
+
+ assert(((alignment - 1) & alignment) == 0);
+
+ if (malloc_init()) {
+ ret = NULL;
+ goto RETURN;
+ }
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ }
+
+ alignment = alignment < sizeof(void*) ? sizeof(void*) : alignment;
+ ret = ipalloc(alignment, size);
+
+RETURN:
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc && ret == NULL) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in memalign(): out of memory\n", "", "");
+ abort();
+ }
+#endif
+ UTRACE(0, size, ret);
+ return (ret);
+}
+
+#ifdef MOZ_MEMORY_ELF
+extern void *
+memalign_impl(size_t alignment, size_t size) __attribute__((alias ("memalign_internal"), visibility ("default")));
+#endif
+
+MOZ_MEMORY_API int
+posix_memalign_impl(void **memptr, size_t alignment, size_t size)
+{
+ void *result;
+
+ /* Make sure that alignment is a large enough power of 2. */
+ if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in posix_memalign(): "
+ "invalid alignment\n", "", "");
+ abort();
+ }
+#endif
+ return (EINVAL);
+ }
+
+ /* The 0-->1 size promotion is done in the memalign() call below */
+
+ result = MEMALIGN(alignment, size);
+
+ if (result == NULL)
+ return (ENOMEM);
+
+ *memptr = result;
+ return (0);
+}
+
+MOZ_MEMORY_API void *
+aligned_alloc_impl(size_t alignment, size_t size)
+{
+ if (size % alignment) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in aligned_alloc(): "
+ "size is not multiple of alignment\n", "", "");
+ abort();
+ }
+#endif
+ return (NULL);
+ }
+ return MEMALIGN(alignment, size);
+}
+
+MOZ_MEMORY_API void *
+valloc_impl(size_t size)
+{
+ return (MEMALIGN(pagesize, size));
+}
+
+MOZ_MEMORY_API void *
+calloc_impl(size_t num, size_t size)
+{
+ void *ret;
+ size_t num_size;
+
+ DARWIN_ONLY(return (szone->calloc)(szone, num, size));
+
+ if (malloc_init()) {
+ num_size = 0;
+ ret = NULL;
+ goto RETURN;
+ }
+
+ num_size = num * size;
+ if (num_size == 0) {
+#ifdef MALLOC_SYSV
+ if ((opt_sysv == false) && ((num == 0) || (size == 0)))
+#endif
+ num_size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ /*
+ * Try to avoid division here. We know that it isn't possible to
+ * overflow during multiplication if neither operand uses any of the
+ * most significant half of the bits in a size_t.
+ */
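+ /*
+ * Worked example (assuming a 64-bit size_t): SIZE_T_MAX << 32 keeps only
+ * the upper 32 bits, so the division check below runs only when num or
+ * size is at least 2^32; two operands below 2^32 cannot overflow a
+ * 64-bit product.
+ */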
+ } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
+ && (num_size / size != num)) {
+ /* size_t overflow. */
+ ret = NULL;
+ goto RETURN;
+ }
+
+ ret = icalloc(num_size);
+
+RETURN:
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in calloc(): out of memory\n", "",
+ "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+
+ UTRACE(0, num_size, ret);
+ return (ret);
+}
+
+MOZ_MEMORY_API void *
+realloc_impl(void *ptr, size_t size)
+{
+ void *ret;
+
+ DARWIN_ONLY(return (szone->realloc)(szone, ptr, size));
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ if (ptr != NULL)
+ idalloc(ptr);
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ }
+
+ if (ptr != NULL) {
+ assert(malloc_initialized);
+
+ ret = iralloc(ptr, size);
+
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in realloc(): out of "
+ "memory\n", "", "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+ } else {
+ if (malloc_init())
+ ret = NULL;
+ else
+ ret = imalloc(size);
+
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in realloc(): out of "
+ "memory\n", "", "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+ }
+
+#ifdef MALLOC_SYSV
+RETURN:
+#endif
+ UTRACE(ptr, size, ret);
+ return (ret);
+}
+
+MOZ_MEMORY_API void
+free_impl(void *ptr)
+{
+ size_t offset;
+
+ DARWIN_ONLY((szone->free)(szone, ptr); return);
+
+ UTRACE(ptr, 0, 0);
+
+ /*
+ * A version of idalloc that checks for a NULL pointer, but only for
+ * huge allocations, assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
+ */
+ assert(CHUNK_ADDR2OFFSET(NULL) == 0);
+ offset = CHUNK_ADDR2OFFSET(ptr);
+ if (offset != 0)
+ arena_dalloc(ptr, offset);
+ else if (ptr != NULL)
+ huge_dalloc(ptr);
+}
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+/* This was added by Mozilla for use by SQLite. */
+#if defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_REPLACE_MALLOC)
+static
+#else
+MOZ_MEMORY_API
+#endif
+size_t
+malloc_good_size_impl(size_t size)
+{
+ /*
+ * This duplicates the logic in imalloc(), arena_malloc() and
+ * arena_malloc_small().
+ */
+ if (size < small_min) {
+ /* Small (tiny). */
+ size = pow2_ceil(size);
+ /*
+ * We omit the #ifdefs from arena_malloc_small() --
+ * it can be inaccurate about the size in some cases, but this
+ * function must be accurate.
+ */
+ if (size < (1U << TINY_MIN_2POW))
+ size = (1U << TINY_MIN_2POW);
+ } else if (size <= small_max) {
+ /* Small (quantum-spaced). */
+ size = QUANTUM_CEILING(size);
+ } else if (size <= bin_maxclass) {
+ /* Small (sub-page). */
+ size = pow2_ceil(size);
+ } else if (size <= arena_maxclass) {
+ /* Large. */
+ size = PAGE_CEILING(size);
+ } else {
+ /*
+ * Huge. We use PAGE_CEILING to get psize, instead of using
+ * CHUNK_CEILING to get csize. This ensures that
+ * malloc_usable_size(malloc(n)) always matches
+ * malloc_good_size(n).
+ */
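+ /*
+ * Illustrative example (hypothetical sizes, assuming 4 KiB pages and
+ * 1 MiB chunks): a 1 MiB + 1 byte request reports 1 MiB + 4 KiB here,
+ * matching the psize that huge_palloc() records in node->size, rather
+ * than the 2 MiB csize that is actually mapped.
+ */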
+ size = PAGE_CEILING(size);
+ }
+ return size;
+}
+
+
+MOZ_MEMORY_API size_t
+malloc_usable_size_impl(MALLOC_USABLE_SIZE_CONST_PTR void *ptr)
+{
+ DARWIN_ONLY(return (szone->size)(szone, ptr));
+
+#ifdef MALLOC_VALIDATE
+ return (isalloc_validate(ptr));
+#else
+ assert(ptr != NULL);
+
+ return (isalloc(ptr));
+#endif
+}
+
+MOZ_JEMALLOC_API void
+jemalloc_stats_impl(jemalloc_stats_t *stats)
+{
+ size_t i, non_arena_mapped, chunk_header_size;
+
+ assert(stats != NULL);
+
+ /*
+ * Gather runtime settings.
+ */
+ stats->opt_abort = opt_abort;
+ stats->opt_junk =
+#ifdef MALLOC_FILL
+ opt_junk ? true :
+#endif
+ false;
+ stats->opt_poison =
+#ifdef MALLOC_FILL
+ opt_poison ? true :
+#endif
+ false;
+ stats->opt_utrace =
+#ifdef MALLOC_UTRACE
+ opt_utrace ? true :
+#endif
+ false;
+ stats->opt_sysv =
+#ifdef MALLOC_SYSV
+ opt_sysv ? true :
+#endif
+ false;
+ stats->opt_xmalloc =
+#ifdef MALLOC_XMALLOC
+ opt_xmalloc ? true :
+#endif
+ false;
+ stats->opt_zero =
+#ifdef MALLOC_FILL
+ opt_zero ? true :
+#endif
+ false;
+ stats->narenas = narenas;
+ stats->balance_threshold =
+#ifdef MALLOC_BALANCE
+ opt_balance_threshold
+#else
+ SIZE_T_MAX
+#endif
+ ;
+ stats->quantum = quantum;
+ stats->small_max = small_max;
+ stats->large_max = arena_maxclass;
+ stats->chunksize = chunksize;
+ stats->dirty_max = opt_dirty_max;
+
+ /*
+ * Gather current memory usage statistics.
+ */
+ stats->mapped = 0;
+ stats->allocated = 0;
+ stats->waste = 0;
+ stats->page_cache = 0;
+ stats->bookkeeping = 0;
+ stats->bin_unused = 0;
+
+ non_arena_mapped = 0;
+
+ /* Get huge mapped/allocated. */
+ malloc_mutex_lock(&huge_mtx);
+ non_arena_mapped += huge_mapped;
+ stats->allocated += huge_allocated;
+ assert(huge_mapped >= huge_allocated);
+ malloc_mutex_unlock(&huge_mtx);
+
+ /* Get base mapped/allocated. */
+ malloc_mutex_lock(&base_mtx);
+ non_arena_mapped += base_mapped;
+ stats->bookkeeping += base_committed;
+ assert(base_mapped >= base_committed);
+ malloc_mutex_unlock(&base_mtx);
+
+ /* Iterate over arenas. */
+ for (i = 0; i < narenas; i++) {
+ arena_t *arena = arenas[i];
+ size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
+ arena_unused, arena_headers;
+ arena_run_t* run;
+ arena_chunk_map_t* mapelm;
+
+ if (arena == NULL) {
+ continue;
+ }
+
+ arena_headers = 0;
+ arena_unused = 0;
+
+ malloc_spin_lock(&arena->lock);
+
+ arena_mapped = arena->stats.mapped;
+
+ /* "committed" counts dirty and allocated memory. */
+ arena_committed = arena->stats.committed << pagesize_2pow;
+
+ arena_allocated = arena->stats.allocated_small +
+ arena->stats.allocated_large;
+
+ arena_dirty = arena->ndirty << pagesize_2pow;
+
+ for (j = 0; j < ntbins + nqbins + nsbins; j++) {
+ arena_bin_t* bin = &arena->bins[j];
+ size_t bin_unused = 0;
+
+ rb_foreach_begin(arena_chunk_map_t, link, &bin->runs, mapelm) {
+ run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
+ bin_unused += run->nfree * bin->reg_size;
+ } rb_foreach_end(arena_chunk_map_t, link, &bin->runs, mapelm)
+
+ if (bin->runcur) {
+ bin_unused += bin->runcur->nfree * bin->reg_size;
+ }
+
+ arena_unused += bin_unused;
+ arena_headers += bin->stats.curruns * bin->reg0_offset;
+ }
+
+ malloc_spin_unlock(&arena->lock);
+
+ assert(arena_mapped >= arena_committed);
+ assert(arena_committed >= arena_allocated + arena_dirty);
+
+ /* "waste" is committed memory that is neither dirty nor
+ * allocated. */
+ stats->mapped += arena_mapped;
+ stats->allocated += arena_allocated;
+ stats->page_cache += arena_dirty;
+ stats->waste += arena_committed -
+ arena_allocated - arena_dirty - arena_unused - arena_headers;
+ stats->bin_unused += arena_unused;
+ stats->bookkeeping += arena_headers;
+ }
+
+ /* Account for arena chunk headers in bookkeeping rather than waste. */
+ chunk_header_size =
+ ((stats->mapped / stats->chunksize) * arena_chunk_header_npages) <<
+ pagesize_2pow;
+
+ stats->mapped += non_arena_mapped;
+ stats->bookkeeping += chunk_header_size;
+ stats->waste -= chunk_header_size;
+
+ assert(stats->mapped >= stats->allocated + stats->waste +
+ stats->page_cache + stats->bookkeeping);
+}
+
+#ifdef MALLOC_DOUBLE_PURGE
+
+/* Explicitly remove all of this chunk's MADV_FREE'd pages from memory. */
+static void
+hard_purge_chunk(arena_chunk_t *chunk)
+{
+ /* See similar logic in arena_purge(). */
+
+ size_t i;
+ for (i = arena_chunk_header_npages; i < chunk_npages; i++) {
+ /* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
+ size_t npages;
+ for (npages = 0;
+ i + npages < chunk_npages && (chunk->map[i + npages].bits & CHUNK_MAP_MADVISED);
+ npages++) {
+ /* Turn off the chunk's MADV_FREED bit and turn on its
+ * DECOMMITTED bit. */
+ RELEASE_ASSERT(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
+ chunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
+ }
+
+ /* We could use mincore to find out which pages are actually
+ * present, but it's not clear that's better. */
+ if (npages > 0) {
+ pages_decommit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
+ pages_commit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
+ }
+ i += npages;
+ }
+}
+
+/* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
+static void
+hard_purge_arena(arena_t *arena)
+{
+ malloc_spin_lock(&arena->lock);
+
+ while (!LinkedList_IsEmpty(&arena->chunks_madvised)) {
+ LinkedList* next = arena->chunks_madvised.next;
+ arena_chunk_t *chunk =
+ LinkedList_Get(arena->chunks_madvised.next,
+ arena_chunk_t, chunks_madvised_elem);
+ hard_purge_chunk(chunk);
+ LinkedList_Remove(&chunk->chunks_madvised_elem);
+ }
+
+ malloc_spin_unlock(&arena->lock);
+}
+
+MOZ_JEMALLOC_API void
+jemalloc_purge_freed_pages_impl()
+{
+ size_t i;
+ for (i = 0; i < narenas; i++) {
+ arena_t *arena = arenas[i];
+ if (arena != NULL)
+ hard_purge_arena(arena);
+ }
+ if (!config_munmap || config_recycle) {
+ malloc_mutex_lock(&chunks_mtx);
+ extent_node_t *node = extent_tree_szad_first(&chunks_szad_mmap);
+ while (node) {
+ pages_decommit(node->addr, node->size);
+ pages_commit(node->addr, node->size);
+ node->zeroed = true;
+ node = extent_tree_szad_next(&chunks_szad_mmap, node);
+ }
+ malloc_mutex_unlock(&chunks_mtx);
+ }
+}
+
+#else /* !defined MALLOC_DOUBLE_PURGE */
+
+MOZ_JEMALLOC_API void
+jemalloc_purge_freed_pages_impl()
+{
+ /* Do nothing. */
+}
+
+#endif /* defined MALLOC_DOUBLE_PURGE */
+
+
+
+#ifdef MOZ_MEMORY_WINDOWS
+void*
+_recalloc(void *ptr, size_t count, size_t size)
+{
+ size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
+ size_t newsize = count * size;
+
+ /*
+ * In order for all trailing bytes to be zeroed, the caller needs to
+ * use calloc(), followed by recalloc(). However, the current calloc()
+ * implementation only zeros the bytes requested, so if recalloc() is
+ * to work 100% correctly, calloc() will need to change to zero
+ * trailing bytes.
+ */
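+ /*
+ * Illustrative example (hypothetical size classes): calloc(1, 10) may
+ * return a 16-byte region with only bytes 0..9 zeroed; a subsequent
+ * _recalloc(ptr, 1, 32) then zeroes bytes 16..31 (oldsize here is the
+ * usable size, 16), leaving bytes 10..15 unzeroed.
+ */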
+
+ ptr = realloc_impl(ptr, newsize);
+ if (ptr != NULL && oldsize < newsize) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0, newsize -
+ oldsize);
+ }
+
+ return ptr;
+}
+
+/*
+ * This impl of _expand doesn't ever actually expand or shrink blocks: it
+ * simply replies that you may continue using a shrunk block.
+ */
+void*
+_expand(void *ptr, size_t newsize)
+{
+ if (isalloc(ptr) >= newsize)
+ return ptr;
+
+ return NULL;
+}
+
+size_t
+_msize(void *ptr)
+{
+
+ return malloc_usable_size_impl(ptr);
+}
+#endif
+
+MOZ_JEMALLOC_API void
+jemalloc_free_dirty_pages_impl(void)
+{
+ size_t i;
+ for (i = 0; i < narenas; i++) {
+ arena_t *arena = arenas[i];
+
+ if (arena != NULL) {
+ malloc_spin_lock(&arena->lock);
+ arena_purge(arena, true);
+ malloc_spin_unlock(&arena->lock);
+ }
+ }
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin library-private functions, used by threading libraries for protection
+ * of malloc during fork(). These functions are only called if the program is
+ * running in threaded mode, so there is no need to check whether the program
+ * is threaded here.
+ */
+
+static void
+_malloc_prefork(void)
+{
+ unsigned i;
+
+ /* Acquire all mutexes in a safe order. */
+
+ malloc_spin_lock(&arenas_lock);
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ malloc_spin_lock(&arenas[i]->lock);
+ }
+
+ malloc_mutex_lock(&base_mtx);
+
+ malloc_mutex_lock(&huge_mtx);
+}
+
+static void
+_malloc_postfork(void)
+{
+ unsigned i;
+
+ /* Release all mutexes, now that fork() has completed. */
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ malloc_mutex_unlock(&base_mtx);
+
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ malloc_spin_unlock(&arenas[i]->lock);
+ }
+ malloc_spin_unlock(&arenas_lock);
+}
+
+/*
+ * End library-private functions.
+ */
+/******************************************************************************/
+
+#ifdef HAVE_DLOPEN
+# include <dlfcn.h>
+#endif
+
+#if defined(MOZ_MEMORY_DARWIN)
+
+#if !defined(MOZ_REPLACE_MALLOC)
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+
+ return (malloc_impl(size));
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+
+ return (calloc_impl(num, size));
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ posix_memalign_impl(&ret, pagesize, size);
+
+ return (ret);
+}
+
+static void *
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
+{
+ return (memalign_impl(alignment, size));
+}
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+
+ /* This function should never be called. */
+ assert(false);
+ return (NULL);
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+ return malloc_good_size_impl(size);
+}
+
+static size_t
+ozone_size(malloc_zone_t *zone, void *ptr)
+{
+ size_t ret = isalloc_validate(ptr);
+ if (ret == 0)
+ ret = szone->size(zone, ptr);
+
+ return ret;
+}
+
+static void
+ozone_free(malloc_zone_t *zone, void *ptr)
+{
+ if (isalloc_validate(ptr) != 0)
+ free_impl(ptr);
+ else {
+ size_t size = szone->size(zone, ptr);
+ if (size != 0)
+ (szone->free)(zone, ptr);
+ /* Otherwise we leak. */
+ }
+}
+
+static void *
+ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+ size_t oldsize;
+ if (ptr == NULL)
+ return (malloc_impl(size));
+
+ oldsize = isalloc_validate(ptr);
+ if (oldsize != 0)
+ return (realloc_impl(ptr, size));
+ else {
+ oldsize = szone->size(zone, ptr);
+ if (oldsize == 0)
+ return (malloc_impl(size));
+ else {
+ void *ret = malloc_impl(size);
+ if (ret != NULL) {
+ memcpy(ret, ptr, (oldsize < size) ? oldsize :
+ size);
+ (szone->free)(zone, ptr);
+ }
+ return (ret);
+ }
+ }
+}
+
+static unsigned
+ozone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
+ unsigned num_requested)
+{
+ /* Don't bother implementing this interface, since it isn't required. */
+ return 0;
+}
+
+static void
+ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num)
+{
+ unsigned i;
+
+ for (i = 0; i < num; i++)
+ ozone_free(zone, to_be_freed[i]);
+}
+
+static void
+ozone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
+{
+ if (isalloc_validate(ptr) != 0) {
+ assert(isalloc_validate(ptr) == size);
+ free_impl(ptr);
+ } else {
+ assert(size == szone->size(zone, ptr));
+ l_szone.m16(zone, ptr, size);
+ }
+}
+
+static void
+ozone_force_lock(malloc_zone_t *zone)
+{
+ _malloc_prefork();
+ szone->introspect->force_lock(zone);
+}
+
+static void
+ozone_force_unlock(malloc_zone_t *zone)
+{
+ szone->introspect->force_unlock(zone);
+ _malloc_postfork();
+}
+
+static size_t
+zone_version_size(int version)
+{
+ switch (version)
+ {
+ case SNOW_LEOPARD_MALLOC_ZONE_T_VERSION:
+ return sizeof(snow_leopard_malloc_zone);
+ case LEOPARD_MALLOC_ZONE_T_VERSION:
+ return sizeof(leopard_malloc_zone);
+ default:
+ case LION_MALLOC_ZONE_T_VERSION:
+ return sizeof(lion_malloc_zone);
+ }
+}
+
+/*
+ * Overlay the default scalable zone (szone) such that existing allocations are
+ * drained, and further allocations come from jemalloc. This is necessary
+ * because Core Foundation directly accesses and uses the szone before the
+ * jemalloc library is even loaded.
+ */
+static void
+szone2ozone(malloc_zone_t *default_zone, size_t size)
+{
+ lion_malloc_zone *l_zone;
+ assert(malloc_initialized);
+
+ /*
+ * Stash a copy of the original szone so that we can call its
+ * functions as needed. Note that internally, the szone stores its
+ * bookkeeping data structures immediately following the malloc_zone_t
+ * header, so when calling szone functions, we need to pass a pointer to
+ * the original zone structure.
+ */
+ memcpy(szone, default_zone, size);
+
+ /* OSX 10.7 allocates the default zone in protected memory. */
+ if (default_zone->version >= LION_MALLOC_ZONE_T_VERSION) {
+ void* start_of_page = (void*)((size_t)(default_zone) & ~pagesize_mask);
+ mprotect (start_of_page, size, PROT_READ | PROT_WRITE);
+ }
+
+ default_zone->size = (void *)ozone_size;
+ default_zone->malloc = (void *)zone_malloc;
+ default_zone->calloc = (void *)zone_calloc;
+ default_zone->valloc = (void *)zone_valloc;
+ default_zone->free = (void *)ozone_free;
+ default_zone->realloc = (void *)ozone_realloc;
+ default_zone->destroy = (void *)zone_destroy;
+ default_zone->batch_malloc = NULL;
+ default_zone->batch_free = ozone_batch_free;
+ default_zone->introspect = ozone_introspect;
+
+ /* Don't modify default_zone->zone_name; Mac libc may rely on the name
+ * being unchanged. See Mozilla bug 694896. */
+
+ ozone_introspect->enumerator = NULL;
+ ozone_introspect->good_size = (void *)zone_good_size;
+ ozone_introspect->check = NULL;
+ ozone_introspect->print = NULL;
+ ozone_introspect->log = NULL;
+ ozone_introspect->force_lock = (void *)ozone_force_lock;
+ ozone_introspect->force_unlock = (void *)ozone_force_unlock;
+ ozone_introspect->statistics = NULL;
+
+ /* Platform-dependent structs */
+ l_zone = (lion_malloc_zone*)(default_zone);
+
+ if (default_zone->version >= SNOW_LEOPARD_MALLOC_ZONE_T_VERSION) {
+ l_zone->m15 = (void (*)())zone_memalign;
+ l_zone->m16 = (void (*)())ozone_free_definite_size;
+ l_ozone_introspect.m9 = NULL;
+ }
+
+ if (default_zone->version >= LION_MALLOC_ZONE_T_VERSION) {
+ l_zone->m17 = NULL;
+ l_ozone_introspect.m10 = NULL;
+ l_ozone_introspect.m11 = NULL;
+ l_ozone_introspect.m12 = NULL;
+ l_ozone_introspect.m13 = NULL;
+ }
+}
+#endif
+
+__attribute__((constructor))
+void
+jemalloc_darwin_init(void)
+{
+ if (malloc_init_hard())
+ abort();
+}
+
+#endif
+
+/*
+ * is_malloc(malloc_impl) is some macro magic to detect if malloc_impl is
+ * defined as "malloc" in mozmemory_wrap.h
+ */
+#define malloc_is_malloc 1
+#define is_malloc_(a) malloc_is_ ## a
+#define is_malloc(a) is_malloc_(a)
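+/*
+ * For example, when mozmemory_wrap.h defines malloc_impl as malloc,
+ * is_malloc(malloc_impl) expands to is_malloc_(malloc), then to
+ * malloc_is_malloc, and thus to 1, so the glibc hook block below is compiled.
+ * If malloc_impl expands to any other (e.g. prefixed) name, the resulting
+ * token is undefined and the preprocessor evaluates it as 0.
+ */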
+
+#if !defined(MOZ_MEMORY_DARWIN) && (is_malloc(malloc_impl) == 1)
+# if defined(__GLIBC__) && !defined(__UCLIBC__)
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (bug 493541).
+ *
+ * These definitions interpose hooks in glibc. The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+MOZ_MEMORY_API void (*__free_hook)(void *ptr) = free_impl;
+MOZ_MEMORY_API void *(*__malloc_hook)(size_t size) = malloc_impl;
+MOZ_MEMORY_API void *(*__realloc_hook)(void *ptr, size_t size) = realloc_impl;
+MOZ_MEMORY_API void *(*__memalign_hook)(size_t alignment, size_t size) = MEMALIGN;
+
+# elif defined(RTLD_DEEPBIND)
+/*
+ * XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
+ * implementations permit similar inconsistencies? Should STV_SINGLETON
+ * visibility be used for interposition where available?
+ */
+# error "Interposing malloc is unsafe on this system without libc malloc hooks."
+# endif
+#endif
+
+#ifdef MOZ_MEMORY_WINDOWS
+/*
+ * In the new style jemalloc integration jemalloc is built as a separate
+ * shared library. Since we're no longer hooking into the CRT binary,
+ * we need to initialize the heap at the first opportunity we get.
+ * DLL_PROCESS_ATTACH in DllMain is that opportunity.
+ */
+BOOL APIENTRY DllMain(HINSTANCE hModule,
+ DWORD reason,
+ LPVOID lpReserved)
+{
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+ /* Don't force the system to page DllMain back in every time
+ * we create/destroy a thread */
+ DisableThreadLibraryCalls(hModule);
+ /* Initialize the heap */
+ malloc_init_hard();
+ break;
+
+ case DLL_PROCESS_DETACH:
+ break;
+
+ }
+
+ return TRUE;
+}
+#endif
diff --git a/memory/mozjemalloc/jemalloc_types.h b/memory/mozjemalloc/jemalloc_types.h
new file mode 100644
index 000000000..ae8dc4414
--- /dev/null
+++ b/memory/mozjemalloc/jemalloc_types.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
+/* vim:set softtabstop=8 shiftwidth=8: */
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _JEMALLOC_TYPES_H_
+#define _JEMALLOC_TYPES_H_
+
+/* grab size_t */
+#ifdef _MSC_VER
+#include <crtdefs.h>
+#else
+#include <stddef.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned char jemalloc_bool;
+
+/*
+ * jemalloc_stats() is not a stable interface. When using jemalloc_stats_t, be
+ * sure that the compiled results of jemalloc.c are in sync with this header
+ * file.
+ */
+typedef struct {
+ /*
+ * Run-time configuration settings.
+ */
+ jemalloc_bool opt_abort; /* abort(3) on error? */
+ jemalloc_bool opt_junk; /* Fill allocated memory with 0xe4? */
+ jemalloc_bool opt_poison; /* Fill free memory with 0xe5? */
+ jemalloc_bool opt_utrace; /* Trace all allocation events? */
+ jemalloc_bool opt_sysv; /* SysV semantics? */
+ jemalloc_bool opt_xmalloc; /* abort(3) on OOM? */
+ jemalloc_bool opt_zero; /* Fill allocated memory with 0x0? */
+ size_t narenas; /* Number of arenas. */
+ size_t balance_threshold; /* Arena contention rebalance threshold. */
+ size_t quantum; /* Allocation quantum. */
+ size_t small_max; /* Max quantum-spaced allocation size. */
+ size_t large_max; /* Max sub-chunksize allocation size. */
+ size_t chunksize; /* Size of each virtual memory mapping. */
+ size_t dirty_max; /* Max dirty pages per arena. */
+
+ /*
+ * Current memory usage statistics.
+ */
+ size_t mapped; /* Bytes mapped (not necessarily committed). */
+ size_t allocated; /* Bytes allocated (committed, in use by application). */
+ size_t waste; /* Bytes committed, not in use by the
+ application, and not intentionally left
+ unused (i.e., not dirty). */
+ size_t page_cache; /* Committed, unused pages kept around as a
+ cache. (jemalloc calls these "dirty".) */
+ size_t bookkeeping; /* Committed bytes used internally by the
+ allocator. */
+ size_t bin_unused; /* Bytes committed to a bin but currently unused. */
+} jemalloc_stats_t;
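+
+/*
+ * Illustrative usage sketch; jemalloc_stats() itself is declared elsewhere
+ * (e.g. in mozmemory.h), not in this header:
+ *
+ *   jemalloc_stats_t stats;
+ *   jemalloc_stats(&stats);
+ *   printf("mapped=%zu allocated=%zu waste=%zu\n",
+ *          stats.mapped, stats.allocated, stats.waste);
+ */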
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* _JEMALLOC_TYPES_H_ */
diff --git a/memory/mozjemalloc/linkedlist.h b/memory/mozjemalloc/linkedlist.h
new file mode 100644
index 000000000..d75531410
--- /dev/null
+++ b/memory/mozjemalloc/linkedlist.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8; indent-tabs-mode: t -*- */
+/* vim:set softtabstop=8 shiftwidth=8 noet: */
+/*-
+ * Copyright (C) the Mozilla Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *******************************************************************************/
+
+#ifndef linkedlist_h__
+#define linkedlist_h__
+
+#include <stddef.h>
+
+typedef struct LinkedList_s LinkedList;
+
+struct LinkedList_s {
+ LinkedList *next;
+ LinkedList *prev;
+};
+
+/* Convert from LinkedList* to foo*. */
+#define LinkedList_Get(e, type, prop) \
+ (type*)((char*)(e) - offsetof(type, prop))
+
+/* Insert |e| at the beginning of |l|. */
+void LinkedList_InsertHead(LinkedList *l, LinkedList *e)
+{
+ e->next = l;
+ e->prev = l->prev;
+ e->next->prev = e;
+ e->prev->next = e;
+}
+
+void LinkedList_Remove(LinkedList *e)
+{
+ e->prev->next = e->next;
+ e->next->prev = e->prev;
+ e->next = e;
+ e->prev = e;
+}
+
+bool LinkedList_IsEmpty(LinkedList *e)
+{
+ return e->next == e;
+}
+
+void LinkedList_Init(LinkedList *e)
+{
+ e->next = e;
+ e->prev = e;
+}
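+
+/*
+ * Illustrative usage sketch (the struct and variable names below are
+ * hypothetical, not part of this allocator):
+ *
+ *   typedef struct {
+ *       int        value;
+ *       LinkedList entry;
+ *   } item_t;
+ *
+ *   LinkedList head;
+ *   LinkedList_Init(&head);
+ *
+ *   item_t item;
+ *   LinkedList_InsertHead(&head, &item.entry);
+ *
+ *   // Walk the list, recovering the enclosing item_t from each node.
+ *   LinkedList *e;
+ *   for (e = head.next; e != &head; e = e->next) {
+ *       item_t *i = LinkedList_Get(e, item_t, entry);
+ *       ...
+ *   }
+ */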
+
+#endif
diff --git a/memory/mozjemalloc/moz.build b/memory/mozjemalloc/moz.build
new file mode 100644
index 000000000..0660d1300
--- /dev/null
+++ b/memory/mozjemalloc/moz.build
@@ -0,0 +1,44 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'jemalloc_types.h',
+]
+
+if not CONFIG['MOZ_JEMALLOC4']:
+ SOURCES += [
+ 'jemalloc.c',
+ ]
+ FINAL_LIBRARY = 'memory'
+
+ # See bug 422055.
+ if CONFIG['OS_ARCH'] == 'SunOS' and not CONFIG['GNU_CC'] \
+ and CONFIG['MOZ_OPTIMIZE']:
+ CFLAGS += ['-xO5']
+
+# For non-release/esr builds, enable (some) fatal jemalloc assertions. This
+# helps us catch memory errors.
+if CONFIG['MOZ_UPDATE_CHANNEL'] not in ('release', 'esr'):
+ DEFINES['MOZ_JEMALLOC_HARD_ASSERTS'] = True
+
+DEFINES['abort'] = 'moz_abort'
+
+DEFINES['MOZ_JEMALLOC_IMPL'] = True
+
+#XXX: PGO on Linux causes problems here
+# See bug 419470
+if CONFIG['OS_TARGET'] == 'Linux':
+ NO_PGO = True
+
+LOCAL_INCLUDES += [
+ '/memory/build',
+]
+
+if CONFIG['GNU_CC']:
+ CFLAGS += ['-Wno-unused'] # too many annoying warnings from mfbt/ headers
+
+if CONFIG['_MSC_VER']:
+ CFLAGS += ['-wd4273'] # inconsistent dll linkage (bug 558163)
diff --git a/memory/mozjemalloc/osx_zone_types.h b/memory/mozjemalloc/osx_zone_types.h
new file mode 100644
index 000000000..097953590
--- /dev/null
+++ b/memory/mozjemalloc/osx_zone_types.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
+/* vim:set softtabstop=8 shiftwidth=8: */
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The purpose of these structs is described in jemalloc.c, in the comment
+ * marked MALLOC_ZONE_T_NOTE.
+ *
+ * We need access to some structs that come with a specific version of OSX
+ * but can't copy them here because of licensing restrictions (see bug
+ * 603655). The structs below are equivalent in that they'll always be
+ * compiled to the same representation on all platforms.
+ *
+ * `void*` and `void (*)()` may not be the same size on weird platforms, but
+ * the size of a function pointer shouldn't vary according to its parameters
+ * or return type.
+ *
+ * Apple's version of these structures, complete with member names and
+ * comments, is available online at
+ *
+ * http://www.opensource.apple.com/source/Libc/Libc-763.12/include/malloc/malloc.h
+ *
+ */
+
+/*
+ * OSX 10.5 - Leopard
+ */
+typedef struct _leopard_malloc_zone {
+ void *m1;
+ void *m2;
+ void (*m3)();
+ void (*m4)();
+ void (*m5)();
+ void (*m6)();
+ void (*m7)();
+ void (*m8)();
+ void (*m9)();
+ void *m10;
+ void (*m11)();
+ void (*m12)();
+ void *m13;
+ unsigned m14;
+} leopard_malloc_zone;
+
+/*
+ * OSX 10.6 - Snow Leopard
+ */
+typedef struct _snow_leopard_malloc_zone {
+ void *m1;
+ void *m2;
+ void (*m3)();
+ void (*m4)();
+ void (*m5)();
+ void (*m6)();
+ void (*m7)();
+ void (*m8)();
+ void (*m9)();
+ void *m10;
+ void (*m11)();
+ void (*m12)();
+ void *m13;
+ unsigned m14;
+ void (*m15)(); // this member added in 10.6
+ void (*m16)(); // this member added in 10.6
+} snow_leopard_malloc_zone;
+
+typedef struct _snow_leopard_malloc_introspection {
+ void (*m1)();
+ void (*m2)();
+ void (*m3)();
+ void (*m4)();
+ void (*m5)();
+ void (*m6)();
+ void (*m7)();
+ void (*m8)();
+ void (*m9)(); // this member added in 10.6
+} snow_leopard_malloc_introspection;
+
+/*
+ * OSX 10.7 - Lion
+ */
+typedef struct _lion_malloc_zone {
+ void *m1;
+ void *m2;
+ void (*m3)();
+ void (*m4)();
+ void (*m5)();
+ void (*m6)();
+ void (*m7)();
+ void (*m8)();
+ void (*m9)();
+ void *m10;
+ void (*m11)();
+ void (*m12)();
+ void *m13;
+ unsigned m14;
+ void (*m15)();
+ void (*m16)();
+ void (*m17)(); // this member added in 10.7
+} lion_malloc_zone;
+
+typedef struct _lion_malloc_introspection {
+ void (*m1)();
+ void (*m2)();
+ void (*m3)();
+ void (*m4)();
+ void (*m5)();
+ void (*m6)();
+ void (*m7)();
+ void (*m8)();
+ void (*m9)();
+ void (*m10)(); // this member added in 10.7
+ void (*m11)(); // this member added in 10.7
+ void (*m12)(); // this member added in 10.7
+#ifdef __BLOCKS__
+ void (*m13)(); // this member added in 10.7
+#else
+ void *m13; // this member added in 10.7
+#endif
+} lion_malloc_introspection;
diff --git a/memory/mozjemalloc/ql.h b/memory/mozjemalloc/ql.h
new file mode 100644
index 000000000..000cd2380
--- /dev/null
+++ b/memory/mozjemalloc/ql.h
@@ -0,0 +1,114 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2002 Jason Evans <jasone@canonware.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer
+ * unmodified other than the allowable addition of one or more
+ * copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/*
+ * List definitions.
+ */
+#define ql_head(a_type) \
+struct { \
+ a_type *qlh_first; \
+}
+
+#define ql_head_initializer(a_head) {NULL}
+
+#define ql_elm(a_type) qr(a_type)
+
+/* List functions. */
+#define ql_new(a_head) do { \
+ (a_head)->qlh_first = NULL; \
+} while (0)
+
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+
+#define ql_first(a_head) ((a_head)->qlh_first)
+
+#define ql_last(a_head, a_field) \
+ ((ql_first(a_head) != NULL) \
+ ? qr_prev(ql_first(a_head), a_field) : NULL)
+
+#define ql_next(a_head, a_elm, a_field) \
+ ((ql_last(a_head, a_field) != (a_elm)) \
+ ? qr_next((a_elm), a_field) : NULL)
+
+#define ql_prev(a_head, a_elm, a_field) \
+ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
+ : NULL)
+
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+ qr_before_insert((a_qlelm), (a_elm), a_field); \
+ if (ql_first(a_head) == (a_qlelm)) { \
+ ql_first(a_head) = (a_elm); \
+ } \
+} while (0)
+
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
+ qr_after_insert((a_qlelm), (a_elm), a_field)
+
+#define ql_head_insert(a_head, a_elm, a_field) do { \
+ if (ql_first(a_head) != NULL) { \
+ qr_before_insert(ql_first(a_head), (a_elm), a_field); \
+ } \
+ ql_first(a_head) = (a_elm); \
+} while (0)
+
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
+ if (ql_first(a_head) != NULL) { \
+ qr_before_insert(ql_first(a_head), (a_elm), a_field); \
+ } \
+ ql_first(a_head) = qr_next((a_elm), a_field); \
+} while (0)
+
+#define ql_remove(a_head, a_elm, a_field) do { \
+ if (ql_first(a_head) == (a_elm)) { \
+ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
+ } \
+ if (ql_first(a_head) != (a_elm)) { \
+ qr_remove((a_elm), a_field); \
+ } else { \
+ ql_first(a_head) = NULL; \
+ } \
+} while (0)
+
+#define ql_head_remove(a_head, a_type, a_field) do { \
+ a_type *t = ql_first(a_head); \
+ ql_remove((a_head), t, a_field); \
+} while (0)
+
+#define ql_tail_remove(a_head, a_type, a_field) do { \
+ a_type *t = ql_last(a_head, a_field); \
+ ql_remove((a_head), t, a_field); \
+} while (0)
+
+#define ql_foreach(a_var, a_head, a_field) \
+ qr_foreach((a_var), ql_first(a_head), a_field)
+
+#define ql_reverse_foreach(a_var, a_head, a_field) \
+ qr_reverse_foreach((a_var), ql_first(a_head), a_field)
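+
+/*
+ * Illustrative usage sketch (the struct and variable names below are
+ * hypothetical, not part of this file):
+ *
+ *   typedef struct widget_s widget_t;
+ *   struct widget_s {
+ *       int              id;
+ *       ql_elm(widget_t) link;
+ *   };
+ *
+ *   ql_head(widget_t) widgets = ql_head_initializer(widgets);
+ *
+ *   widget_t w;
+ *   ql_elm_new(&w, link);
+ *   ql_tail_insert(&widgets, &w, link);
+ *
+ *   widget_t *it;
+ *   ql_foreach(it, &widgets, link) {
+ *       ...
+ *   }
+ */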
diff --git a/memory/mozjemalloc/qr.h b/memory/mozjemalloc/qr.h
new file mode 100644
index 000000000..ee60491d7
--- /dev/null
+++ b/memory/mozjemalloc/qr.h
@@ -0,0 +1,98 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2002 Jason Evans <jasone@canonware.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer
+ * unmodified other than the allowable addition of one or more
+ * copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/* Ring definitions. */
+#define qr(a_type) \
+struct { \
+ a_type *qre_next; \
+ a_type *qre_prev; \
+}
+
+/* Ring functions. */
+#define qr_new(a_qr, a_field) do { \
+ (a_qr)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
+ (a_qr)->a_field.qre_next = (a_qrelm); \
+ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
+ (a_qrelm)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_after_insert(a_qrelm, a_qr, a_field) \
+ do \
+ { \
+ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
+ (a_qr)->a_field.qre_prev = (a_qrelm); \
+ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
+ (a_qrelm)->a_field.qre_next = (a_qr); \
+ } while (0)
+
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+ void *t; \
+ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
+ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
+ t = (a_qr_a)->a_field.qre_prev; \
+ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
+ (a_qr_b)->a_field.qre_prev = t; \
+} while (0)
+
+/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code. */
+#define qr_split(a_qr_a, a_qr_b, a_field) \
+ qr_meld((a_qr_a), (a_qr_b), a_field)
+
+#define qr_remove(a_qr, a_field) do { \
+ (a_qr)->a_field.qre_prev->a_field.qre_next \
+ = (a_qr)->a_field.qre_next; \
+ (a_qr)->a_field.qre_next->a_field.qre_prev \
+ = (a_qr)->a_field.qre_prev; \
+ (a_qr)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_foreach(var, a_qr, a_field) \
+ for ((var) = (a_qr); \
+ (var) != NULL; \
+ (var) = (((var)->a_field.qre_next != (a_qr)) \
+ ? (var)->a_field.qre_next : NULL))
+
+#define qr_reverse_foreach(var, a_qr, a_field) \
+ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
+ (var) != NULL; \
+ (var) = (((var) != (a_qr)) \
+ ? (var)->a_field.qre_prev : NULL))
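+
+/*
+ * Illustrative usage sketch (the struct and variable names below are
+ * hypothetical, not part of this file):
+ *
+ *   typedef struct elem_s elem_t;
+ *   struct elem_s {
+ *       int        value;
+ *       qr(elem_t) link;
+ *   };
+ *
+ *   elem_t a, b;
+ *   qr_new(&a, link);
+ *   qr_new(&b, link);
+ *   qr_after_insert(&a, &b, link);   // the ring is now a <-> b <-> a
+ *
+ *   elem_t *it;
+ *   qr_foreach(it, &a, link) {
+ *       ...
+ *   }
+ */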
diff --git a/memory/mozjemalloc/rb.h b/memory/mozjemalloc/rb.h
new file mode 100644
index 000000000..43d8569d0
--- /dev/null
+++ b/memory/mozjemalloc/rb.h
@@ -0,0 +1,982 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2008 Jason Evans <jasone@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer
+ * unmodified other than the allowable addition of one or more
+ * copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************
+ *
+ * cpp macro implementation of left-leaning red-black trees.
+ *
+ * Usage:
+ *
+ * (Optional.)
+ * #define SIZEOF_PTR ...
+ * #define SIZEOF_PTR_2POW ...
+ * #define RB_NO_C99_VARARRAYS
+ *
+ * (Optional, see assert(3).)
+ * #define NDEBUG
+ *
+ * (Required.)
+ * #include <assert.h>
+ * #include <rb.h>
+ * ...
+ *
+ * All operations are done non-recursively. Parent pointers are not used, and
+ * color bits are stored in the least significant bit of right-child pointers,
+ * thus making node linkage as compact as is possible for red-black trees.
+ *
+ * Some macros use a comparison function pointer, which is expected to have the
+ * following prototype:
+ *
+ * int (a_cmp *)(a_type *a_node, a_type *a_other);
+ *                         ^^^^^^
+ *                      or a_key
+ *
+ * Interpretation of comparison function return values:
+ *
+ * -1 : a_node < a_other
+ * 0 : a_node == a_other
+ * 1 : a_node > a_other
+ *
+ * In all cases, the a_node or a_key macro argument is the first argument to the
+ * comparison function, which makes it possible to write comparison functions
+ * that treat the first argument specially.
+ *
+ ******************************************************************************/
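+
+/*
+ * Illustrative usage sketch (the node type, field, and comparison function
+ * below are hypothetical, not part of this file):
+ *
+ *   typedef struct example_node_s example_node_t;
+ *   struct example_node_s {
+ *       int                     key;
+ *       rb_node(example_node_t) link;
+ *   };
+ *   typedef rb_tree(example_node_t) example_tree_t;
+ *
+ *   static int
+ *   example_cmp(example_node_t *a, example_node_t *b)
+ *   {
+ *       return ((a->key > b->key) - (a->key < b->key));
+ *   }
+ */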
+
+#ifndef RB_H_
+#define RB_H_
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 178995 2008-05-14 18:33:13Z jasone $");
+#endif
+
+/* Node structure. */
+#define rb_node(a_type) \
+struct { \
+ a_type *rbn_left; \
+ a_type *rbn_right_red; \
+}
+
+/* Root structure. */
+#define rb_tree(a_type) \
+struct { \
+ a_type *rbt_root; \
+ a_type rbt_nil; \
+}
+
+/* Left accessors. */
+#define rbp_left_get(a_type, a_field, a_node) \
+ ((a_node)->a_field.rbn_left)
+#define rbp_left_set(a_type, a_field, a_node, a_left) do { \
+ (a_node)->a_field.rbn_left = a_left; \
+} while (0)
+
+/* Right accessors. */
+#define rbp_right_get(a_type, a_field, a_node) \
+ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
+ & ((ssize_t)-2)))
+#define rbp_right_set(a_type, a_field, a_node, a_right) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
+ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
+} while (0)
+
+/* Color accessors. */
+#define rbp_red_get(a_type, a_field, a_node) \
+ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
+ & ((size_t)1)))
+#define rbp_color_set(a_type, a_field, a_node, a_red) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
+ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
+ | ((ssize_t)a_red)); \
+} while (0)
+#define rbp_red_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
+ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
+} while (0)
+#define rbp_black_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
+ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
+} while (0)
+
+/* Node initializer. */
+#define rbp_node_new(a_type, a_field, a_tree, a_node) do { \
+ rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
+ rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+} while (0)
+
+/* Tree initializer. */
+#define rb_new(a_type, a_field, a_tree) do { \
+ (a_tree)->rbt_root = &(a_tree)->rbt_nil; \
+ rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil); \
+ rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil); \
+} while (0)
+
+/* Tree operations. */
+#define rbp_black_height(a_type, a_field, a_tree, r_height) do { \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0; \
+ rbp_bh_t != &(a_tree)->rbt_nil; \
+ rbp_bh_t = rbp_left_get(a_type, a_field, rbp_bh_t)) { \
+ if (rbp_red_get(a_type, a_field, rbp_bh_t) == false) { \
+ (r_height)++; \
+ } \
+ } \
+} while (0)
+
+#define rbp_first(a_type, a_field, a_tree, a_root, r_node) do { \
+ for ((r_node) = (a_root); \
+ rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
+ (r_node) = rbp_left_get(a_type, a_field, (r_node))) { \
+ } \
+} while (0)
+
+#define rbp_last(a_type, a_field, a_tree, a_root, r_node) do { \
+ for ((r_node) = (a_root); \
+ rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
+ (r_node) = rbp_right_get(a_type, a_field, (r_node))) { \
+ } \
+} while (0)
+
+#define rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ if (rbp_right_get(a_type, a_field, (a_node)) \
+ != &(a_tree)->rbt_nil) { \
+ rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type, \
+ a_field, (a_node)), (r_node)); \
+ } else { \
+ a_type *rbp_n_t = (a_tree)->rbt_root; \
+ assert(rbp_n_t != &(a_tree)->rbt_nil); \
+ (r_node) = &(a_tree)->rbt_nil; \
+ while (true) { \
+ int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t); \
+ if (rbp_n_cmp < 0) { \
+ (r_node) = rbp_n_t; \
+ rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t); \
+ } else if (rbp_n_cmp > 0) { \
+ rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_n_t != &(a_tree)->rbt_nil); \
+ } \
+ } \
+} while (0)
+
+#define rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
+ rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type, \
+ a_field, (a_node)), (r_node)); \
+ } else { \
+ a_type *rbp_p_t = (a_tree)->rbt_root; \
+ assert(rbp_p_t != &(a_tree)->rbt_nil); \
+ (r_node) = &(a_tree)->rbt_nil; \
+ while (true) { \
+ int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t); \
+ if (rbp_p_cmp < 0) { \
+ rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t); \
+ } else if (rbp_p_cmp > 0) { \
+ (r_node) = rbp_p_t; \
+ rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_p_t != &(a_tree)->rbt_nil); \
+ } \
+ } \
+} while (0)
+
+#define rb_first(a_type, a_field, a_tree, r_node) do { \
+ rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node)); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_last(a_type, a_field, a_tree, r_node) do { \
+ rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, r_node); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+ int rbp_se_cmp; \
+ (r_node) = (a_tree)->rbt_root; \
+ while ((r_node) != &(a_tree)->rbt_nil \
+ && (rbp_se_cmp = (a_cmp)((a_key), (r_node))) != 0) { \
+ if (rbp_se_cmp < 0) { \
+ (r_node) = rbp_left_get(a_type, a_field, (r_node)); \
+ } else { \
+ (r_node) = rbp_right_get(a_type, a_field, (r_node)); \
+ } \
+ } \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+/*
+ * Find a match if it exists. Otherwise, find the next greater node, if one
+ * exists.
+ */
+#define rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+ a_type *rbp_ns_t = (a_tree)->rbt_root; \
+ (r_node) = NULL; \
+ while (rbp_ns_t != &(a_tree)->rbt_nil) { \
+ int rbp_ns_cmp = (a_cmp)((a_key), rbp_ns_t); \
+ if (rbp_ns_cmp < 0) { \
+ (r_node) = rbp_ns_t; \
+ rbp_ns_t = rbp_left_get(a_type, a_field, rbp_ns_t); \
+ } else if (rbp_ns_cmp > 0) { \
+ rbp_ns_t = rbp_right_get(a_type, a_field, rbp_ns_t); \
+ } else { \
+ (r_node) = rbp_ns_t; \
+ break; \
+ } \
+ } \
+} while (0)
+
+/*
+ * Find a match if it exists. Otherwise, find the previous lesser node, if one
+ * exists.
+ */
+#define rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+ a_type *rbp_ps_t = (a_tree)->rbt_root; \
+ (r_node) = NULL; \
+ while (rbp_ps_t != &(a_tree)->rbt_nil) { \
+ int rbp_ps_cmp = (a_cmp)((a_key), rbp_ps_t); \
+ if (rbp_ps_cmp < 0) { \
+ rbp_ps_t = rbp_left_get(a_type, a_field, rbp_ps_t); \
+ } else if (rbp_ps_cmp > 0) { \
+ (r_node) = rbp_ps_t; \
+ rbp_ps_t = rbp_right_get(a_type, a_field, rbp_ps_t); \
+ } else { \
+ (r_node) = rbp_ps_t; \
+ break; \
+ } \
+ } \
+} while (0)
+
+#define rbp_rotate_left(a_type, a_field, a_node, r_node) do { \
+ (r_node) = rbp_right_get(a_type, a_field, (a_node)); \
+ rbp_right_set(a_type, a_field, (a_node), \
+ rbp_left_get(a_type, a_field, (r_node))); \
+ rbp_left_set(a_type, a_field, (r_node), (a_node)); \
+} while (0)
+
+#define rbp_rotate_right(a_type, a_field, a_node, r_node) do { \
+ (r_node) = rbp_left_get(a_type, a_field, (a_node)); \
+ rbp_left_set(a_type, a_field, (a_node), \
+ rbp_right_get(a_type, a_field, (r_node))); \
+ rbp_right_set(a_type, a_field, (r_node), (a_node)); \
+} while (0)
+
+#define rbp_lean_left(a_type, a_field, a_node, r_node) do { \
+ bool rbp_ll_red; \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ rbp_ll_red = rbp_red_get(a_type, a_field, (a_node)); \
+ rbp_color_set(a_type, a_field, (r_node), rbp_ll_red); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+} while (0)
+
+#define rbp_lean_right(a_type, a_field, a_node, r_node) do { \
+ bool rbp_lr_red; \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_lr_red = rbp_red_get(a_type, a_field, (a_node)); \
+ rbp_color_set(a_type, a_field, (r_node), rbp_lr_red); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+} while (0)
+
+#define rbp_move_red_left(a_type, a_field, a_node, r_node) do { \
+ a_type *rbp_mrl_t, *rbp_mrl_u; \
+ rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node)); \
+ rbp_red_set(a_type, a_field, rbp_mrl_t); \
+ rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node)); \
+ rbp_mrl_u = rbp_left_get(a_type, a_field, rbp_mrl_t); \
+ if (rbp_red_get(a_type, a_field, rbp_mrl_u)) { \
+ rbp_rotate_right(a_type, a_field, rbp_mrl_t, rbp_mrl_u); \
+ rbp_right_set(a_type, a_field, (a_node), rbp_mrl_u); \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node)); \
+ if (rbp_red_get(a_type, a_field, rbp_mrl_t)) { \
+ rbp_black_set(a_type, a_field, rbp_mrl_t); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrl_t); \
+ rbp_left_set(a_type, a_field, (r_node), rbp_mrl_t); \
+ } else { \
+ rbp_black_set(a_type, a_field, (a_node)); \
+ } \
+ } else { \
+ rbp_red_set(a_type, a_field, (a_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ } \
+} while (0)
+
+#define rbp_move_red_right(a_type, a_field, a_node, r_node) do { \
+ a_type *rbp_mrr_t; \
+ rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node)); \
+ if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
+ a_type *rbp_mrr_u, *rbp_mrr_v; \
+ rbp_mrr_u = rbp_right_get(a_type, a_field, rbp_mrr_t); \
+ rbp_mrr_v = rbp_left_get(a_type, a_field, rbp_mrr_u); \
+ if (rbp_red_get(a_type, a_field, rbp_mrr_v)) { \
+ rbp_color_set(a_type, a_field, rbp_mrr_u, \
+ rbp_red_get(a_type, a_field, (a_node))); \
+ rbp_black_set(a_type, a_field, rbp_mrr_v); \
+ rbp_rotate_left(a_type, a_field, rbp_mrr_t, rbp_mrr_u); \
+ rbp_left_set(a_type, a_field, (a_node), rbp_mrr_u); \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
+ rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
+ } else { \
+ rbp_color_set(a_type, a_field, rbp_mrr_t, \
+ rbp_red_get(a_type, a_field, (a_node))); \
+ rbp_red_set(a_type, a_field, rbp_mrr_u); \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
+ rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
+ } \
+ rbp_red_set(a_type, a_field, (a_node)); \
+ } else { \
+ rbp_red_set(a_type, a_field, rbp_mrr_t); \
+ rbp_mrr_t = rbp_left_get(a_type, a_field, rbp_mrr_t); \
+ if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
+ rbp_black_set(a_type, a_field, rbp_mrr_t); \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
+ rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
+ } else { \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ } \
+ } \
+} while (0)
+
+#define rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do { \
+ a_type rbp_i_s; \
+ a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u; \
+ int rbp_i_cmp = 0; \
+ rbp_i_g = &(a_tree)->rbt_nil; \
+ rbp_left_set(a_type, a_field, &rbp_i_s, (a_tree)->rbt_root); \
+ rbp_right_set(a_type, a_field, &rbp_i_s, &(a_tree)->rbt_nil); \
+ rbp_black_set(a_type, a_field, &rbp_i_s); \
+ rbp_i_p = &rbp_i_s; \
+ rbp_i_c = (a_tree)->rbt_root; \
+ /* Iteratively search down the tree for the insertion point, */\
+ /* splitting 4-nodes as they are encountered. At the end of each */\
+ /* iteration, rbp_i_g->rbp_i_p->rbp_i_c is a 3-level path down */\
+ /* the tree, assuming a sufficiently deep tree. */\
+ while (rbp_i_c != &(a_tree)->rbt_nil) { \
+ rbp_i_t = rbp_left_get(a_type, a_field, rbp_i_c); \
+ rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t); \
+ if (rbp_red_get(a_type, a_field, rbp_i_t) \
+ && rbp_red_get(a_type, a_field, rbp_i_u)) { \
+ /* rbp_i_c is the top of a logical 4-node, so split it. */\
+ /* This iteration does not move down the tree, due to the */\
+ /* disruptiveness of node splitting. */\
+ /* */\
+ /* Rotate right. */\
+ rbp_rotate_right(a_type, a_field, rbp_i_c, rbp_i_t); \
+ /* Pass red links up one level. */\
+ rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t); \
+ rbp_black_set(a_type, a_field, rbp_i_u); \
+ if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) { \
+ rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t); \
+ rbp_i_c = rbp_i_t; \
+ } else { \
+ /* rbp_i_c was the right child of rbp_i_p, so rotate */\
+ /* left in order to maintain the left-leaning */\
+ /* invariant. */\
+ assert(rbp_right_get(a_type, a_field, rbp_i_p) \
+ == rbp_i_c); \
+ rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t); \
+ rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u); \
+ if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
+ rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u); \
+ } else { \
+ assert(rbp_right_get(a_type, a_field, rbp_i_g) \
+ == rbp_i_p); \
+ rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u); \
+ } \
+ rbp_i_p = rbp_i_u; \
+ rbp_i_cmp = (a_cmp)((a_node), rbp_i_p); \
+ if (rbp_i_cmp < 0) { \
+ rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p); \
+ } else { \
+ assert(rbp_i_cmp > 0); \
+ rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p); \
+ } \
+ continue; \
+ } \
+ } \
+ rbp_i_g = rbp_i_p; \
+ rbp_i_p = rbp_i_c; \
+ rbp_i_cmp = (a_cmp)((a_node), rbp_i_c); \
+ if (rbp_i_cmp < 0) { \
+ rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c); \
+ } else { \
+ assert(rbp_i_cmp > 0); \
+ rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c); \
+ } \
+ } \
+ /* rbp_i_p now refers to the node under which to insert. */\
+ rbp_node_new(a_type, a_field, a_tree, (a_node)); \
+ if (rbp_i_cmp > 0) { \
+ rbp_right_set(a_type, a_field, rbp_i_p, (a_node)); \
+ rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t); \
+ if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) { \
+ rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_t); \
+ } else if (rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
+ rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_t); \
+ } \
+ } else { \
+ rbp_left_set(a_type, a_field, rbp_i_p, (a_node)); \
+ } \
+ /* Update the root and make sure that it is black. */\
+ (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_i_s); \
+ rbp_black_set(a_type, a_field, (a_tree)->rbt_root); \
+} while (0)
+
+#define rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do { \
+ a_type rbp_r_s; \
+ a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u; \
+ int rbp_r_cmp; \
+ rbp_left_set(a_type, a_field, &rbp_r_s, (a_tree)->rbt_root); \
+ rbp_right_set(a_type, a_field, &rbp_r_s, &(a_tree)->rbt_nil); \
+ rbp_black_set(a_type, a_field, &rbp_r_s); \
+ rbp_r_p = &rbp_r_s; \
+ rbp_r_c = (a_tree)->rbt_root; \
+ rbp_r_xp = &(a_tree)->rbt_nil; \
+ /* Iterate down the tree, but always transform 2-nodes to 3- or */\
+ /* 4-nodes in order to maintain the invariant that the current */\
+ /* node is not a 2-node. This allows simple deletion once a leaf */\
+ /* is reached. Handle the root specially though, since there may */\
+ /* be no way to convert it from a 2-node to a 3-node. */\
+ rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
+ if (rbp_r_cmp < 0) { \
+ rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_t) == false \
+ && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
+ /* Apply standard transform to prepare for left move. */\
+ rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t); \
+ rbp_black_set(a_type, a_field, rbp_r_t); \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ /* Move left. */\
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
+ } \
+ } else { \
+ if (rbp_r_cmp == 0) { \
+ assert((a_node) == rbp_r_c); \
+ if (rbp_right_get(a_type, a_field, rbp_r_c) \
+ == &(a_tree)->rbt_nil) { \
+ /* Delete root node (which is also a leaf node). */\
+ if (rbp_left_get(a_type, a_field, rbp_r_c) \
+ != &(a_tree)->rbt_nil) { \
+ rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t); \
+ rbp_right_set(a_type, a_field, rbp_r_t, \
+ &(a_tree)->rbt_nil); \
+ } else { \
+ rbp_r_t = &(a_tree)->rbt_nil; \
+ } \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
+ } else { \
+ /* This is the node we want to delete, but we will */\
+ /* instead swap it with its successor and delete the */\
+ /* successor. Record enough information to do the */\
+ /* swap later. rbp_r_xp is the a_node's parent. */\
+ rbp_r_xp = rbp_r_p; \
+ rbp_r_cmp = 1; /* Note that deletion is incomplete. */\
+ } \
+ } \
+ if (rbp_r_cmp == 1) { \
+ if (rbp_red_get(a_type, a_field, rbp_left_get(a_type, \
+ a_field, rbp_right_get(a_type, a_field, rbp_r_c))) \
+ == false) { \
+ rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
+ if (rbp_red_get(a_type, a_field, rbp_r_t)) { \
+ /* Standard transform. */\
+ rbp_move_red_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ } else { \
+ /* Root-specific transform. */\
+ rbp_red_set(a_type, a_field, rbp_r_c); \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_u)) { \
+ rbp_black_set(a_type, a_field, rbp_r_u); \
+ rbp_rotate_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ rbp_rotate_left(a_type, a_field, rbp_r_c, \
+ rbp_r_u); \
+ rbp_right_set(a_type, a_field, rbp_r_t, \
+ rbp_r_u); \
+ } else { \
+ rbp_red_set(a_type, a_field, rbp_r_t); \
+ rbp_rotate_left(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ } \
+ } \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ /* Move right. */\
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c); \
+ } \
+ } \
+ } \
+ if (rbp_r_cmp != 0) { \
+ while (true) { \
+ assert(rbp_r_p != &(a_tree)->rbt_nil); \
+ rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
+ if (rbp_r_cmp < 0) { \
+ rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
+ if (rbp_r_t == &(a_tree)->rbt_nil) { \
+ /* rbp_r_c now refers to the successor node to */\
+ /* relocate, and rbp_r_xp/a_node refer to the */\
+ /* context for the relocation. */\
+ if (rbp_left_get(a_type, a_field, rbp_r_xp) \
+ == (a_node)) { \
+ rbp_left_set(a_type, a_field, rbp_r_xp, \
+ rbp_r_c); \
+ } else { \
+ assert(rbp_right_get(a_type, a_field, \
+ rbp_r_xp) == (a_node)); \
+ rbp_right_set(a_type, a_field, rbp_r_xp, \
+ rbp_r_c); \
+ } \
+ rbp_left_set(a_type, a_field, rbp_r_c, \
+ rbp_left_get(a_type, a_field, (a_node))); \
+ rbp_right_set(a_type, a_field, rbp_r_c, \
+ rbp_right_get(a_type, a_field, (a_node))); \
+ rbp_color_set(a_type, a_field, rbp_r_c, \
+ rbp_red_get(a_type, a_field, (a_node))); \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, \
+ &(a_tree)->rbt_nil); \
+ } else { \
+ assert(rbp_right_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c); \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ &(a_tree)->rbt_nil); \
+ } \
+ break; \
+ } \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_t) == false \
+ && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
+ rbp_move_red_left(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
+ } else { \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
+ } \
+ } else { \
+ /* Check whether to delete this node (it has to be */\
+ /* the correct node and a leaf node). */\
+ if (rbp_r_cmp == 0) { \
+ assert((a_node) == rbp_r_c); \
+ if (rbp_right_get(a_type, a_field, rbp_r_c) \
+ == &(a_tree)->rbt_nil) { \
+ /* Delete leaf node. */\
+ if (rbp_left_get(a_type, a_field, rbp_r_c) \
+ != &(a_tree)->rbt_nil) { \
+ rbp_lean_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ rbp_right_set(a_type, a_field, rbp_r_t, \
+ &(a_tree)->rbt_nil); \
+ } else { \
+ rbp_r_t = &(a_tree)->rbt_nil; \
+ } \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } else { \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } \
+ break; \
+ } else { \
+ /* This is the node we want to delete, but we */\
+ /* will instead swap it with its successor */\
+ /* and delete the successor. Record enough */\
+ /* information to do the swap later. */\
+ /* rbp_r_xp is a_node's parent. */\
+ rbp_r_xp = rbp_r_p; \
+ } \
+ } \
+ rbp_r_t = rbp_right_get(a_type, a_field, rbp_r_c); \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
+ rbp_move_red_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
+ } else { \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c); \
+ } \
+ } \
+ } \
+ } \
+ /* Update root. */\
+ (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s); \
+} while (0)
+
+/*
+ * The rb_wrap() macro provides a convenient way to wrap functions around the
+ * cpp macros. The main benefits of wrapping are that 1) repeated macro
+ * expansion can cause code bloat, especially for rb_{insert,remove}(), and
+ * 2) type, linkage, comparison functions, etc. need not be specified at every
+ * call point.
+ */
+
+#define rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_tree_type *tree) { \
+ rb_new(a_type, a_field, tree); \
+} \
+a_attr a_type * \
+a_prefix##first(a_tree_type *tree) { \
+ a_type *ret; \
+ rb_first(a_type, a_field, tree, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##last(a_tree_type *tree) { \
+ a_type *ret; \
+ rb_last(a_type, a_field, tree, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##next(a_tree_type *tree, a_type *node) { \
+ a_type *ret; \
+ rb_next(a_type, a_field, a_cmp, tree, node, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##prev(a_tree_type *tree, a_type *node) { \
+ a_type *ret; \
+ rb_prev(a_type, a_field, a_cmp, tree, node, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##search(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_search(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##nsearch(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_nsearch(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##psearch(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_psearch(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##insert(a_tree_type *tree, a_type *node) { \
+ rb_insert(a_type, a_field, a_cmp, tree, node); \
+} \
+a_attr void \
+a_prefix##remove(a_tree_type *tree, a_type *node) { \
+ rb_remove(a_type, a_field, a_cmp, tree, node); \
+}
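+
+/*
+ * Illustrative use of rb_wrap(), continuing the hypothetical example_node_t /
+ * example_tree_t / example_cmp declarations sketched near the top of this
+ * file:
+ *
+ *   rb_wrap(static, example_tree_, example_tree_t, example_node_t, link,
+ *       example_cmp)
+ *
+ *   example_tree_t tree;
+ *   example_tree_new(&tree);
+ *   example_tree_insert(&tree, &node);
+ *   example_node_t *found = example_tree_search(&tree, &key_node);
+ *   example_tree_remove(&tree, &node);
+ */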
+
+/*
+ * The iterators simulate recursion via an array of pointers that store the
+ * current path. This is critical to performance, since a series of calls to
+ * rb_{next,prev}() would require time proportional to (n lg n), whereas this
+ * implementation only requires time proportional to (n).
+ *
+ * Since the iterators cache a path down the tree, any tree modification may
+ * cause the cached path to become invalid. In order to continue iteration,
+ * use something like the following sequence:
+ *
+ * {
+ * a_type *node, *tnode;
+ *
+ * rb_foreach_begin(a_type, a_field, a_tree, node) {
+ * ...
+ * rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
+ * rb_remove(a_type, a_field, a_cmp, a_tree, node);
+ * rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
+ * ...
+ * } rb_foreach_end(a_type, a_field, a_tree, node)
+ * }
+ *
+ * Note that this idiom is not advised if every iteration modifies the tree,
+ * since in that case there is no algorithmic complexity improvement over a
+ * series of rb_{next,prev}() calls, thus making the setup overhead wasted
+ * effort.
+ */
+
+#ifdef RB_NO_C99_VARARRAYS
+ /*
+ * Avoid using variable-length arrays, at the cost of using more stack space.
+ * Size the path arrays such that they are always large enough, even if a
+ * tree consumes all of memory. Since each node must contain a minimum of
+ * two pointers, there can never be more nodes than:
+ *
+ * 1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
+ *
+ * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
+ * is:
+ *
+ * (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+ *
+ * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
+ * systems, respectively (approximately 348 and 1440 bytes).
+ */
+# define rbp_compute_f_height(a_type, a_field, a_tree)
+# define rbp_f_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+# define rbp_compute_fr_height(a_type, a_field, a_tree)
+# define rbp_fr_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+#else
+# define rbp_compute_f_height(a_type, a_field, a_tree) \
+ /* Compute the maximum possible tree depth (3X the black height). */\
+ unsigned rbp_f_height; \
+ rbp_black_height(a_type, a_field, a_tree, rbp_f_height); \
+ rbp_f_height *= 3;
+# define rbp_compute_fr_height(a_type, a_field, a_tree) \
+ /* Compute the maximum possible tree depth (3X the black height). */\
+ unsigned rbp_fr_height; \
+ rbp_black_height(a_type, a_field, a_tree, rbp_fr_height); \
+ rbp_fr_height *= 3;
+#endif
+
+#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { \
+ rbp_compute_f_height(a_type, a_field, a_tree) \
+ { \
+ /* Initialize the path to contain the left spine. */\
+ a_type *rbp_f_path[rbp_f_height]; \
+ a_type *rbp_f_node; \
+ bool rbp_f_synced = false; \
+ unsigned rbp_f_depth = 0; \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
+ rbp_f_depth++; \
+ while ((rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } \
+ /* While the path is non-empty, iterate. */\
+ while (rbp_f_depth > 0) { \
+ (a_var) = rbp_f_path[rbp_f_depth-1];
+
+/* Only use if modifying the tree during iteration. */
+#define rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node) \
+ /* Re-initialize the path to contain the path to a_node. */\
+ rbp_f_depth = 0; \
+ if (a_node != NULL) { \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
+ rbp_f_depth++; \
+ rbp_f_node = rbp_f_path[0]; \
+ while (true) { \
+ int rbp_f_cmp = (a_cmp)((a_node), \
+ rbp_f_path[rbp_f_depth-1]); \
+ if (rbp_f_cmp < 0) { \
+ rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]); \
+ } else if (rbp_f_cmp > 0) { \
+ rbp_f_node = rbp_right_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_f_node != &(a_tree)->rbt_nil); \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } \
+ } \
+ rbp_f_synced = true;
+
+#define rb_foreach_end(a_type, a_field, a_tree, a_var) \
+ if (rbp_f_synced) { \
+ rbp_f_synced = false; \
+ continue; \
+ } \
+ /* Find the successor. */\
+ if ((rbp_f_node = rbp_right_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
+ /* The successor is the left-most node in the right */\
+ /* subtree. */\
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ while ((rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } else { \
+ /* The successor is above the current node. Unwind */\
+ /* until a left-leaning edge is removed from the */\
+ /* path, or the path is empty. */\
+ for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) { \
+ if (rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]) \
+ == rbp_f_path[rbp_f_depth]) { \
+ break; \
+ } \
+ } \
+ } \
+ } \
+ } \
+}
+
+#define rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) { \
+ rbp_compute_fr_height(a_type, a_field, a_tree) \
+ { \
+ /* Initialize the path to contain the right spine. */\
+ a_type *rbp_fr_path[rbp_fr_height]; \
+ a_type *rbp_fr_node; \
+ bool rbp_fr_synced = false; \
+ unsigned rbp_fr_depth = 0; \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
+ rbp_fr_depth++; \
+ while ((rbp_fr_node = rbp_right_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } \
+ /* While the path is non-empty, iterate. */\
+ while (rbp_fr_depth > 0) { \
+ (a_var) = rbp_fr_path[rbp_fr_depth-1];
+
+/* Only use if modifying the tree during iteration. */
+#define rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node) \
+ /* Re-initialize the path to contain the path to a_node. */\
+ rbp_fr_depth = 0; \
+ if (a_node != NULL) { \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
+ rbp_fr_depth++; \
+ rbp_fr_node = rbp_fr_path[0]; \
+ while (true) { \
+ int rbp_fr_cmp = (a_cmp)((a_node), \
+ rbp_fr_path[rbp_fr_depth-1]); \
+ if (rbp_fr_cmp < 0) { \
+ rbp_fr_node = rbp_left_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1]); \
+ } else if (rbp_fr_cmp > 0) { \
+ rbp_fr_node = rbp_right_get(a_type, a_field,\
+ rbp_fr_path[rbp_fr_depth-1]); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_fr_node != &(a_tree)->rbt_nil); \
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } \
+ } \
+ rbp_fr_synced = true;
+
+#define rb_foreach_reverse_end(a_type, a_field, a_tree, a_var) \
+ if (rbp_fr_synced) { \
+ rbp_fr_synced = false; \
+ continue; \
+ } \
+ if (rbp_fr_depth == 0) { \
+ /* rb_foreach_reverse_sync() was called with a NULL */\
+ /* a_node. */\
+ break; \
+ } \
+ /* Find the predecessor. */\
+ if ((rbp_fr_node = rbp_left_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \
+ /* The predecessor is the right-most node in the left */\
+ /* subtree. */\
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ while ((rbp_fr_node = rbp_right_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {\
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } else { \
+ /* The predecessor is above the current node. Unwind */\
+ /* until a right-leaning edge is removed from the */\
+ /* path, or the path is empty. */\
+ for (rbp_fr_depth--; rbp_fr_depth > 0; rbp_fr_depth--) {\
+ if (rbp_right_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1]) \
+ == rbp_fr_path[rbp_fr_depth]) { \
+ break; \
+ } \
+ } \
+ } \
+ } \
+ } \
+}
+
+#endif /* RB_H_ */
diff --git a/memory/replace/dmd/DMD.cpp b/memory/replace/dmd/DMD.cpp
new file mode 100644
index 000000000..49eb27970
--- /dev/null
+++ b/memory/replace/dmd/DMD.cpp
@@ -0,0 +1,2122 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if !defined(MOZ_PROFILING)
+#error "DMD requires MOZ_PROFILING"
+#endif
+
+#ifdef XP_WIN
+#include <windows.h>
+#include <process.h>
+#else
+#include <unistd.h>
+#endif
+
+#ifdef ANDROID
+#include <android/log.h>
+#endif
+
+#include "nscore.h"
+#include "mozilla/StackWalk.h"
+
+#include "js/HashTable.h"
+#include "js/Vector.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/FastBernoulliTrial.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/JSONWriter.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MemoryReporting.h"
+
+// CodeAddressService is defined entirely in the header, so this does not make
+// DMD depend on XPCOM's object file.
+#include "CodeAddressService.h"
+
+// replace_malloc.h needs to be included before replace_malloc_bridge.h,
+// which DMD.h includes, so DMD.h needs to be included after replace_malloc.h.
+// MOZ_REPLACE_ONLY_MEMALIGN saves us from having to define
+// replace_{posix_memalign,aligned_alloc,valloc}. It requires defining
+// PAGE_SIZE. Nb: sysconf() is expensive, but it's only used for (the obsolete
+// and rarely used) valloc.
+#define MOZ_REPLACE_ONLY_MEMALIGN 1
+
+#ifndef PAGE_SIZE
+#define DMD_DEFINED_PAGE_SIZE
+#ifdef XP_WIN
+#define PAGE_SIZE GetPageSize()
+static long GetPageSize()
+{
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+}
+#else // XP_WIN
+#define PAGE_SIZE sysconf(_SC_PAGESIZE)
+#endif // XP_WIN
+#endif // PAGE_SIZE
+#include "replace_malloc.h"
+#undef MOZ_REPLACE_ONLY_MEMALIGN
+#ifdef DMD_DEFINED_PAGE_SIZE
+#undef DMD_DEFINED_PAGE_SIZE
+#undef PAGE_SIZE
+#endif // DMD_DEFINED_PAGE_SIZE
+
+#include "DMD.h"
+
+namespace mozilla {
+namespace dmd {
+
+class DMDBridge : public ReplaceMallocBridge
+{
+ virtual DMDFuncs* GetDMDFuncs() override;
+};
+
+static DMDBridge* gDMDBridge;
+static DMDFuncs gDMDFuncs;
+
+DMDFuncs*
+DMDBridge::GetDMDFuncs()
+{
+ return &gDMDFuncs;
+}
+
+inline void
+StatusMsg(const char* aFmt, ...)
+{
+ va_list ap;
+ va_start(ap, aFmt);
+ gDMDFuncs.StatusMsg(aFmt, ap);
+ va_end(ap);
+}
+
+//---------------------------------------------------------------------------
+// Utilities
+//---------------------------------------------------------------------------
+
+#ifndef DISALLOW_COPY_AND_ASSIGN
+#define DISALLOW_COPY_AND_ASSIGN(T) \
+ T(const T&); \
+ void operator=(const T&)
+#endif
+
+static const malloc_table_t* gMallocTable = nullptr;
+
+// Whether DMD finished initializing.
+static bool gIsDMDInitialized = false;
+
+// This provides infallible allocations (they abort on OOM). We use it for all
+// of DMD's own allocations, which fall into the following three cases.
+//
+// - Direct allocations (the easy case).
+//
+// - Indirect allocations in js::{Vector,HashSet,HashMap} -- this class serves
+// as their AllocPolicy.
+//
+// - Other indirect allocations (e.g. MozStackWalk) -- see the comments on
+// Thread::mBlockIntercepts and in replace_malloc for how these work.
+//
+// It would be nice if we could use the InfallibleAllocPolicy from mozalloc,
+// but DMD cannot use mozalloc.
+//
+class InfallibleAllocPolicy
+{
+ static void ExitOnFailure(const void* aP);
+
+public:
+ template <typename T>
+ static T* maybe_pod_malloc(size_t aNumElems)
+ {
+ if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
+ return nullptr;
+ return (T*)gMallocTable->malloc(aNumElems * sizeof(T));
+ }
+
+ template <typename T>
+ static T* maybe_pod_calloc(size_t aNumElems)
+ {
+ return (T*)gMallocTable->calloc(aNumElems, sizeof(T));
+ }
+
+ template <typename T>
+ static T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+ {
+ if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
+ return nullptr;
+ return (T*)gMallocTable->realloc(aPtr, aNewSize * sizeof(T));
+ }
+
+ static void* malloc_(size_t aSize)
+ {
+ void* p = gMallocTable->malloc(aSize);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ template <typename T>
+ static T* pod_malloc(size_t aNumElems)
+ {
+ T* p = maybe_pod_malloc<T>(aNumElems);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ static void* calloc_(size_t aSize)
+ {
+ void* p = gMallocTable->calloc(1, aSize);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ template <typename T>
+ static T* pod_calloc(size_t aNumElems)
+ {
+ T* p = maybe_pod_calloc<T>(aNumElems);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ // This realloc_ is the one we use for direct reallocs within DMD.
+ static void* realloc_(void* aPtr, size_t aNewSize)
+ {
+ void* p = gMallocTable->realloc(aPtr, aNewSize);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ // This realloc_ is required for this to be a JS container AllocPolicy.
+ template <typename T>
+ static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+ {
+ T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ static void* memalign_(size_t aAlignment, size_t aSize)
+ {
+ void* p = gMallocTable->memalign(aAlignment, aSize);
+ ExitOnFailure(p);
+ return p;
+ }
+
+ static void free_(void* aPtr) { gMallocTable->free(aPtr); }
+
+ static char* strdup_(const char* aStr)
+ {
+ char* s = (char*) InfallibleAllocPolicy::malloc_(strlen(aStr) + 1);
+ strcpy(s, aStr);
+ return s;
+ }
+
+ template <class T>
+ static T* new_()
+ {
+ void* mem = malloc_(sizeof(T));
+ return new (mem) T;
+ }
+
+ template <class T, typename P1>
+ static T* new_(P1 aP1)
+ {
+ void* mem = malloc_(sizeof(T));
+ return new (mem) T(aP1);
+ }
+
+ template <class T>
+ static void delete_(T* aPtr)
+ {
+ if (aPtr) {
+ aPtr->~T();
+ InfallibleAllocPolicy::free_(aPtr);
+ }
+ }
+
+ static void reportAllocOverflow() { ExitOnFailure(nullptr); }
+ bool checkSimulatedOOM() const { return true; }
+};
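+
+// A minimal usage sketch (|T| stands for any hypothetical type):
+//
+//   T* t    = InfallibleAllocPolicy::new_<T>();      // placement-new on malloc_()'d memory
+//   char* s = InfallibleAllocPolicy::strdup_("abc"); // infallible string copy
+//   InfallibleAllocPolicy::delete_(t);               // runs ~T() and then free_()
+//   InfallibleAllocPolicy::free_(s);
+//
+// All of these abort via ExitOnFailure() rather than returning null on OOM.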
+
+// This is only needed because of the |const void*| vs |void*| arg mismatch.
+static size_t
+MallocSizeOf(const void* aPtr)
+{
+ return gMallocTable->malloc_usable_size(const_cast<void*>(aPtr));
+}
+
+void
+DMDFuncs::StatusMsg(const char* aFmt, va_list aAp)
+{
+#ifdef ANDROID
+ __android_log_vprint(ANDROID_LOG_INFO, "DMD", aFmt, aAp);
+#else
+ // The +64 is easily enough for the "DMD[<pid>] " prefix and the NUL.
+ char* fmt = (char*) InfallibleAllocPolicy::malloc_(strlen(aFmt) + 64);
+ sprintf(fmt, "DMD[%d] %s", getpid(), aFmt);
+ vfprintf(stderr, fmt, aAp);
+ InfallibleAllocPolicy::free_(fmt);
+#endif
+}
+
+/* static */ void
+InfallibleAllocPolicy::ExitOnFailure(const void* aP)
+{
+ if (!aP) {
+ MOZ_CRASH("DMD out of memory; aborting");
+ }
+}
+
+static double
+Percent(size_t part, size_t whole)
+{
+ return (whole == 0) ? 0 : 100 * (double)part / whole;
+}
+
+// Commifies the number, returning a pointer into |buf| holding the result.
+static char*
+Show(size_t n, char* buf, size_t buflen)
+{
+ int nc = 0, i = 0, lasti = buflen - 2;
+ buf[lasti + 1] = '\0';
+ if (n == 0) {
+ buf[lasti - i] = '0';
+ i++;
+ } else {
+ while (n > 0) {
+ if (((i - nc) % 3) == 0 && i != 0) {
+ buf[lasti - i] = ',';
+ i++;
+ nc++;
+ }
+ buf[lasti - i] = static_cast<char>((n % 10) + '0');
+ i++;
+ n /= 10;
+ }
+ }
+ int firstCharIndex = lasti - i + 1;
+
+ MOZ_ASSERT(firstCharIndex >= 0);
+ return &buf[firstCharIndex];
+}
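+
+// For example (illustrative): Show(1234567, buf, sizeof(buf)) returns a
+// pointer into |buf| to the string "1,234,567".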
+
+//---------------------------------------------------------------------------
+// Options (Part 1)
+//---------------------------------------------------------------------------
+
+class Options
+{
+ template <typename T>
+ struct NumOption
+ {
+ const T mDefault;
+ const T mMax;
+ T mActual;
+ NumOption(T aDefault, T aMax)
+ : mDefault(aDefault), mMax(aMax), mActual(aDefault)
+ {}
+ };
+
+ // DMD has several modes. These modes affect what data is recorded and
+ // written to the output file, and the written data affects the
+ // post-processing that dmd.py can do.
+ //
+ // Users specify the mode as soon as DMD starts. This leads to minimal memory
+  // usage and log file size. It has the disadvantage that it is inflexible --
+  // if you want to change modes you have to re-run DMD. But in practice
+  // changing modes seems to be rare, so it's not much of a problem.
+ //
+ // An alternative possibility would be to always record and output *all* the
+ // information needed for all modes. This would let you choose the mode when
+ // running dmd.py, and so you could do multiple kinds of profiling on a
+ // single DMD run. But if you are only interested in one of the simpler
+ // modes, you'd pay the price of (a) increased memory usage and (b) *very*
+ // large log files.
+ //
+ // Finally, another alternative possibility would be to do mode selection
+ // partly at DMD startup or recording, and then partly in dmd.py. This would
+ // give some extra flexibility at moderate memory and file size cost. But
+ // certain mode pairs wouldn't work, which would be confusing.
+ //
+ enum class Mode
+ {
+    // For each live block, this mode outputs: size (usable and slop) and
+    // (possibly) an allocation stack. This mode is good for live heap
+    // profiling.
+ Live,
+
+ // Like "Live", but for each live block it also outputs: zero or more
+ // report stacks. This mode is good for identifying where memory reporters
+ // should be added. This is the default mode.
+ DarkMatter,
+
+ // Like "Live", but also outputs the same data for dead blocks. This mode
+ // does cumulative heap profiling, which is good for identifying where large
+ // amounts of short-lived allocations ("heap churn") occur.
+ Cumulative,
+
+ // Like "Live", but this mode also outputs for each live block the address
+ // of the block and the values contained in the blocks. This mode is useful
+ // for investigating leaks, by helping to figure out which blocks refer to
+ // other blocks. This mode force-enables full stacks coverage.
+ Scan
+ };
+
+ // With full stacks, every heap block gets a stack trace recorded for it.
+ // This is complete but slow.
+ //
+ // With partial stacks, not all heap blocks will get a stack trace recorded.
+ // A Bernoulli trial (see mfbt/FastBernoulliTrial.h for details) is performed
+ // for each heap block to decide if it gets one. Because bigger heap blocks
+ // are more likely to get a stack trace, even though most heap *blocks* won't
+ // get a stack trace, most heap *bytes* will.
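+  //
+  // In code terms, this is a sketch of the decision AllocCallback() makes
+  // further below:
+  //
+  //   bool getTrace = gOptions->DoFullStacks() || gBernoulli->trial(actualSize);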
+ enum class Stacks
+ {
+ Full,
+ Partial
+ };
+
+ char* mDMDEnvVar; // a saved copy, for later printing
+
+ Mode mMode;
+ Stacks mStacks;
+ bool mShowDumpStats;
+
+ void BadArg(const char* aArg);
+ static const char* ValueIfMatch(const char* aArg, const char* aOptionName);
+ static bool GetLong(const char* aArg, const char* aOptionName,
+ long aMin, long aMax, long* aValue);
+ static bool GetBool(const char* aArg, const char* aOptionName, bool* aValue);
+
+public:
+ explicit Options(const char* aDMDEnvVar);
+
+ bool IsLiveMode() const { return mMode == Mode::Live; }
+ bool IsDarkMatterMode() const { return mMode == Mode::DarkMatter; }
+ bool IsCumulativeMode() const { return mMode == Mode::Cumulative; }
+ bool IsScanMode() const { return mMode == Mode::Scan; }
+
+ const char* ModeString() const;
+
+ const char* DMDEnvVar() const { return mDMDEnvVar; }
+
+ bool DoFullStacks() const { return mStacks == Stacks::Full; }
+  bool ShowDumpStats() const { return mShowDumpStats; }
+};
+
+static Options *gOptions;
+
+//---------------------------------------------------------------------------
+// The global lock
+//---------------------------------------------------------------------------
+
+// MutexBase implements the platform-specific parts of a mutex.
+
+#ifdef XP_WIN
+
+class MutexBase
+{
+ CRITICAL_SECTION mCS;
+
+ DISALLOW_COPY_AND_ASSIGN(MutexBase);
+
+public:
+ MutexBase() { InitializeCriticalSection(&mCS); }
+ ~MutexBase() { DeleteCriticalSection(&mCS); }
+
+ void Lock() { EnterCriticalSection(&mCS); }
+ void Unlock() { LeaveCriticalSection(&mCS); }
+};
+
+#else
+
+#include <pthread.h>
+#include <sys/types.h>
+
+class MutexBase
+{
+ pthread_mutex_t mMutex;
+
+ DISALLOW_COPY_AND_ASSIGN(MutexBase);
+
+public:
+ MutexBase() { pthread_mutex_init(&mMutex, nullptr); }
+
+ void Lock() { pthread_mutex_lock(&mMutex); }
+ void Unlock() { pthread_mutex_unlock(&mMutex); }
+};
+
+#endif
+
+class Mutex : private MutexBase
+{
+ bool mIsLocked;
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+
+public:
+ Mutex()
+ : mIsLocked(false)
+ {}
+
+ void Lock()
+ {
+ MutexBase::Lock();
+ MOZ_ASSERT(!mIsLocked);
+ mIsLocked = true;
+ }
+
+ void Unlock()
+ {
+ MOZ_ASSERT(mIsLocked);
+ mIsLocked = false;
+ MutexBase::Unlock();
+ }
+
+ bool IsLocked() { return mIsLocked; }
+};
+
+// This lock must be held while manipulating global state such as
+// gStackTraceTable, gLiveBlockTable, gDeadBlockTable. Note that gOptions is
+// *not* protected by this lock because it is only written to by Options(),
+// which is only invoked at start-up and in ResetEverything(), which is only
+// used by SmokeDMD.cpp.
+static Mutex* gStateLock = nullptr;
+
+class AutoLockState
+{
+ DISALLOW_COPY_AND_ASSIGN(AutoLockState);
+
+public:
+ AutoLockState() { gStateLock->Lock(); }
+ ~AutoLockState() { gStateLock->Unlock(); }
+};
+
+class AutoUnlockState
+{
+ DISALLOW_COPY_AND_ASSIGN(AutoUnlockState);
+
+public:
+ AutoUnlockState() { gStateLock->Unlock(); }
+ ~AutoUnlockState() { gStateLock->Lock(); }
+};
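+
+// Illustrative locking pattern (a sketch; StackTrace::Get() below does this
+// for real around MozStackWalk()):
+//
+//   AutoLockState lock;        // acquires gStateLock
+//   {
+//     AutoUnlockState unlock;  // temporarily releases gStateLock
+//     // ... call code that must not run under gStateLock ...
+//   }                          // gStateLock is re-acquired here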
+
+//---------------------------------------------------------------------------
+// Thread-local storage and blocking of intercepts
+//---------------------------------------------------------------------------
+
+#ifdef XP_WIN
+
+#define DMD_TLS_INDEX_TYPE DWORD
+#define DMD_CREATE_TLS_INDEX(i_) do { \
+ (i_) = TlsAlloc(); \
+ } while (0)
+#define DMD_DESTROY_TLS_INDEX(i_) TlsFree((i_))
+#define DMD_GET_TLS_DATA(i_) TlsGetValue((i_))
+#define DMD_SET_TLS_DATA(i_, v_) TlsSetValue((i_), (v_))
+
+#else
+
+#include <pthread.h>
+
+#define DMD_TLS_INDEX_TYPE pthread_key_t
+#define DMD_CREATE_TLS_INDEX(i_) pthread_key_create(&(i_), nullptr)
+#define DMD_DESTROY_TLS_INDEX(i_) pthread_key_delete((i_))
+#define DMD_GET_TLS_DATA(i_) pthread_getspecific((i_))
+#define DMD_SET_TLS_DATA(i_, v_) pthread_setspecific((i_), (v_))
+
+#endif
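+
+// A sketch of how these macros are used (see Init() and Thread::Fetch() below):
+//
+//   DMD_CREATE_TLS_INDEX(gTlsIndex);          // once, at start-up
+//   DMD_SET_TLS_DATA(gTlsIndex, somePointer); // per thread; |somePointer| is illustrative
+//   void* p = DMD_GET_TLS_DATA(gTlsIndex);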
+
+static DMD_TLS_INDEX_TYPE gTlsIndex;
+
+class Thread
+{
+ // Required for allocation via InfallibleAllocPolicy::new_.
+ friend class InfallibleAllocPolicy;
+
+ // When true, this blocks intercepts, which allows malloc interception
+ // functions to themselves call malloc. (Nb: for direct calls to malloc we
+ // can just use InfallibleAllocPolicy::{malloc_,new_}, but we sometimes
+ // indirectly call vanilla malloc via functions like MozStackWalk.)
+ bool mBlockIntercepts;
+
+ Thread()
+ : mBlockIntercepts(false)
+ {}
+
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+
+public:
+ static Thread* Fetch();
+
+ bool BlockIntercepts()
+ {
+ MOZ_ASSERT(!mBlockIntercepts);
+ return mBlockIntercepts = true;
+ }
+
+ bool UnblockIntercepts()
+ {
+ MOZ_ASSERT(mBlockIntercepts);
+ return mBlockIntercepts = false;
+ }
+
+ bool InterceptsAreBlocked() const { return mBlockIntercepts; }
+};
+
+/* static */ Thread*
+Thread::Fetch()
+{
+ Thread* t = static_cast<Thread*>(DMD_GET_TLS_DATA(gTlsIndex));
+
+ if (MOZ_UNLIKELY(!t)) {
+ // This memory is never freed, even if the thread dies. It's a leak, but
+ // only a tiny one.
+ t = InfallibleAllocPolicy::new_<Thread>();
+ DMD_SET_TLS_DATA(gTlsIndex, t);
+ }
+
+ return t;
+}
+
+// An object of this class must be created (on the stack) before running any
+// code that might allocate.
+class AutoBlockIntercepts
+{
+ Thread* const mT;
+
+ DISALLOW_COPY_AND_ASSIGN(AutoBlockIntercepts);
+
+public:
+ explicit AutoBlockIntercepts(Thread* aT)
+ : mT(aT)
+ {
+ mT->BlockIntercepts();
+ }
+ ~AutoBlockIntercepts()
+ {
+ MOZ_ASSERT(mT->InterceptsAreBlocked());
+ mT->UnblockIntercepts();
+ }
+};
+
+//---------------------------------------------------------------------------
+// Location service
+//---------------------------------------------------------------------------
+
+class StringTable
+{
+public:
+ StringTable()
+ {
+ MOZ_ALWAYS_TRUE(mSet.init(64));
+ }
+
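+  // Returns a canonical copy of |aString|. For example, two successive calls
+  // Intern("foo") return the same pointer; the string is copied (via strdup_)
+  // only the first time it is seen.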
+ const char*
+ Intern(const char* aString)
+ {
+ StringHashSet::AddPtr p = mSet.lookupForAdd(aString);
+ if (p) {
+ return *p;
+ }
+
+ const char* newString = InfallibleAllocPolicy::strdup_(aString);
+ MOZ_ALWAYS_TRUE(mSet.add(p, newString));
+ return newString;
+ }
+
+ size_t
+ SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+ {
+ size_t n = 0;
+ n += mSet.sizeOfExcludingThis(aMallocSizeOf);
+ for (auto r = mSet.all(); !r.empty(); r.popFront()) {
+ n += aMallocSizeOf(r.front());
+ }
+ return n;
+ }
+
+private:
+ struct StringHasher
+ {
+ typedef const char* Lookup;
+
+ static uint32_t hash(const char* const& aS)
+ {
+ return HashString(aS);
+ }
+
+ static bool match(const char* const& aA, const char* const& aB)
+ {
+ return strcmp(aA, aB) == 0;
+ }
+ };
+
+ typedef js::HashSet<const char*, StringHasher, InfallibleAllocPolicy> StringHashSet;
+
+ StringHashSet mSet;
+};
+
+class StringAlloc
+{
+public:
+ static char* copy(const char* aString)
+ {
+ return InfallibleAllocPolicy::strdup_(aString);
+ }
+ static void free(char* aString)
+ {
+ InfallibleAllocPolicy::free_(aString);
+ }
+};
+
+struct DescribeCodeAddressLock
+{
+ static void Unlock() { gStateLock->Unlock(); }
+ static void Lock() { gStateLock->Lock(); }
+ static bool IsLocked() { return gStateLock->IsLocked(); }
+};
+
+typedef CodeAddressService<StringTable, StringAlloc, DescribeCodeAddressLock>
+ CodeAddressService;
+
+//---------------------------------------------------------------------------
+// Stack traces
+//---------------------------------------------------------------------------
+
+class StackTrace
+{
+public:
+ static const uint32_t MaxFrames = 24;
+
+private:
+ uint32_t mLength; // The number of PCs.
+ const void* mPcs[MaxFrames]; // The PCs themselves.
+
+public:
+ StackTrace() : mLength(0) {}
+
+ uint32_t Length() const { return mLength; }
+ const void* Pc(uint32_t i) const
+ {
+ MOZ_ASSERT(i < mLength);
+ return mPcs[i];
+ }
+
+ uint32_t Size() const { return mLength * sizeof(mPcs[0]); }
+
+ // The stack trace returned by this function is interned in gStackTraceTable,
+ // and so is immortal and unmovable.
+ static const StackTrace* Get(Thread* aT);
+
+ // Hash policy.
+
+ typedef StackTrace* Lookup;
+
+ static uint32_t hash(const StackTrace* const& aSt)
+ {
+ return mozilla::HashBytes(aSt->mPcs, aSt->Size());
+ }
+
+ static bool match(const StackTrace* const& aA,
+ const StackTrace* const& aB)
+ {
+ return aA->mLength == aB->mLength &&
+ memcmp(aA->mPcs, aB->mPcs, aA->Size()) == 0;
+ }
+
+private:
+ static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
+ void* aClosure)
+ {
+ StackTrace* st = (StackTrace*) aClosure;
+ MOZ_ASSERT(st->mLength < MaxFrames);
+ st->mPcs[st->mLength] = aPc;
+ st->mLength++;
+ MOZ_ASSERT(st->mLength == aFrameNumber);
+ }
+};
+
+typedef js::HashSet<StackTrace*, StackTrace, InfallibleAllocPolicy>
+ StackTraceTable;
+static StackTraceTable* gStackTraceTable = nullptr;
+
+typedef js::HashSet<const StackTrace*, js::DefaultHasher<const StackTrace*>,
+ InfallibleAllocPolicy>
+ StackTraceSet;
+
+typedef js::HashSet<const void*, js::DefaultHasher<const void*>,
+ InfallibleAllocPolicy>
+ PointerSet;
+typedef js::HashMap<const void*, uint32_t, js::DefaultHasher<const void*>,
+ InfallibleAllocPolicy>
+ PointerIdMap;
+
+// We won't GC the stack trace table until its size exceeds this many elements.
+static uint32_t gGCStackTraceTableWhenSizeExceeds = 4 * 1024;
+
+/* static */ const StackTrace*
+StackTrace::Get(Thread* aT)
+{
+ MOZ_ASSERT(gStateLock->IsLocked());
+ MOZ_ASSERT(aT->InterceptsAreBlocked());
+
+ // On Windows, MozStackWalk can acquire a lock from the shared library
+ // loader. Another thread might call malloc while holding that lock (when
+ // loading a shared library). So we can't be in gStateLock during the call
+ // to MozStackWalk. For details, see
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
+ // On Linux, something similar can happen; see bug 824340.
+ // So let's just release it on all platforms.
+ StackTrace tmp;
+ {
+ AutoUnlockState unlock;
+ uint32_t skipFrames = 2;
+ if (MozStackWalk(StackWalkCallback, skipFrames,
+ MaxFrames, &tmp, 0, nullptr)) {
+ // Handle the common case first. All is ok. Nothing to do.
+ } else {
+ tmp.mLength = 0;
+ }
+ }
+
+ StackTraceTable::AddPtr p = gStackTraceTable->lookupForAdd(&tmp);
+ if (!p) {
+ StackTrace* stnew = InfallibleAllocPolicy::new_<StackTrace>(tmp);
+ MOZ_ALWAYS_TRUE(gStackTraceTable->add(p, stnew));
+ }
+ return *p;
+}
+
+//---------------------------------------------------------------------------
+// Heap blocks
+//---------------------------------------------------------------------------
+
+// This class combines a 2-byte-aligned pointer (i.e. one whose bottom bit
+// is zero) with a 1-bit tag.
+//
+// |T| is the pointer type, e.g. |int*|, not the pointed-to type. This makes
+// it easier to have const pointers, e.g. |TaggedPtr<const int*>|.
+template <typename T>
+class TaggedPtr
+{
+ union
+ {
+ T mPtr;
+ uintptr_t mUint;
+ };
+
+ static const uintptr_t kTagMask = uintptr_t(0x1);
+ static const uintptr_t kPtrMask = ~kTagMask;
+
+ static bool IsTwoByteAligned(T aPtr)
+ {
+ return (uintptr_t(aPtr) & kTagMask) == 0;
+ }
+
+public:
+ TaggedPtr()
+ : mPtr(nullptr)
+ {}
+
+ TaggedPtr(T aPtr, bool aBool)
+ : mPtr(aPtr)
+ {
+ MOZ_ASSERT(IsTwoByteAligned(aPtr));
+ uintptr_t tag = uintptr_t(aBool);
+ MOZ_ASSERT(tag <= kTagMask);
+ mUint |= (tag & kTagMask);
+ }
+
+ void Set(T aPtr, bool aBool)
+ {
+ MOZ_ASSERT(IsTwoByteAligned(aPtr));
+ mPtr = aPtr;
+ uintptr_t tag = uintptr_t(aBool);
+ MOZ_ASSERT(tag <= kTagMask);
+ mUint |= (tag & kTagMask);
+ }
+
+ T Ptr() const { return reinterpret_cast<T>(mUint & kPtrMask); }
+
+ bool Tag() const { return bool(mUint & kTagMask); }
+};
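+
+// A usage sketch (|st| is a hypothetical, heap-allocated StackTrace pointer):
+//
+//   TaggedPtr<const StackTrace*> tp(st, /* aBool */ true);
+//   tp.Ptr();   // yields st
+//   tp.Tag();   // yields true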
+
+// A live heap block. Stores both basic data and data about reports, if we're
+// in DarkMatter mode.
+class LiveBlock
+{
+ const void* mPtr;
+ const size_t mReqSize; // size requested
+
+ // The stack trace where this block was allocated, or nullptr if we didn't
+ // record one.
+ const StackTrace* const mAllocStackTrace;
+
+ // This array has two elements because we record at most two reports of a
+ // block.
+ // - Ptr: |mReportStackTrace| - stack trace where this block was reported.
+ // nullptr if not reported.
+ // - Tag bit 0: |mReportedOnAlloc| - was the block reported immediately on
+ // allocation? If so, DMD must not clear the report at the end of
+ // Analyze(). Only relevant if |mReportStackTrace| is non-nullptr.
+ //
+ // |mPtr| is used as the key in LiveBlockTable, so it's ok for this member
+ // to be |mutable|.
+ //
+ // Only used in DarkMatter mode.
+ mutable TaggedPtr<const StackTrace*> mReportStackTrace_mReportedOnAlloc[2];
+
+public:
+ LiveBlock(const void* aPtr, size_t aReqSize,
+ const StackTrace* aAllocStackTrace)
+ : mPtr(aPtr)
+ , mReqSize(aReqSize)
+ , mAllocStackTrace(aAllocStackTrace)
+ , mReportStackTrace_mReportedOnAlloc() // all fields get zeroed
+ {}
+
+ const void* Address() const { return mPtr; }
+
+ size_t ReqSize() const { return mReqSize; }
+
+ size_t SlopSize() const
+ {
+ return MallocSizeOf(mPtr) - mReqSize;
+ }
+
+ const StackTrace* AllocStackTrace() const
+ {
+ return mAllocStackTrace;
+ }
+
+ const StackTrace* ReportStackTrace1() const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ return mReportStackTrace_mReportedOnAlloc[0].Ptr();
+ }
+
+ const StackTrace* ReportStackTrace2() const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ return mReportStackTrace_mReportedOnAlloc[1].Ptr();
+ }
+
+ bool ReportedOnAlloc1() const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ return mReportStackTrace_mReportedOnAlloc[0].Tag();
+ }
+
+ bool ReportedOnAlloc2() const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ return mReportStackTrace_mReportedOnAlloc[1].Tag();
+ }
+
+ void AddStackTracesToTable(StackTraceSet& aStackTraces) const
+ {
+ if (AllocStackTrace()) {
+ MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));
+ }
+ if (gOptions->IsDarkMatterMode()) {
+ if (ReportStackTrace1()) {
+ MOZ_ALWAYS_TRUE(aStackTraces.put(ReportStackTrace1()));
+ }
+ if (ReportStackTrace2()) {
+ MOZ_ALWAYS_TRUE(aStackTraces.put(ReportStackTrace2()));
+ }
+ }
+ }
+
+ uint32_t NumReports() const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ if (ReportStackTrace2()) {
+ MOZ_ASSERT(ReportStackTrace1());
+ return 2;
+ }
+ if (ReportStackTrace1()) {
+ return 1;
+ }
+ return 0;
+ }
+
+ // This is |const| thanks to the |mutable| fields above.
+ void Report(Thread* aT, bool aReportedOnAlloc) const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ // We don't bother recording reports after the 2nd one.
+ uint32_t numReports = NumReports();
+ if (numReports < 2) {
+ mReportStackTrace_mReportedOnAlloc[numReports].Set(StackTrace::Get(aT),
+ aReportedOnAlloc);
+ }
+ }
+
+ void UnreportIfNotReportedOnAlloc() const
+ {
+ MOZ_ASSERT(gOptions->IsDarkMatterMode());
+ if (!ReportedOnAlloc1() && !ReportedOnAlloc2()) {
+ mReportStackTrace_mReportedOnAlloc[0].Set(nullptr, 0);
+ mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);
+
+ } else if (!ReportedOnAlloc1() && ReportedOnAlloc2()) {
+ // Shift the 2nd report down to the 1st one.
+ mReportStackTrace_mReportedOnAlloc[0] =
+ mReportStackTrace_mReportedOnAlloc[1];
+ mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);
+
+ } else if (ReportedOnAlloc1() && !ReportedOnAlloc2()) {
+ mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);
+ }
+ }
+
+ // Hash policy.
+
+ typedef const void* Lookup;
+
+ static uint32_t hash(const void* const& aPtr)
+ {
+ return mozilla::HashGeneric(aPtr);
+ }
+
+ static bool match(const LiveBlock& aB, const void* const& aPtr)
+ {
+ return aB.mPtr == aPtr;
+ }
+};
+
+// A table of live blocks where the lookup key is the block address.
+typedef js::HashSet<LiveBlock, LiveBlock, InfallibleAllocPolicy> LiveBlockTable;
+static LiveBlockTable* gLiveBlockTable = nullptr;
+
+class AggregatedLiveBlockHashPolicy
+{
+public:
+ typedef const LiveBlock* const Lookup;
+
+ static uint32_t hash(const LiveBlock* const& aB)
+ {
+ return gOptions->IsDarkMatterMode()
+ ? mozilla::HashGeneric(aB->ReqSize(),
+ aB->SlopSize(),
+ aB->AllocStackTrace(),
+ aB->ReportedOnAlloc1(),
+ aB->ReportedOnAlloc2())
+ : mozilla::HashGeneric(aB->ReqSize(),
+ aB->SlopSize(),
+ aB->AllocStackTrace());
+ }
+
+ static bool match(const LiveBlock* const& aA, const LiveBlock* const& aB)
+ {
+ return gOptions->IsDarkMatterMode()
+ ? aA->ReqSize() == aB->ReqSize() &&
+ aA->SlopSize() == aB->SlopSize() &&
+ aA->AllocStackTrace() == aB->AllocStackTrace() &&
+ aA->ReportStackTrace1() == aB->ReportStackTrace1() &&
+ aA->ReportStackTrace2() == aB->ReportStackTrace2()
+ : aA->ReqSize() == aB->ReqSize() &&
+ aA->SlopSize() == aB->SlopSize() &&
+ aA->AllocStackTrace() == aB->AllocStackTrace();
+ }
+};
+
+// A table of live blocks where the lookup key is everything but the block
+// address. For aggregating similar live blocks at output time.
+typedef js::HashMap<const LiveBlock*, size_t, AggregatedLiveBlockHashPolicy,
+ InfallibleAllocPolicy>
+ AggregatedLiveBlockTable;
+
+// A freed heap block.
+class DeadBlock
+{
+ const size_t mReqSize; // size requested
+ const size_t mSlopSize; // slop above size requested
+
+ // The stack trace where this block was allocated.
+ const StackTrace* const mAllocStackTrace;
+
+public:
+ DeadBlock()
+ : mReqSize(0)
+ , mSlopSize(0)
+ , mAllocStackTrace(nullptr)
+ {}
+
+ explicit DeadBlock(const LiveBlock& aLb)
+ : mReqSize(aLb.ReqSize())
+ , mSlopSize(aLb.SlopSize())
+ , mAllocStackTrace(aLb.AllocStackTrace())
+ {}
+
+ ~DeadBlock() {}
+
+ size_t ReqSize() const { return mReqSize; }
+ size_t SlopSize() const { return mSlopSize; }
+
+ const StackTrace* AllocStackTrace() const
+ {
+ return mAllocStackTrace;
+ }
+
+ void AddStackTracesToTable(StackTraceSet& aStackTraces) const
+ {
+ if (AllocStackTrace()) {
+ MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));
+ }
+ }
+
+ // Hash policy.
+
+ typedef DeadBlock Lookup;
+
+ static uint32_t hash(const DeadBlock& aB)
+ {
+ return mozilla::HashGeneric(aB.ReqSize(),
+ aB.SlopSize(),
+ aB.AllocStackTrace());
+ }
+
+ static bool match(const DeadBlock& aA, const DeadBlock& aB)
+ {
+ return aA.ReqSize() == aB.ReqSize() &&
+ aA.SlopSize() == aB.SlopSize() &&
+ aA.AllocStackTrace() == aB.AllocStackTrace();
+ }
+};
+
+// For each unique DeadBlock value we store a count of how many actual dead
+// blocks have that value.
+typedef js::HashMap<DeadBlock, size_t, DeadBlock, InfallibleAllocPolicy>
+ DeadBlockTable;
+static DeadBlockTable* gDeadBlockTable = nullptr;
+
+// Add the dead block to the dead block table, if that's appropriate.
+void MaybeAddToDeadBlockTable(const DeadBlock& aDb)
+{
+ if (gOptions->IsCumulativeMode() && aDb.AllocStackTrace()) {
+ AutoLockState lock;
+ if (DeadBlockTable::AddPtr p = gDeadBlockTable->lookupForAdd(aDb)) {
+ p->value() += 1;
+ } else {
+ MOZ_ALWAYS_TRUE(gDeadBlockTable->add(p, aDb, 1));
+ }
+ }
+}
+
+// Add a pointer to each live stack trace into the given StackTraceSet. (A
+// stack trace is live if it's used by one of the live blocks.)
+static void
+GatherUsedStackTraces(StackTraceSet& aStackTraces)
+{
+ MOZ_ASSERT(gStateLock->IsLocked());
+ MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
+
+ aStackTraces.finish();
+ MOZ_ALWAYS_TRUE(aStackTraces.init(512));
+
+ for (auto r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
+ r.front().AddStackTracesToTable(aStackTraces);
+ }
+
+ for (auto r = gDeadBlockTable->all(); !r.empty(); r.popFront()) {
+ r.front().key().AddStackTracesToTable(aStackTraces);
+ }
+}
+
+// Delete stack traces that we aren't using, and compact our hashtable.
+static void
+GCStackTraces()
+{
+ MOZ_ASSERT(gStateLock->IsLocked());
+ MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
+
+ StackTraceSet usedStackTraces;
+ GatherUsedStackTraces(usedStackTraces);
+
+ // Delete all unused stack traces from gStackTraceTable. The Enum destructor
+ // will automatically rehash and compact the table.
+ for (StackTraceTable::Enum e(*gStackTraceTable); !e.empty(); e.popFront()) {
+ StackTrace* const& st = e.front();
+ if (!usedStackTraces.has(st)) {
+ e.removeFront();
+ InfallibleAllocPolicy::delete_(st);
+ }
+ }
+
+ // Schedule a GC when we have twice as many stack traces as we had right after
+ // this GC finished.
+ gGCStackTraceTableWhenSizeExceeds = 2 * gStackTraceTable->count();
+}
+
+//---------------------------------------------------------------------------
+// malloc/free callbacks
+//---------------------------------------------------------------------------
+
+static FastBernoulliTrial* gBernoulli;
+
+// In testing, a probability of 0.003 resulted in ~25% of heap blocks getting
+// a stack trace and ~80% of heap bytes getting a stack trace. (This is
+// possible because big heap blocks are more likely to get a stack trace.)
+//
+// We deliberately choose not to give the user control over this probability
+// (other than effectively setting it to 1 via --stacks=full) because it's
+// quite inscrutable and generally the user just wants "faster and imprecise"
+// or "slower and precise".
+//
+// The random number seeds are arbitrary and were obtained from random.org. If
+// you change them you'll need to change the tests as well, because their
+// expected output is based on the particular sequence of trial results that we
+// get with these seeds.
+static void
+ResetBernoulli()
+{
+ new (gBernoulli) FastBernoulliTrial(0.003, 0x8e26eeee166bc8ca,
+ 0x56820f304a9c9ae0);
+}
+
+static void
+AllocCallback(void* aPtr, size_t aReqSize, Thread* aT)
+{
+ if (!aPtr) {
+ return;
+ }
+
+ AutoLockState lock;
+ AutoBlockIntercepts block(aT);
+
+ size_t actualSize = gMallocTable->malloc_usable_size(aPtr);
+
+ // We may or may not record the allocation stack trace, depending on the
+ // options and the outcome of a Bernoulli trial.
+ bool getTrace = gOptions->DoFullStacks() || gBernoulli->trial(actualSize);
+ LiveBlock b(aPtr, aReqSize, getTrace ? StackTrace::Get(aT) : nullptr);
+ MOZ_ALWAYS_TRUE(gLiveBlockTable->putNew(aPtr, b));
+}
+
+static void
+FreeCallback(void* aPtr, Thread* aT, DeadBlock* aDeadBlock)
+{
+ if (!aPtr) {
+ return;
+ }
+
+ AutoLockState lock;
+ AutoBlockIntercepts block(aT);
+
+ if (LiveBlockTable::Ptr lb = gLiveBlockTable->lookup(aPtr)) {
+ if (gOptions->IsCumulativeMode()) {
+ // Copy it out so it can be added to the dead block list later.
+ new (aDeadBlock) DeadBlock(*lb);
+ }
+ gLiveBlockTable->remove(lb);
+ } else {
+ // We have no record of the block. It must be a bogus pointer, or one that
+ // DMD wasn't able to see allocated. This should be extremely rare.
+ }
+
+ if (gStackTraceTable->count() > gGCStackTraceTableWhenSizeExceeds) {
+ GCStackTraces();
+ }
+}
+
+//---------------------------------------------------------------------------
+// malloc/free interception
+//---------------------------------------------------------------------------
+
+static void Init(const malloc_table_t* aMallocTable);
+
+} // namespace dmd
+} // namespace mozilla
+
+void
+replace_init(const malloc_table_t* aMallocTable)
+{
+ mozilla::dmd::Init(aMallocTable);
+}
+
+ReplaceMallocBridge*
+replace_get_bridge()
+{
+ return mozilla::dmd::gDMDBridge;
+}
+
+void*
+replace_malloc(size_t aSize)
+{
+ using namespace mozilla::dmd;
+
+ if (!gIsDMDInitialized) {
+ // DMD hasn't started up, either because it wasn't enabled by the user, or
+ // we're still in Init() and something has indirectly called malloc. Do a
+ // vanilla malloc. (In the latter case, if it fails we'll crash. But
+ // OOM is highly unlikely so early on.)
+ return gMallocTable->malloc(aSize);
+ }
+
+ Thread* t = Thread::Fetch();
+ if (t->InterceptsAreBlocked()) {
+ // Intercepts are blocked, which means this must be a call to malloc
+ // triggered indirectly by DMD (e.g. via MozStackWalk). Be infallible.
+ return InfallibleAllocPolicy::malloc_(aSize);
+ }
+
+ // This must be a call to malloc from outside DMD. Intercept it.
+ void* ptr = gMallocTable->malloc(aSize);
+ AllocCallback(ptr, aSize, t);
+ return ptr;
+}
+
+void*
+replace_calloc(size_t aCount, size_t aSize)
+{
+ using namespace mozilla::dmd;
+
+ if (!gIsDMDInitialized) {
+ return gMallocTable->calloc(aCount, aSize);
+ }
+
+ Thread* t = Thread::Fetch();
+ if (t->InterceptsAreBlocked()) {
+ return InfallibleAllocPolicy::calloc_(aCount * aSize);
+ }
+
+ void* ptr = gMallocTable->calloc(aCount, aSize);
+ AllocCallback(ptr, aCount * aSize, t);
+ return ptr;
+}
+
+void*
+replace_realloc(void* aOldPtr, size_t aSize)
+{
+ using namespace mozilla::dmd;
+
+ if (!gIsDMDInitialized) {
+ return gMallocTable->realloc(aOldPtr, aSize);
+ }
+
+ Thread* t = Thread::Fetch();
+ if (t->InterceptsAreBlocked()) {
+ return InfallibleAllocPolicy::realloc_(aOldPtr, aSize);
+ }
+
+ // If |aOldPtr| is nullptr, the call is equivalent to |malloc(aSize)|.
+ if (!aOldPtr) {
+ return replace_malloc(aSize);
+ }
+
+ // Be very careful here! Must remove the block from the table before doing
+ // the realloc to avoid races, just like in replace_free().
+ // Nb: This does an unnecessary hashtable remove+add if the block doesn't
+ // move, but doing better isn't worth the effort.
+ DeadBlock db;
+ FreeCallback(aOldPtr, t, &db);
+ void* ptr = gMallocTable->realloc(aOldPtr, aSize);
+ if (ptr) {
+ AllocCallback(ptr, aSize, t);
+ MaybeAddToDeadBlockTable(db);
+ } else {
+ // If realloc fails, we undo the prior operations by re-inserting the old
+ // pointer into the live block table. We don't have to do anything with the
+ // dead block list because the dead block hasn't yet been inserted. The
+ // block will end up looking like it was allocated for the first time here,
+ // which is untrue, and the slop bytes will be zero, which may be untrue.
+ // But this case is rare and doing better isn't worth the effort.
+ AllocCallback(aOldPtr, gMallocTable->malloc_usable_size(aOldPtr), t);
+ }
+ return ptr;
+}
+
+void*
+replace_memalign(size_t aAlignment, size_t aSize)
+{
+ using namespace mozilla::dmd;
+
+ if (!gIsDMDInitialized) {
+ return gMallocTable->memalign(aAlignment, aSize);
+ }
+
+ Thread* t = Thread::Fetch();
+ if (t->InterceptsAreBlocked()) {
+ return InfallibleAllocPolicy::memalign_(aAlignment, aSize);
+ }
+
+ void* ptr = gMallocTable->memalign(aAlignment, aSize);
+ AllocCallback(ptr, aSize, t);
+ return ptr;
+}
+
+void
+replace_free(void* aPtr)
+{
+ using namespace mozilla::dmd;
+
+ if (!gIsDMDInitialized) {
+ gMallocTable->free(aPtr);
+ return;
+ }
+
+ Thread* t = Thread::Fetch();
+ if (t->InterceptsAreBlocked()) {
+ return InfallibleAllocPolicy::free_(aPtr);
+ }
+
+ // Do the actual free after updating the table. Otherwise, another thread
+ // could call malloc and get the freed block and update the table, and then
+ // our update here would remove the newly-malloc'd block.
+ DeadBlock db;
+ FreeCallback(aPtr, t, &db);
+ MaybeAddToDeadBlockTable(db);
+ gMallocTable->free(aPtr);
+}
+
+namespace mozilla {
+namespace dmd {
+
+//---------------------------------------------------------------------------
+// Options (Part 2)
+//---------------------------------------------------------------------------
+
+// Given an |aOptionName| like "foo", succeed if |aArg| has the form "foo=blah"
+// (where "blah" is non-empty) and return the pointer to "blah". |aArg| must
+// not have any leading whitespace; the caller strips it before calling.
+const char*
+Options::ValueIfMatch(const char* aArg, const char* aOptionName)
+{
+ MOZ_ASSERT(!isspace(*aArg)); // any leading whitespace should not remain
+ size_t optionLen = strlen(aOptionName);
+ if (strncmp(aArg, aOptionName, optionLen) == 0 && aArg[optionLen] == '=' &&
+ aArg[optionLen + 1]) {
+ return aArg + optionLen + 1;
+ }
+ return nullptr;
+}
+
+// Extracts a |long| value for an option from an argument. It must be within
+// the range |aMin..aMax| (inclusive).
+bool
+Options::GetLong(const char* aArg, const char* aOptionName,
+ long aMin, long aMax, long* aValue)
+{
+ if (const char* optionValue = ValueIfMatch(aArg, aOptionName)) {
+ char* endPtr;
+ *aValue = strtol(optionValue, &endPtr, /* base */ 10);
+ if (!*endPtr && aMin <= *aValue && *aValue <= aMax &&
+ *aValue != LONG_MIN && *aValue != LONG_MAX) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Extracts a |bool| value for an option -- encoded as "yes" or "no" -- from an
+// argument.
+bool
+Options::GetBool(const char* aArg, const char* aOptionName, bool* aValue)
+{
+ if (const char* optionValue = ValueIfMatch(aArg, aOptionName)) {
+ if (strcmp(optionValue, "yes") == 0) {
+ *aValue = true;
+ return true;
+ }
+ if (strcmp(optionValue, "no") == 0) {
+ *aValue = false;
+ return true;
+ }
+ }
+ return false;
+}
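+
+// For example (illustrative): GetBool("--show-dump-stats=yes",
+// "--show-dump-stats", &b) sets |b| to true and returns true, whereas a value
+// other than "yes" or "no" makes it return false.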
+
+Options::Options(const char* aDMDEnvVar)
+ : mDMDEnvVar(aDMDEnvVar ? InfallibleAllocPolicy::strdup_(aDMDEnvVar)
+ : nullptr)
+ , mMode(Mode::DarkMatter)
+ , mStacks(Stacks::Partial)
+ , mShowDumpStats(false)
+{
+ // It's no longer necessary to set the DMD env var to "1" if you want default
+ // options (you can leave it undefined) but we still accept "1" for
+ // backwards compatibility.
+ char* e = mDMDEnvVar;
+ if (e && strcmp(e, "1") != 0) {
+ bool isEnd = false;
+ while (!isEnd) {
+ // Consume leading whitespace.
+ while (isspace(*e)) {
+ e++;
+ }
+
+ // Save the start of the arg.
+ const char* arg = e;
+
+ // Find the first char after the arg, and temporarily change it to '\0'
+ // to isolate the arg.
+ while (!isspace(*e) && *e != '\0') {
+ e++;
+ }
+ char replacedChar = *e;
+ isEnd = replacedChar == '\0';
+ *e = '\0';
+
+ // Handle arg
+ bool myBool;
+ if (strcmp(arg, "--mode=live") == 0) {
+ mMode = Mode::Live;
+ } else if (strcmp(arg, "--mode=dark-matter") == 0) {
+ mMode = Mode::DarkMatter;
+ } else if (strcmp(arg, "--mode=cumulative") == 0) {
+ mMode = Mode::Cumulative;
+ } else if (strcmp(arg, "--mode=scan") == 0) {
+ mMode = Mode::Scan;
+
+ } else if (strcmp(arg, "--stacks=full") == 0) {
+ mStacks = Stacks::Full;
+ } else if (strcmp(arg, "--stacks=partial") == 0) {
+ mStacks = Stacks::Partial;
+
+ } else if (GetBool(arg, "--show-dump-stats", &myBool)) {
+ mShowDumpStats = myBool;
+
+ } else if (strcmp(arg, "") == 0) {
+ // This can only happen if there is trailing whitespace. Ignore.
+ MOZ_ASSERT(isEnd);
+
+ } else {
+ BadArg(arg);
+ }
+
+ // Undo the temporary isolation.
+ *e = replacedChar;
+ }
+ }
+
+ if (mMode == Mode::Scan) {
+ mStacks = Stacks::Full;
+ }
+}
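+
+// For example, a user might set the environment variable to something like
+//   DMD="--mode=cumulative --stacks=full --show-dump-stats=yes"
+// before starting the process; all three arguments are handled by the parsing
+// loop above.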
+
+void
+Options::BadArg(const char* aArg)
+{
+ StatusMsg("\n");
+ StatusMsg("Bad entry in the $DMD environment variable: '%s'.\n", aArg);
+ StatusMsg("See the output of |mach help run| for the allowed options.\n");
+ exit(1);
+}
+
+const char*
+Options::ModeString() const
+{
+ switch (mMode) {
+ case Mode::Live:
+ return "live";
+ case Mode::DarkMatter:
+ return "dark-matter";
+ case Mode::Cumulative:
+ return "cumulative";
+ case Mode::Scan:
+ return "scan";
+ default:
+ MOZ_ASSERT(false);
+ return "(unknown DMD mode)";
+ }
+}
+
+//---------------------------------------------------------------------------
+// DMD start-up
+//---------------------------------------------------------------------------
+
+#ifdef XP_MACOSX
+static void
+NopStackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
+ void* aClosure)
+{
+}
+#endif
+
+// WARNING: this function runs *very* early -- before all static initializers
+// have run. For this reason, non-scalar globals such as gStateLock and
+// gStackTraceTable are allocated dynamically (so we can guarantee their
+// construction in this function) rather than statically.
+static void
+Init(const malloc_table_t* aMallocTable)
+{
+ gMallocTable = aMallocTable;
+ gDMDBridge = InfallibleAllocPolicy::new_<DMDBridge>();
+
+ // DMD is controlled by the |DMD| environment variable.
+ const char* e = getenv("DMD");
+
+ if (e) {
+ StatusMsg("$DMD = '%s'\n", e);
+ } else {
+    StatusMsg("$DMD is undefined\n");
+ }
+
+ // Parse $DMD env var.
+ gOptions = InfallibleAllocPolicy::new_<Options>(e);
+
+#ifdef XP_MACOSX
+ // On Mac OS X we need to call StackWalkInitCriticalAddress() very early
+ // (prior to the creation of any mutexes, apparently) otherwise we can get
+ // hangs when getting stack traces (bug 821577). But
+ // StackWalkInitCriticalAddress() isn't exported from xpcom/, so instead we
+ // just call MozStackWalk, because that calls StackWalkInitCriticalAddress().
+ // See the comment above StackWalkInitCriticalAddress() for more details.
+ (void)MozStackWalk(NopStackWalkCallback, /* skipFrames */ 0,
+ /* maxFrames */ 1, nullptr, 0, nullptr);
+#endif
+
+ gStateLock = InfallibleAllocPolicy::new_<Mutex>();
+
+ gBernoulli = (FastBernoulliTrial*)
+ InfallibleAllocPolicy::malloc_(sizeof(FastBernoulliTrial));
+ ResetBernoulli();
+
+ DMD_CREATE_TLS_INDEX(gTlsIndex);
+
+ {
+ AutoLockState lock;
+
+ gStackTraceTable = InfallibleAllocPolicy::new_<StackTraceTable>();
+ MOZ_ALWAYS_TRUE(gStackTraceTable->init(8192));
+
+ gLiveBlockTable = InfallibleAllocPolicy::new_<LiveBlockTable>();
+ MOZ_ALWAYS_TRUE(gLiveBlockTable->init(8192));
+
+ // Create this even if the mode isn't Cumulative (albeit with a small
+ // size), in case the mode is changed later on (as is done by SmokeDMD.cpp,
+ // for example).
+ gDeadBlockTable = InfallibleAllocPolicy::new_<DeadBlockTable>();
+ size_t tableSize = gOptions->IsCumulativeMode() ? 8192 : 4;
+ MOZ_ALWAYS_TRUE(gDeadBlockTable->init(tableSize));
+ }
+
+ gIsDMDInitialized = true;
+}
+
+//---------------------------------------------------------------------------
+// Block reporting and unreporting
+//---------------------------------------------------------------------------
+
+static void
+ReportHelper(const void* aPtr, bool aReportedOnAlloc)
+{
+ if (!gOptions->IsDarkMatterMode() || !aPtr) {
+ return;
+ }
+
+ Thread* t = Thread::Fetch();
+
+ AutoBlockIntercepts block(t);
+ AutoLockState lock;
+
+ if (LiveBlockTable::Ptr p = gLiveBlockTable->lookup(aPtr)) {
+ p->Report(t, aReportedOnAlloc);
+ } else {
+ // We have no record of the block. It must be a bogus pointer. This should
+ // be extremely rare because Report() is almost always called in
+ // conjunction with a malloc_size_of-style function.
+ }
+}
+
+void
+DMDFuncs::Report(const void* aPtr)
+{
+ ReportHelper(aPtr, /* onAlloc */ false);
+}
+
+void
+DMDFuncs::ReportOnAlloc(const void* aPtr)
+{
+ ReportHelper(aPtr, /* onAlloc */ true);
+}
+
+//---------------------------------------------------------------------------
+// DMD output
+//---------------------------------------------------------------------------
+
+// The version number of the output format. Increment this if you make
+// backwards-incompatible changes to the format. See DMD.h for the version
+// history.
+static const int kOutputVersionNumber = 5;
+
+// Note that, unlike most SizeOf* functions, this function does not take a
+// |mozilla::MallocSizeOf| argument. That's because those arguments are
+// primarily to help DMD track heap blocks... but DMD deliberately doesn't track
+// heap blocks it allocated for itself!
+//
+// SizeOfInternal should be called while you're holding the state lock and
+// while intercepts are blocked; SizeOf acquires the lock and blocks
+// intercepts.
+
+static void
+SizeOfInternal(Sizes* aSizes)
+{
+ MOZ_ASSERT(gStateLock->IsLocked());
+ MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
+
+ aSizes->Clear();
+
+ StackTraceSet usedStackTraces;
+ GatherUsedStackTraces(usedStackTraces);
+
+ for (auto r = gStackTraceTable->all(); !r.empty(); r.popFront()) {
+ StackTrace* const& st = r.front();
+
+ if (usedStackTraces.has(st)) {
+ aSizes->mStackTracesUsed += MallocSizeOf(st);
+ } else {
+ aSizes->mStackTracesUnused += MallocSizeOf(st);
+ }
+ }
+
+ aSizes->mStackTraceTable =
+ gStackTraceTable->sizeOfIncludingThis(MallocSizeOf);
+
+ aSizes->mLiveBlockTable = gLiveBlockTable->sizeOfIncludingThis(MallocSizeOf);
+
+ aSizes->mDeadBlockTable = gDeadBlockTable->sizeOfIncludingThis(MallocSizeOf);
+}
+
+void
+DMDFuncs::SizeOf(Sizes* aSizes)
+{
+ aSizes->Clear();
+
+ AutoBlockIntercepts block(Thread::Fetch());
+ AutoLockState lock;
+ SizeOfInternal(aSizes);
+}
+
+void
+DMDFuncs::ClearReports()
+{
+ if (!gOptions->IsDarkMatterMode()) {
+ return;
+ }
+
+ AutoLockState lock;
+
+ // Unreport all blocks that were marked reported by a memory reporter. This
+ // excludes those that were reported on allocation, because they need to keep
+ // their reported marking.
+ for (auto r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
+ r.front().UnreportIfNotReportedOnAlloc();
+ }
+}
+
+class ToIdStringConverter final
+{
+public:
+ ToIdStringConverter()
+ : mNextId(0)
+ {
+ MOZ_ALWAYS_TRUE(mIdMap.init(512));
+ }
+
+ // Converts a pointer to a unique ID. Reuses the existing ID for the pointer
+ // if it's been seen before.
+ const char* ToIdString(const void* aPtr)
+ {
+ uint32_t id;
+ PointerIdMap::AddPtr p = mIdMap.lookupForAdd(aPtr);
+ if (!p) {
+ id = mNextId++;
+ MOZ_ALWAYS_TRUE(mIdMap.add(p, aPtr, id));
+ } else {
+ id = p->value();
+ }
+ return Base32(id);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+ {
+ return mIdMap.sizeOfExcludingThis(aMallocSizeOf);
+ }
+
+private:
+ // This function converts an integer to base-32. We use base-32 values for
+ // indexing into the traceTable and the frameTable, for the following reasons.
+ //
+ // - Base-32 gives more compact indices than base-16.
+ //
+ // - 32 is a power-of-two, which makes the necessary div/mod calculations
+ // fast.
+ //
+ // - We can (and do) choose non-numeric digits for base-32. When
+ // inspecting/debugging the JSON output, non-numeric indices are easier to
+ // search for than numeric indices.
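+  //
+  // For example: Base32(0) yields "A", Base32(31) yields "f", Base32(32)
+  // yields "BA", and Base32(0xffffffff) yields "Dffffff".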
+ //
+ char* Base32(uint32_t aN)
+ {
+ static const char digits[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef";
+
+ char* b = mIdBuf + kIdBufLen - 1;
+ *b = '\0';
+ do {
+ b--;
+ if (b == mIdBuf) {
+ MOZ_CRASH("Base32 buffer too small");
+ }
+ *b = digits[aN % 32];
+ aN /= 32;
+ } while (aN);
+
+ return b;
+ }
+
+ PointerIdMap mIdMap;
+ uint32_t mNextId;
+
+ // |mIdBuf| must have space for at least eight chars, which is the space
+ // needed to hold 'Dffffff' (including the terminating null char), which is
+ // the base-32 representation of 0xffffffff.
+ static const size_t kIdBufLen = 16;
+ char mIdBuf[kIdBufLen];
+};
+
+// Helper class for converting a pointer value to a string.
+class ToStringConverter
+{
+public:
+ const char* ToPtrString(const void* aPtr)
+ {
+ snprintf(kPtrBuf, sizeof(kPtrBuf) - 1, "%" PRIxPTR, (uintptr_t)aPtr);
+ return kPtrBuf;
+ }
+
+private:
+ char kPtrBuf[32];
+};
+
+static void
+WriteBlockContents(JSONWriter& aWriter, const LiveBlock& aBlock)
+{
+ size_t numWords = aBlock.ReqSize() / sizeof(uintptr_t*);
+ if (numWords == 0) {
+ return;
+ }
+
+ aWriter.StartArrayProperty("contents", aWriter.SingleLineStyle);
+ {
+ const uintptr_t** block = (const uintptr_t**)aBlock.Address();
+ ToStringConverter sc;
+ for (size_t i = 0; i < numWords; ++i) {
+ aWriter.StringElement(sc.ToPtrString(block[i]));
+ }
+ }
+ aWriter.EndArray();
+}
+
+static void
+AnalyzeImpl(UniquePtr<JSONWriteFunc> aWriter)
+{
+ // Some blocks may have been allocated while creating |aWriter|. Those blocks
+ // will be freed at the end of this function when |write| is destroyed. The
+ // allocations will have occurred while intercepts were not blocked, so the
+ // frees better be as well, otherwise we'll get assertion failures.
+ // Therefore, this declaration must precede the AutoBlockIntercepts
+ // declaration, to ensure that |write| is destroyed *after* intercepts are
+ // unblocked.
+ JSONWriter writer(Move(aWriter));
+
+ AutoBlockIntercepts block(Thread::Fetch());
+ AutoLockState lock;
+
+ // Allocate this on the heap instead of the stack because it's fairly large.
+ auto locService = InfallibleAllocPolicy::new_<CodeAddressService>();
+
+ StackTraceSet usedStackTraces;
+ MOZ_ALWAYS_TRUE(usedStackTraces.init(512));
+
+ PointerSet usedPcs;
+ MOZ_ALWAYS_TRUE(usedPcs.init(512));
+
+ size_t iscSize;
+
+ static int analysisCount = 1;
+ StatusMsg("Dump %d {\n", analysisCount++);
+
+ writer.Start();
+ {
+ writer.IntProperty("version", kOutputVersionNumber);
+
+ writer.StartObjectProperty("invocation");
+ {
+ const char* var = gOptions->DMDEnvVar();
+ if (var) {
+ writer.StringProperty("dmdEnvVar", var);
+ } else {
+ writer.NullProperty("dmdEnvVar");
+ }
+
+ writer.StringProperty("mode", gOptions->ModeString());
+ }
+ writer.EndObject();
+
+ StatusMsg(" Constructing the heap block list...\n");
+
+ ToIdStringConverter isc;
+ ToStringConverter sc;
+
+ writer.StartArrayProperty("blockList");
+ {
+ // Lambda that writes out a live block.
+ auto writeLiveBlock = [&](const LiveBlock& aB, size_t aNum) {
+ aB.AddStackTracesToTable(usedStackTraces);
+
+ MOZ_ASSERT_IF(gOptions->IsScanMode(), aNum == 1);
+
+ writer.StartObjectElement(writer.SingleLineStyle);
+ {
+ if (gOptions->IsScanMode()) {
+ writer.StringProperty("addr", sc.ToPtrString(aB.Address()));
+ WriteBlockContents(writer, aB);
+ }
+ writer.IntProperty("req", aB.ReqSize());
+ if (aB.SlopSize() > 0) {
+ writer.IntProperty("slop", aB.SlopSize());
+ }
+
+ if (aB.AllocStackTrace()) {
+ writer.StringProperty("alloc",
+ isc.ToIdString(aB.AllocStackTrace()));
+ }
+
+ if (gOptions->IsDarkMatterMode() && aB.NumReports() > 0) {
+ writer.StartArrayProperty("reps");
+ {
+ if (aB.ReportStackTrace1()) {
+ writer.StringElement(isc.ToIdString(aB.ReportStackTrace1()));
+ }
+ if (aB.ReportStackTrace2()) {
+ writer.StringElement(isc.ToIdString(aB.ReportStackTrace2()));
+ }
+ }
+ writer.EndArray();
+ }
+
+ if (aNum > 1) {
+ writer.IntProperty("num", aNum);
+ }
+ }
+ writer.EndObject();
+ };
+
+ // Live blocks.
+ if (!gOptions->IsScanMode()) {
+ // At this point we typically have many LiveBlocks that differ only in
+ // their address. Aggregate them to reduce the size of the output file.
+ AggregatedLiveBlockTable agg;
+ MOZ_ALWAYS_TRUE(agg.init(8192));
+ for (auto r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
+ const LiveBlock& b = r.front();
+ b.AddStackTracesToTable(usedStackTraces);
+
+ if (AggregatedLiveBlockTable::AddPtr p = agg.lookupForAdd(&b)) {
+ p->value() += 1;
+ } else {
+ MOZ_ALWAYS_TRUE(agg.add(p, &b, 1));
+ }
+ }
+
+ // Now iterate over the aggregated table.
+ for (auto r = agg.all(); !r.empty(); r.popFront()) {
+ const LiveBlock& b = *r.front().key();
+ size_t num = r.front().value();
+ writeLiveBlock(b, num);
+ }
+
+ } else {
+ // In scan mode we cannot aggregate because we print each live block's
+ // address and contents.
+ for (auto r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
+ const LiveBlock& b = r.front();
+ b.AddStackTracesToTable(usedStackTraces);
+
+ writeLiveBlock(b, 1);
+ }
+ }
+
+ // Dead blocks.
+ for (auto r = gDeadBlockTable->all(); !r.empty(); r.popFront()) {
+ const DeadBlock& b = r.front().key();
+ b.AddStackTracesToTable(usedStackTraces);
+
+ size_t num = r.front().value();
+ MOZ_ASSERT(num > 0);
+
+ writer.StartObjectElement(writer.SingleLineStyle);
+ {
+ writer.IntProperty("req", b.ReqSize());
+ if (b.SlopSize() > 0) {
+ writer.IntProperty("slop", b.SlopSize());
+ }
+ if (b.AllocStackTrace()) {
+ writer.StringProperty("alloc", isc.ToIdString(b.AllocStackTrace()));
+ }
+
+ if (num > 1) {
+ writer.IntProperty("num", num);
+ }
+ }
+ writer.EndObject();
+ }
+ }
+ writer.EndArray();
+
+ StatusMsg(" Constructing the stack trace table...\n");
+
+ writer.StartObjectProperty("traceTable");
+ {
+ for (auto r = usedStackTraces.all(); !r.empty(); r.popFront()) {
+ const StackTrace* const st = r.front();
+ writer.StartArrayProperty(isc.ToIdString(st), writer.SingleLineStyle);
+ {
+ for (uint32_t i = 0; i < st->Length(); i++) {
+ const void* pc = st->Pc(i);
+ writer.StringElement(isc.ToIdString(pc));
+ MOZ_ALWAYS_TRUE(usedPcs.put(pc));
+ }
+ }
+ writer.EndArray();
+ }
+ }
+ writer.EndObject();
+
+ StatusMsg(" Constructing the stack frame table...\n");
+
+ writer.StartObjectProperty("frameTable");
+ {
+ static const size_t locBufLen = 1024;
+ char locBuf[locBufLen];
+
+ for (PointerSet::Enum e(usedPcs); !e.empty(); e.popFront()) {
+ const void* const pc = e.front();
+
+ // Use 0 for the frame number. See the JSON format description comment
+ // in DMD.h to understand why.
+ locService->GetLocation(0, pc, locBuf, locBufLen);
+ writer.StringProperty(isc.ToIdString(pc), locBuf);
+ }
+ }
+ writer.EndObject();
+
+ iscSize = isc.sizeOfExcludingThis(MallocSizeOf);
+ }
+ writer.End();
+
+ if (gOptions->ShowDumpStats()) {
+ Sizes sizes;
+ SizeOfInternal(&sizes);
+
+ static const size_t kBufLen = 64;
+ char buf1[kBufLen];
+ char buf2[kBufLen];
+ char buf3[kBufLen];
+
+ StatusMsg(" Execution measurements {\n");
+
+ StatusMsg(" Data structures that persist after Dump() ends {\n");
+
+ StatusMsg(" Used stack traces: %10s bytes\n",
+ Show(sizes.mStackTracesUsed, buf1, kBufLen));
+
+ StatusMsg(" Unused stack traces: %10s bytes\n",
+ Show(sizes.mStackTracesUnused, buf1, kBufLen));
+
+ StatusMsg(" Stack trace table: %10s bytes (%s entries, %s used)\n",
+ Show(sizes.mStackTraceTable, buf1, kBufLen),
+ Show(gStackTraceTable->capacity(), buf2, kBufLen),
+ Show(gStackTraceTable->count(), buf3, kBufLen));
+
+ StatusMsg(" Live block table: %10s bytes (%s entries, %s used)\n",
+ Show(sizes.mLiveBlockTable, buf1, kBufLen),
+ Show(gLiveBlockTable->capacity(), buf2, kBufLen),
+ Show(gLiveBlockTable->count(), buf3, kBufLen));
+
+ StatusMsg(" Dead block table: %10s bytes (%s entries, %s used)\n",
+ Show(sizes.mDeadBlockTable, buf1, kBufLen),
+ Show(gDeadBlockTable->capacity(), buf2, kBufLen),
+ Show(gDeadBlockTable->count(), buf3, kBufLen));
+
+ StatusMsg(" }\n");
+ StatusMsg(" Data structures that are destroyed after Dump() ends {\n");
+
+ StatusMsg(" Location service: %10s bytes\n",
+ Show(locService->SizeOfIncludingThis(MallocSizeOf), buf1, kBufLen));
+ StatusMsg(" Used stack traces set: %10s bytes\n",
+ Show(usedStackTraces.sizeOfExcludingThis(MallocSizeOf), buf1, kBufLen));
+ StatusMsg(" Used PCs set: %10s bytes\n",
+ Show(usedPcs.sizeOfExcludingThis(MallocSizeOf), buf1, kBufLen));
+ StatusMsg(" Pointer ID map: %10s bytes\n",
+ Show(iscSize, buf1, kBufLen));
+
+ StatusMsg(" }\n");
+ StatusMsg(" Counts {\n");
+
+ size_t hits = locService->NumCacheHits();
+ size_t misses = locService->NumCacheMisses();
+ size_t requests = hits + misses;
+ StatusMsg(" Location service: %10s requests\n",
+ Show(requests, buf1, kBufLen));
+
+ size_t count = locService->CacheCount();
+ size_t capacity = locService->CacheCapacity();
+ StatusMsg(" Location service cache: "
+ "%4.1f%% hit rate, %.1f%% occupancy at end\n",
+ Percent(hits, requests), Percent(count, capacity));
+
+ StatusMsg(" }\n");
+ StatusMsg(" }\n");
+ }
+
+ InfallibleAllocPolicy::delete_(locService);
+
+ StatusMsg("}\n");
+}
+
+void
+DMDFuncs::Analyze(UniquePtr<JSONWriteFunc> aWriter)
+{
+ AnalyzeImpl(Move(aWriter));
+ ClearReports();
+}
+
+//---------------------------------------------------------------------------
+// Testing
+//---------------------------------------------------------------------------
+
+void
+DMDFuncs::ResetEverything(const char* aOptions)
+{
+ AutoLockState lock;
+
+ // Reset options.
+ InfallibleAllocPolicy::delete_(gOptions);
+ gOptions = InfallibleAllocPolicy::new_<Options>(aOptions);
+
+ // Clear all existing blocks.
+ gLiveBlockTable->clear();
+ gDeadBlockTable->clear();
+
+ // Reset gBernoulli to a deterministic state. (Its current state depends on
+ // all previous trials.)
+ ResetBernoulli();
+}
+
+} // namespace dmd
+} // namespace mozilla
diff --git a/memory/replace/dmd/DMD.h b/memory/replace/dmd/DMD.h
new file mode 100644
index 000000000..ca3ccab16
--- /dev/null
+++ b/memory/replace/dmd/DMD.h
@@ -0,0 +1,310 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DMD_h___
+#define DMD_h___
+
+#include <string.h>
+#include <stdarg.h>
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Move.h"
+#include "mozilla/Types.h"
+#include "mozilla/UniquePtr.h"
+
+#include "replace_malloc_bridge.h"
+
+namespace mozilla {
+
+class JSONWriteFunc;
+
+namespace dmd {
+
+struct Sizes
+{
+ size_t mStackTracesUsed;
+ size_t mStackTracesUnused;
+ size_t mStackTraceTable;
+ size_t mLiveBlockTable;
+ size_t mDeadBlockTable;
+
+ Sizes() { Clear(); }
+ void Clear() { memset(this, 0, sizeof(Sizes)); }
+};
+
+// See further below for a description of each method. The DMDFuncs class
+// should contain a virtual method for each of them (except IsRunning,
+// which can be inferred from the DMDFuncs singleton existing).
+struct DMDFuncs
+{
+ virtual void Report(const void*);
+
+ virtual void ReportOnAlloc(const void*);
+
+ virtual void ClearReports();
+
+ virtual void Analyze(UniquePtr<JSONWriteFunc>);
+
+ virtual void SizeOf(Sizes*);
+
+ virtual void StatusMsg(const char*, va_list);
+
+ virtual void ResetEverything(const char*);
+
+#ifndef REPLACE_MALLOC_IMPL
+ // We deliberately don't use ReplaceMalloc::GetDMDFuncs here, because if we
+ // did, the following would happen.
+  // - The code footprint of each call to Get() is larger, as GetDMDFuncs
+  //   ends up inlined.
+  // - When no replace-malloc library is loaded, the number of instructions
+  //   executed is equivalent, but they don't necessarily fit in the same
+  //   cache line.
+ // - When a non-DMD replace-malloc library is loaded, the overhead is
+ // higher because there is first a check for the replace malloc bridge
+ // and then for the DMDFuncs singleton.
+ // Initializing the DMDFuncs singleton on the first access makes the
+ // overhead even worse. Either Get() is inlined and massive, or it isn't
+ // and a simple value check becomes a function call.
+ static DMDFuncs* Get() { return sSingleton.Get(); }
+
+private:
+ // Wrapper class keeping a pointer to the DMD functions. It is statically
+ // initialized because it needs to be set early enough.
+ // Debug builds also check that it's never accessed before the static
+  // initialization actually occurred, which could be the case if some other
+ // static initializer ended up calling into DMD.
+ class Singleton
+ {
+ public:
+ Singleton()
+ : mValue(ReplaceMalloc::GetDMDFuncs())
+#ifdef DEBUG
+ , mInitialized(true)
+#endif
+ {}
+
+ DMDFuncs* Get()
+ {
+ MOZ_ASSERT(mInitialized);
+ return mValue;
+ }
+
+ private:
+ DMDFuncs* mValue;
+#ifdef DEBUG
+ bool mInitialized;
+#endif
+ };
+
+ // This singleton pointer must be defined on the program side. In Gecko,
+ // this is done in xpcom/base/nsMemoryInfoDumper.cpp.
+ static /* DMDFuncs:: */Singleton sSingleton;
+#endif
+};
+
+#ifndef REPLACE_MALLOC_IMPL
+// Mark a heap block as reported by a memory reporter.
+inline void
+Report(const void* aPtr)
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ funcs->Report(aPtr);
+ }
+}
+
+// Mark a heap block as reported immediately on allocation.
+inline void
+ReportOnAlloc(const void* aPtr)
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ funcs->ReportOnAlloc(aPtr);
+ }
+}
+
+// Clears existing reportedness data from any prior runs of the memory
+// reporters. The following sequence should be used.
+// - ClearReports()
+// - run the memory reporters
+// - Analyze()
+// This sequence avoids spurious twice-reported warnings.
+inline void
+ClearReports()
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ funcs->ClearReports();
+ }
+}
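+
+// A minimal usage sketch of the sequence described above. |MyWriteFunc| is a
+// hypothetical JSONWriteFunc subclass; it is not defined in this header.
+//
+//   mozilla::dmd::ClearReports();
+//   // ... run the memory reporters, which call Report()/ReportOnAlloc() ...
+//   mozilla::dmd::Analyze(mozilla::MakeUnique<MyWriteFunc>());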
+
+// Determines which heap blocks have been reported, and dumps JSON output
+// (via |aWriter|) describing the heap.
+//
+// The following sample output contains comments that explain the format and
+// design choices. The output files can be quite large, so a number of
+// decisions were made to minimize size, such as using short property names and
+// omitting properties whenever possible.
+//
+// {
+// // The version number of the format, which will be incremented each time
+// // backwards-incompatible changes are made. A mandatory integer.
+// //
+// // Version history:
+// // - 1: Bug 1044709
+// // - 2: Bug 1094552
+// // - 3: Bug 1100851
+// // - 4: Bug 1121830
+// // - 5: Bug 1253512
+// "version": 5,
+//
+// // Information about how DMD was invoked. A mandatory object.
+// "invocation": {
+// // The contents of the $DMD environment variable. A string, or |null| if
+// // $DMD is undefined.
+// "dmdEnvVar": "--mode=dark-matter",
+//
+// // The profiling mode. A mandatory string taking one of the following
+// // values: "live", "dark-matter", "cumulative", "scan".
+// "mode": "dark-matter",
+// },
+//
+// // Details of all analyzed heap blocks. A mandatory array.
+// "blockList": [
+// // An example of a heap block.
+// {
+// // Requested size, in bytes. This is a mandatory integer.
+// "req": 3584,
+//
+// // Requested slop size, in bytes. This is mandatory if it is non-zero,
+// // but omitted otherwise.
+// "slop": 512,
+//
+// // The stack trace at which the block was allocated. An optional
+// // string that indexes into the "traceTable" object. If omitted, no
+// // allocation stack trace was recorded for the block.
+// "alloc": "A",
+//
+// // One or more stack traces at which this heap block was reported by a
+// // memory reporter. An optional array that will only be present in
+// // "dark-matter" mode. The elements are strings that index into
+// // the "traceTable" object.
+// "reps": ["B"]
+//
+// // The number of heap blocks with exactly the above properties. This
+// // is mandatory if it is greater than one, but omitted otherwise.
+// // (Blocks with identical properties don't have to be aggregated via
+// // this property, but it can greatly reduce output file size.)
+// "num": 5,
+//
+// // The address of the block. This is mandatory in "scan" mode, but
+// // omitted otherwise.
+// "addr": "4e4e4e4e",
+//
+// // The contents of the block, read one word at a time. This is
+// // mandatory in "scan" mode for blocks at least one word long, but
+// // omitted otherwise.
+// "contents": ["0", "6", "7f7f7f7f", "0"]
+// }
+// ],
+//
+// // The stack traces referenced by elements of the "blockList" array. This
+// // could be an array, but making it an object makes it easier to see
+// // which stacks correspond to which references in the "blockList" array.
+// "traceTable": {
+//     // Each property corresponds to a stack trace mentioned in the
+//     // "blockList" array. Each element is an index into the "frameTable"
+//     // object.
+// "A": ["D", "E"],
+// "B": ["F", "G"]
+// },
+//
+// // The stack frames referenced by the "traceTable" object. The
+// // descriptions can be quite long, so they are stored separately from the
+// // "traceTable" object so that each one only has to be written once.
+// // This could also be an array, but again, making it an object makes it
+// // easier to see which frames correspond to which references in the
+// // "traceTable" object.
+// "frameTable": {
+// // Each property key is a frame key mentioned in the "traceTable" object.
+// // Each property value is a string containing a frame description. Each
+// // frame description must be in a format recognized by the stack-fixing
+// // scripts (e.g. fix_linux_stack.py), which require a frame number at
+// // the start. Because each stack frame description in this table can
+// // be shared between multiple stack traces, we use a dummy value of
+// // #00. The proper frame number can be reconstructed later by scripts
+// // that output stack traces in a conventional non-shared format.
+// "D": "#00: foo (Foo.cpp:123)",
+// "E": "#00: bar (Bar.cpp:234)",
+// "F": "#00: baz (Baz.cpp:345)",
+// "G": "#00: quux (Quux.cpp:456)"
+// }
+// }
+//
+// Implementation note: normally, this function wouldn't be templated, but in
+// that case, the function is compiled, which makes the destructor for the
+// UniquePtr get instantiated, and that needs JSONWriteFunc to be fully
+// defined. That, in turn, requires including JSONWriter.h, which includes
+// double-conversion.h, which ends up breaking various things built with
+// -Werror for various reasons.
+//
+template <typename JSONWriteFunc>
+inline void
+Analyze(UniquePtr<JSONWriteFunc> aWriteFunc)
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ funcs->Analyze(Move(aWriteFunc));
+ }
+}
+
+// Gets the size of various data structures. Used to implement a memory
+// reporter for DMD.
+inline void
+SizeOf(Sizes* aSizes)
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ funcs->SizeOf(aSizes);
+ }
+}
+
+// Prints a status message prefixed with "DMD[<pid>]". Use sparingly.
+inline void
+StatusMsg(const char* aFmt, ...)
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ va_list ap;
+ va_start(ap, aFmt);
+ funcs->StatusMsg(aFmt, ap);
+ va_end(ap);
+ }
+}
+
+// Indicates whether or not DMD is running.
+inline bool
+IsRunning()
+{
+ return !!DMDFuncs::Get();
+}
+
+// Resets all DMD options and then sets new ones according to those specified
+// in |aOptions|. Also clears all recorded data about allocations. Only used
+// for testing purposes.
+inline void
+ResetEverything(const char* aOptions)
+{
+ DMDFuncs* funcs = DMDFuncs::Get();
+ if (funcs) {
+ funcs->ResetEverything(aOptions);
+ }
+}
+#endif
+
+} // namespace dmd
+} // namespace mozilla
+
+#endif /* DMD_h___ */
diff --git a/memory/replace/dmd/README b/memory/replace/dmd/README
new file mode 100644
index 000000000..eeca7047e
--- /dev/null
+++ b/memory/replace/dmd/README
@@ -0,0 +1,2 @@
+This is DMD. See https://wiki.mozilla.org/Performance/MemShrink/DMD for
+details on how to use it.
diff --git a/memory/replace/dmd/block_analyzer.py b/memory/replace/dmd/block_analyzer.py
new file mode 100644
index 000000000..cc0da1e11
--- /dev/null
+++ b/memory/replace/dmd/block_analyzer.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# From a scan mode DMD log, extract some information about a
+# particular block, such as its allocation stack or which other blocks
+# contain pointers to it. This can be useful when investigating leaks
+# caused by unknown references to refcounted objects.
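+#
+# A hypothetical invocation (the log name and block address are made up; pass
+# a clamped scan-mode log and the address of the block of interest):
+#
+#   python block_analyzer.py --info --ignore-alloc-fns dmd-scan.json.gz 7f0c5d404040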
+
+import json
+import gzip
+import sys
+import argparse
+import re
+
+
+# The DMD output version this script handles.
+outputVersion = 5
+
+# If --ignore-alloc-fns is specified, stack frames containing functions that
+# match these strings will be removed from the *start* of stack traces. (Once
+# we hit a non-matching frame, any subsequent frames won't be removed even if
+# they do match.)
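+#
+# A made-up example: with --ignore-alloc-fns, a trace whose frames describe
+# [replace_malloc, moz_xmalloc, SomeCaller, AnotherCaller] is trimmed to
+# [SomeCaller, AnotherCaller]; stripping stops at the first non-matching frame.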
+allocatorFns = [
+ 'malloc (',
+ 'replace_malloc',
+ 'replace_calloc',
+ 'replace_realloc',
+ 'replace_memalign',
+ 'replace_posix_memalign',
+ 'malloc_zone_malloc',
+ 'moz_xmalloc',
+ 'moz_xcalloc',
+ 'moz_xrealloc',
+ 'operator new(',
+ 'operator new[](',
+ 'g_malloc',
+ 'g_slice_alloc',
+ 'callocCanGC',
+ 'reallocCanGC',
+ 'vpx_malloc',
+ 'vpx_calloc',
+ 'vpx_realloc',
+ 'vpx_memalign',
+ 'js_malloc',
+ 'js_calloc',
+ 'js_realloc',
+ 'pod_malloc',
+ 'pod_calloc',
+ 'pod_realloc',
+ 'nsTArrayInfallibleAllocator::Malloc',
+    # This one is necessary to fully filter some sequences of allocation
+    # functions that happen in practice. Note that ??? entries that follow
+    # non-allocation functions won't be stripped, as explained above.
+ '???',
+]
+
+####
+
+# Command line arguments
+
+def range_1_24(string):
+ value = int(string)
+ if value < 1 or value > 24:
+ msg = '{:s} is not in the range 1..24'.format(string)
+ raise argparse.ArgumentTypeError(msg)
+ return value
+
+parser = argparse.ArgumentParser(description='Analyze the heap graph to find out things about an object. \
+By default this prints out information about blocks that point to the given block.')
+
+parser.add_argument('dmd_log_file_name',
+ help='clamped DMD log file name')
+
+parser.add_argument('block',
+ help='address of the block of interest')
+
+parser.add_argument('--info', dest='info', action='store_true',
+ default=False,
+ help='Print out information about the block.')
+
+parser.add_argument('-sfl', '--max-stack-frame-length', type=int,
+ default=150,
+ help='Maximum number of characters to print from each stack frame')
+
+parser.add_argument('-a', '--ignore-alloc-fns', action='store_true',
+ help='ignore allocation functions at the start of traces')
+
+parser.add_argument('-f', '--max-frames', type=range_1_24,
+ help='maximum number of frames to consider in each trace')
+
+parser.add_argument('-c', '--chain-reports', action='store_true',
+ help='if only one block is found to hold onto the object, report the next one, too')
+
+
+####
+
+
+class BlockData:
+ def __init__(self, json_block):
+ self.addr = json_block['addr']
+
+ if 'contents' in json_block:
+ contents = json_block['contents']
+ else:
+ contents = []
+ self.contents = []
+ for c in contents:
+ self.contents.append(int(c, 16))
+
+ self.req_size = json_block['req']
+
+ self.alloc_stack = json_block['alloc']
+
+
+def print_trace_segment(args, stacks, block):
+ (traceTable, frameTable) = stacks
+
+ for l in traceTable[block.alloc_stack]:
+ # The 5: is to remove the bogus leading "#00: " from the stack frame.
+ print ' ', frameTable[l][5:args.max_stack_frame_length]
+
+
+def show_referrers(args, blocks, stacks, block):
+ visited = set([])
+
+ anyFound = False
+
+ while True:
+ referrers = {}
+
+ for b, data in blocks.iteritems():
+ which_edge = 0
+ for e in data.contents:
+ if e == block:
+ # 8 is the number of bytes per word on a 64-bit system.
+ # XXX This means that this output will be wrong for logs from 32-bit systems!
+ referrers.setdefault(b, []).append(8 * which_edge)
+ anyFound = True
+ which_edge += 1
+
+ for r in referrers:
+ sys.stdout.write('0x{} size = {} bytes'.format(blocks[r].addr, blocks[r].req_size))
+ plural = 's' if len(referrers[r]) > 1 else ''
+ sys.stdout.write(' at byte offset' + plural + ' ' + (', '.join(str(x) for x in referrers[r])))
+ print
+ print_trace_segment(args, stacks, blocks[r])
+ print
+
+ if args.chain_reports:
+ if len(referrers) == 0:
+ sys.stdout.write('Found no more referrers.\n')
+ break
+ if len(referrers) > 1:
+ sys.stdout.write('Found too many referrers.\n')
+ break
+
+ sys.stdout.write('Chaining to next referrer.\n\n')
+ for r in referrers:
+ block = r
+ if block in visited:
+ sys.stdout.write('Found a loop.\n')
+ break
+ visited.add(block)
+ else:
+ break
+
+ if not anyFound:
+ print 'No referrers found.'
+
+
+def show_block_info(args, blocks, stacks, block):
+ b = blocks[block]
+ sys.stdout.write('block: 0x{}\n'.format(b.addr))
+ sys.stdout.write('requested size: {} bytes\n'.format(b.req_size))
+ sys.stdout.write('\n')
+ sys.stdout.write('block contents: ')
+ for c in b.contents:
+ v = '0' if c == 0 else blocks[c].addr
+ sys.stdout.write('0x{} '.format(v))
+ sys.stdout.write('\n\n')
+ sys.stdout.write('allocation stack:\n')
+ print_trace_segment(args, stacks, b)
+ return
+
+
+def cleanupTraceTable(args, frameTable, traceTable):
+ # Remove allocation functions at the start of traces.
+ if args.ignore_alloc_fns:
+ # Build a regexp that matches every function in allocatorFns.
+ escapedAllocatorFns = map(re.escape, allocatorFns)
+ fn_re = re.compile('|'.join(escapedAllocatorFns))
+
+ # Remove allocator fns from each stack trace.
+ for traceKey, frameKeys in traceTable.items():
+ numSkippedFrames = 0
+ for frameKey in frameKeys:
+ frameDesc = frameTable[frameKey]
+ if re.search(fn_re, frameDesc):
+ numSkippedFrames += 1
+ else:
+ break
+ if numSkippedFrames > 0:
+ traceTable[traceKey] = frameKeys[numSkippedFrames:]
+
+ # Trim the number of frames.
+ for traceKey, frameKeys in traceTable.items():
+ if len(frameKeys) > args.max_frames:
+ traceTable[traceKey] = frameKeys[:args.max_frames]
+
+
+def loadGraph(options):
+ # Handle gzipped input if necessary.
+ isZipped = options.dmd_log_file_name.endswith('.gz')
+ opener = gzip.open if isZipped else open
+
+ with opener(options.dmd_log_file_name, 'rb') as f:
+ j = json.load(f)
+
+ if j['version'] != outputVersion:
+ raise Exception("'version' property isn't '{:d}'".format(outputVersion))
+
+ invocation = j['invocation']
+
+ block_list = j['blockList']
+ blocks = {}
+
+ for json_block in block_list:
+ blocks[int(json_block['addr'], 16)] = BlockData(json_block)
+
+ traceTable = j['traceTable']
+ frameTable = j['frameTable']
+
+ cleanupTraceTable(options, frameTable, traceTable)
+
+ return (blocks, (traceTable, frameTable))
+
+
+def analyzeLogs():
+ options = parser.parse_args()
+
+ (blocks, stacks) = loadGraph(options)
+
+ block = int(options.block, 16)
+
+ if not block in blocks:
+ print 'Object', block, 'not found in traces.'
+ print 'It could still be the target of some nodes.'
+ return
+
+ if options.info:
+ show_block_info(options, blocks, stacks, block)
+ return
+
+ show_referrers(options, blocks, stacks, block)
+
+
+if __name__ == "__main__":
+ analyzeLogs()
diff --git a/memory/replace/dmd/dmd.py b/memory/replace/dmd/dmd.py
new file mode 100755
index 000000000..d6b09f756
--- /dev/null
+++ b/memory/replace/dmd/dmd.py
@@ -0,0 +1,890 @@
+#! /usr/bin/env python
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''This script analyzes a JSON file emitted by DMD.'''
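+
+# Hypothetical invocations (all file names and paths below are made up):
+#
+#   dmd.py dmd-1234.json.gz                   # analyze one log
+#   dmd.py before.json.gz after.json.gz       # analyze the difference
+#   BREAKPAD_SYMBOLS_PATH=/path/to/syms dmd.py dmd-1234.json.gz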
+
+from __future__ import print_function, division
+
+import argparse
+import collections
+import gzip
+import json
+import os
+import platform
+import re
+import shutil
+import sys
+import tempfile
+from bisect import bisect_right
+
+# The DMD output version this script handles.
+outputVersion = 5
+
+# If --ignore-alloc-fns is specified, stack frames containing functions that
+# match these strings will be removed from the *start* of stack traces. (Once
+# we hit a non-matching frame, any subsequent frames won't be removed even if
+# they do match.)
+allocatorFns = [
+ # Matches malloc, replace_malloc, moz_xmalloc, vpx_malloc, js_malloc, pod_malloc, malloc_zone_*, g_malloc.
+ 'malloc',
+ # Matches calloc, replace_calloc, moz_xcalloc, vpx_calloc, js_calloc, pod_calloc, malloc_zone_calloc, pod_callocCanGC.
+ 'calloc',
+ # Matches realloc, replace_realloc, moz_xrealloc, vpx_realloc, js_realloc, pod_realloc, pod_reallocCanGC.
+ 'realloc',
+ # Matches memalign, posix_memalign, replace_memalign, replace_posix_memalign, moz_xmemalign, moz_xposix_memalign, vpx_memalign, malloc_zone_memalign.
+ 'memalign',
+ 'operator new(',
+ 'operator new[](',
+ 'g_slice_alloc',
+    # This one is necessary to fully filter some sequences of allocation
+    # functions that happen in practice. Note that ??? entries that follow
+    # non-allocation functions won't be stripped, as explained above.
+ '???',
+]
+
+class Record(object):
+ '''A record is an aggregation of heap blocks that have identical stack
+ traces. It can also be used to represent the difference between two
+ records.'''
+
+ def __init__(self):
+ self.numBlocks = 0
+ self.reqSize = 0
+ self.slopSize = 0
+ self.usableSize = 0
+ self.allocatedAtDesc = None
+ self.reportedAtDescs = []
+ self.usableSizes = collections.defaultdict(int)
+
+ def isZero(self, args):
+ return self.numBlocks == 0 and \
+ self.reqSize == 0 and \
+ self.slopSize == 0 and \
+ self.usableSize == 0 and \
+ len(self.usableSizes) == 0
+
+ def negate(self):
+ self.numBlocks = -self.numBlocks
+ self.reqSize = -self.reqSize
+ self.slopSize = -self.slopSize
+ self.usableSize = -self.usableSize
+
+ negatedUsableSizes = collections.defaultdict(int)
+ for usableSize, count in self.usableSizes.items():
+ negatedUsableSizes[-usableSize] = count
+ self.usableSizes = negatedUsableSizes
+
+ def subtract(self, r):
+ # We should only be calling this on records with matching stack traces.
+ # Check this.
+ assert self.allocatedAtDesc == r.allocatedAtDesc
+ assert self.reportedAtDescs == r.reportedAtDescs
+
+ self.numBlocks -= r.numBlocks
+ self.reqSize -= r.reqSize
+ self.slopSize -= r.slopSize
+ self.usableSize -= r.usableSize
+
+ usableSizes1 = self.usableSizes
+ usableSizes2 = r.usableSizes
+ usableSizes3 = collections.defaultdict(int)
+ for usableSize in usableSizes1:
+ counts1 = usableSizes1[usableSize]
+ if usableSize in usableSizes2:
+ counts2 = usableSizes2[usableSize]
+ del usableSizes2[usableSize]
+ counts3 = counts1 - counts2
+ if counts3 != 0:
+ if counts3 < 0:
+ usableSize = -usableSize
+ counts3 = -counts3
+ usableSizes3[usableSize] = counts3
+ else:
+ usableSizes3[usableSize] = counts1
+
+ for usableSize in usableSizes2:
+ usableSizes3[-usableSize] = usableSizes2[usableSize]
+
+ self.usableSizes = usableSizes3
+
+ @staticmethod
+ def cmpByUsableSize(r1, r2):
+ # Sort by usable size, then by req size.
+ return cmp(abs(r1.usableSize), abs(r2.usableSize)) or \
+ Record.cmpByReqSize(r1, r2)
+
+ @staticmethod
+ def cmpByReqSize(r1, r2):
+ # Sort by req size.
+ return cmp(abs(r1.reqSize), abs(r2.reqSize))
+
+ @staticmethod
+ def cmpBySlopSize(r1, r2):
+ # Sort by slop size.
+ return cmp(abs(r1.slopSize), abs(r2.slopSize))
+
+ @staticmethod
+ def cmpByNumBlocks(r1, r2):
+ # Sort by block counts, then by usable size.
+ return cmp(abs(r1.numBlocks), abs(r2.numBlocks)) or \
+ Record.cmpByUsableSize(r1, r2)
+
+
+sortByChoices = {
+ 'usable': Record.cmpByUsableSize, # the default
+ 'req': Record.cmpByReqSize,
+ 'slop': Record.cmpBySlopSize,
+ 'num-blocks': Record.cmpByNumBlocks,
+}
+
+
+def parseCommandLine():
+ # 24 is the maximum number of frames that DMD will produce.
+ def range_1_24(string):
+ value = int(string)
+ if value < 1 or value > 24:
+ msg = '{:s} is not in the range 1..24'.format(string)
+ raise argparse.ArgumentTypeError(msg)
+ return value
+
+ description = '''
+Analyze heap data produced by DMD.
+If one file is specified, analyze it; if two files are specified, analyze the
+difference.
+Input files can be gzipped.
+Write to stdout unless -o/--output is specified.
+Stack traces are fixed to show function names, filenames and line numbers
+unless --no-fix-stacks is specified; stack fixing modifies the original file
+and may take some time. If specified, the BREAKPAD_SYMBOLS_PATH environment
+variable is used to find breakpad symbols for stack fixing.
+'''
+ p = argparse.ArgumentParser(description=description)
+
+ p.add_argument('-o', '--output', type=argparse.FileType('w'),
+ help='output file; stdout if unspecified')
+
+ p.add_argument('-f', '--max-frames', type=range_1_24,
+ help='maximum number of frames to consider in each trace')
+
+ p.add_argument('-s', '--sort-by', choices=sortByChoices.keys(),
+ default='usable',
+ help='sort the records by a particular metric')
+
+ p.add_argument('-a', '--ignore-alloc-fns', action='store_true',
+ help='ignore allocation functions at the start of traces')
+
+ p.add_argument('--no-fix-stacks', action='store_true',
+ help='do not fix stacks')
+
+ p.add_argument('--clamp-contents', action='store_true',
+ help='for a scan mode log, clamp addresses to the start of live blocks, or zero if not in one')
+
+ p.add_argument('--print-clamp-stats', action='store_true',
+ help='print information about the results of pointer clamping; mostly useful for debugging clamping')
+
+ p.add_argument('--filter-stacks-for-testing', action='store_true',
+ help='filter stack traces; only useful for testing purposes')
+
+ p.add_argument('input_file',
+ help='a file produced by DMD')
+
+ p.add_argument('input_file2', nargs='?',
+ help='a file produced by DMD; if present, it is diff\'d with input_file')
+
+ return p.parse_args(sys.argv[1:])
+
+
+# Fix stacks if necessary: first write the output to a tempfile, then replace
+# the original file with it.
+def fixStackTraces(inputFilename, isZipped, opener):
+ # This append() call is needed to make the import statements work when this
+ # script is installed as a symlink.
+ sys.path.append(os.path.dirname(__file__))
+
+ bpsyms = os.environ.get('BREAKPAD_SYMBOLS_PATH', None)
+ sysname = platform.system()
+ if bpsyms and os.path.exists(bpsyms):
+ import fix_stack_using_bpsyms as fixModule
+ fix = lambda line: fixModule.fixSymbols(line, bpsyms)
+ elif sysname == 'Linux':
+ import fix_linux_stack as fixModule
+ fix = lambda line: fixModule.fixSymbols(line)
+ elif sysname == 'Darwin':
+ import fix_macosx_stack as fixModule
+ fix = lambda line: fixModule.fixSymbols(line)
+ else:
+ fix = None # there is no fix script for Windows
+
+ if fix:
+ # Fix stacks, writing output to a temporary file, and then
+ # overwrite the original file.
+ tmpFile = tempfile.NamedTemporaryFile(delete=False)
+
+ # If the input is gzipped, then the output (written initially to
+ # |tmpFile|) should be gzipped as well.
+ #
+ # And we want to set its pre-gzipped filename to '' rather than the
+ # name of the temporary file, so that programs like the Unix 'file'
+ # utility don't say that it was called 'tmp6ozTxE' (or something like
+ # that) before it was zipped. So that explains the |filename=''|
+ # parameter.
+ #
+ # But setting the filename like that clobbers |tmpFile.name|, so we
+ # must get that now in order to move |tmpFile| at the end.
+ tmpFilename = tmpFile.name
+ if isZipped:
+ tmpFile = gzip.GzipFile(filename='', fileobj=tmpFile)
+
+ with opener(inputFilename, 'rb') as inputFile:
+ for line in inputFile:
+ tmpFile.write(fix(line))
+
+ tmpFile.close()
+
+ shutil.move(tmpFilename, inputFilename)
+
+
+def getDigestFromFile(args, inputFile):
+ # Handle gzipped input if necessary.
+ isZipped = inputFile.endswith('.gz')
+ opener = gzip.open if isZipped else open
+
+ # Fix stack traces unless otherwise instructed.
+ if not args.no_fix_stacks:
+ fixStackTraces(inputFile, isZipped, opener)
+
+ if args.clamp_contents:
+ clampBlockList(args, inputFile, isZipped, opener)
+
+ with opener(inputFile, 'rb') as f:
+ j = json.load(f)
+
+ if j['version'] != outputVersion:
+ raise Exception("'version' property isn't '{:d}'".format(outputVersion))
+
+ # Extract the main parts of the JSON object.
+ invocation = j['invocation']
+ dmdEnvVar = invocation['dmdEnvVar']
+ mode = invocation['mode']
+ blockList = j['blockList']
+ traceTable = j['traceTable']
+ frameTable = j['frameTable']
+
+ # Insert the necessary entries for unrecorded stack traces. Note that 'ut'
+ # and 'uf' will not overlap with any keys produced by DMD's
+ # ToIdStringConverter::Base32() function.
+ unrecordedTraceID = 'ut'
+ unrecordedFrameID = 'uf'
+ traceTable[unrecordedTraceID] = [unrecordedFrameID]
+ frameTable[unrecordedFrameID] = \
+ '#00: (no stack trace recorded due to --stacks=partial)'
+
+ # For the purposes of this script, 'scan' behaves like 'live'.
+ if mode == 'scan':
+ mode = 'live'
+
+ if not mode in ['live', 'dark-matter', 'cumulative']:
+ raise Exception("bad 'mode' property: '{:s}'".format(mode))
+
+ # Remove allocation functions at the start of traces.
+ if args.ignore_alloc_fns:
+ # Build a regexp that matches every function in allocatorFns.
+ escapedAllocatorFns = map(re.escape, allocatorFns)
+ fn_re = re.compile('|'.join(escapedAllocatorFns))
+
+ # Remove allocator fns from each stack trace.
+ for traceKey, frameKeys in traceTable.items():
+ numSkippedFrames = 0
+ for frameKey in frameKeys:
+ frameDesc = frameTable[frameKey]
+ if re.search(fn_re, frameDesc):
+ numSkippedFrames += 1
+ else:
+ break
+ if numSkippedFrames > 0:
+ traceTable[traceKey] = frameKeys[numSkippedFrames:]
+
+ # Trim the number of frames.
+ for traceKey, frameKeys in traceTable.items():
+ if len(frameKeys) > args.max_frames:
+ traceTable[traceKey] = frameKeys[:args.max_frames]
+
+ def buildTraceDescription(traceTable, frameTable, traceKey):
+ frameKeys = traceTable[traceKey]
+ fmt = ' #{:02d}{:}'
+
+ if args.filter_stacks_for_testing:
+ # When running SmokeDMD.cpp, every stack trace should contain at
+ # least one frame that contains 'DMD.cpp', from either |DMD.cpp| or
+ # |SmokeDMD.cpp|. (Or 'dmd.cpp' on Windows.) If we see such a
+ # frame, we replace the entire stack trace with a single,
+ # predictable frame. There is too much variation in the stack
+ # traces across different machines and platforms to do more precise
+ # matching, but this level of matching will result in failure if
+ # stack fixing fails completely.
+ for frameKey in frameKeys:
+ frameDesc = frameTable[frameKey]
+ if 'DMD.cpp' in frameDesc or 'dmd.cpp' in frameDesc:
+ return [fmt.format(1, ': ... DMD.cpp ...')]
+
+ # The frame number is always '#00' (see DMD.h for why), so we have to
+ # replace that with the correct frame number.
+ desc = []
+ for n, frameKey in enumerate(traceTable[traceKey], start=1):
+ desc.append(fmt.format(n, frameTable[frameKey][3:]))
+ return desc
+
+ # Aggregate blocks into records. All sufficiently similar blocks go into a
+ # single record.
+
+ if mode in ['live', 'cumulative']:
+ liveOrCumulativeRecords = collections.defaultdict(Record)
+ elif mode == 'dark-matter':
+ unreportedRecords = collections.defaultdict(Record)
+ onceReportedRecords = collections.defaultdict(Record)
+ twiceReportedRecords = collections.defaultdict(Record)
+
+ heapUsableSize = 0
+ heapBlocks = 0
+
+ recordKeyPartCache = {}
+
+ for block in blockList:
+ # For each block we compute a |recordKey|, and all blocks with the same
+ # |recordKey| are aggregated into a single record. The |recordKey| is
+ # derived from the block's 'alloc' and 'reps' (if present) stack
+ # traces.
+ #
+ # We use frame descriptions (e.g. "#00: foo (X.cpp:99)") when comparing
+ # traces for equality. We can't use trace keys or frame keys because
+ # they're not comparable across different DMD runs (which is relevant
+ # when doing diffs).
+ #
+ # Using frame descriptions also fits in with the stack trimming done
+ # for --max-frames, which requires that stack traces with common
+        # beginnings but different endings be considered equivalent. E.g. if
+ # we have distinct traces T1:[A:D1,B:D2,C:D3] and T2:[X:D1,Y:D2,Z:D4]
+ # and we trim the final frame of each they should be considered
+ # equivalent because the untrimmed frame descriptions (D1 and D2)
+ # match.
+ #
+ # Having said all that, during a single invocation of dmd.py on a
+        # single DMD file, for a single traceKey value the record key part will
+ # always be the same, and we might encounter it 1000s of times. So we
+ # cache prior results for speed.
+ def makeRecordKeyPart(traceKey):
+ if traceKey in recordKeyPartCache:
+ return recordKeyPartCache[traceKey]
+
+ recordKeyPart = str(map(lambda frameKey: frameTable[frameKey],
+ traceTable[traceKey]))
+ recordKeyPartCache[traceKey] = recordKeyPart
+ return recordKeyPart
+
+ allocatedAtTraceKey = block.get('alloc', unrecordedTraceID)
+ if mode in ['live', 'cumulative']:
+ recordKey = makeRecordKeyPart(allocatedAtTraceKey)
+ records = liveOrCumulativeRecords
+ elif mode == 'dark-matter':
+ recordKey = makeRecordKeyPart(allocatedAtTraceKey)
+ if 'reps' in block:
+ reportedAtTraceKeys = block['reps']
+ for reportedAtTraceKey in reportedAtTraceKeys:
+ recordKey += makeRecordKeyPart(reportedAtTraceKey)
+ if len(reportedAtTraceKeys) == 1:
+ records = onceReportedRecords
+ else:
+ records = twiceReportedRecords
+ else:
+ records = unreportedRecords
+
+ record = records[recordKey]
+
+ if 'req' not in block:
+ raise Exception("'req' property missing in block'")
+
+ reqSize = block['req']
+ slopSize = block.get('slop', 0)
+
+ if 'num' in block:
+ num = block['num']
+ else:
+ num = 1
+
+ usableSize = reqSize + slopSize
+ heapUsableSize += num * usableSize
+ heapBlocks += num
+
+ record.numBlocks += num
+ record.reqSize += num * reqSize
+ record.slopSize += num * slopSize
+ record.usableSize += num * usableSize
+ if record.allocatedAtDesc == None:
+ record.allocatedAtDesc = \
+ buildTraceDescription(traceTable, frameTable,
+ allocatedAtTraceKey)
+
+ if mode in ['live', 'cumulative']:
+ pass
+ elif mode == 'dark-matter':
+ if 'reps' in block and record.reportedAtDescs == []:
+ f = lambda k: buildTraceDescription(traceTable, frameTable, k)
+ record.reportedAtDescs = map(f, reportedAtTraceKeys)
+ record.usableSizes[usableSize] += num
+
+ # All the processed data for a single DMD file is called a "digest".
+ digest = {}
+ digest['dmdEnvVar'] = dmdEnvVar
+ digest['mode'] = mode
+ digest['heapUsableSize'] = heapUsableSize
+ digest['heapBlocks'] = heapBlocks
+ if mode in ['live', 'cumulative']:
+ digest['liveOrCumulativeRecords'] = liveOrCumulativeRecords
+ elif mode == 'dark-matter':
+ digest['unreportedRecords'] = unreportedRecords
+ digest['onceReportedRecords'] = onceReportedRecords
+ digest['twiceReportedRecords'] = twiceReportedRecords
+ return digest
+
+
+def diffRecords(args, records1, records2):
+ records3 = {}
+
+ # Process records1.
+ for k in records1:
+ r1 = records1[k]
+ if k in records2:
+ # This record is present in both records1 and records2.
+ r2 = records2[k]
+ del records2[k]
+ r2.subtract(r1)
+ if not r2.isZero(args):
+ records3[k] = r2
+ else:
+ # This record is present only in records1.
+ r1.negate()
+ records3[k] = r1
+
+ for k in records2:
+ # This record is present only in records2.
+ records3[k] = records2[k]
+
+ return records3
+
+
+def diffDigests(args, d1, d2):
+ if (d1['mode'] != d2['mode']):
+ raise Exception("the input files have different 'mode' properties")
+
+ d3 = {}
+ d3['dmdEnvVar'] = (d1['dmdEnvVar'], d2['dmdEnvVar'])
+ d3['mode'] = d1['mode']
+ d3['heapUsableSize'] = d2['heapUsableSize'] - d1['heapUsableSize']
+ d3['heapBlocks'] = d2['heapBlocks'] - d1['heapBlocks']
+ if d1['mode'] in ['live', 'cumulative']:
+ d3['liveOrCumulativeRecords'] = \
+ diffRecords(args, d1['liveOrCumulativeRecords'],
+ d2['liveOrCumulativeRecords'])
+ elif d1['mode'] == 'dark-matter':
+ d3['unreportedRecords'] = diffRecords(args, d1['unreportedRecords'],
+ d2['unreportedRecords'])
+ d3['onceReportedRecords'] = diffRecords(args, d1['onceReportedRecords'],
+ d2['onceReportedRecords'])
+ d3['twiceReportedRecords'] = diffRecords(args, d1['twiceReportedRecords'],
+ d2['twiceReportedRecords'])
+ return d3
+
+
+def printDigest(args, digest):
+ dmdEnvVar = digest['dmdEnvVar']
+ mode = digest['mode']
+ heapUsableSize = digest['heapUsableSize']
+ heapBlocks = digest['heapBlocks']
+ if mode in ['live', 'cumulative']:
+ liveOrCumulativeRecords = digest['liveOrCumulativeRecords']
+ elif mode == 'dark-matter':
+ unreportedRecords = digest['unreportedRecords']
+ onceReportedRecords = digest['onceReportedRecords']
+ twiceReportedRecords = digest['twiceReportedRecords']
+
+ separator = '#' + '-' * 65 + '\n'
+
+ def number(n):
+ '''Format a number with comma as a separator.'''
+ return '{:,d}'.format(n)
+
+ def perc(m, n):
+ return 0 if n == 0 else (100 * m / n)
+
+ def plural(n):
+ return '' if n == 1 else 's'
+
+ # Prints to stdout, or to file if -o/--output was specified.
+ def out(*arguments, **kwargs):
+ print(*arguments, file=args.output, **kwargs)
+
+ def printStack(traceDesc):
+ for frameDesc in traceDesc:
+ out(frameDesc)
+
+ def printRecords(recordKind, records, heapUsableSize):
+ RecordKind = recordKind.capitalize()
+ out(separator)
+ numRecords = len(records)
+ cmpRecords = sortByChoices[args.sort_by]
+ sortedRecords = sorted(records.values(), cmp=cmpRecords, reverse=True)
+ kindBlocks = 0
+ kindUsableSize = 0
+ maxRecord = 1000
+
+ # First iteration: get totals, etc.
+ for record in sortedRecords:
+ kindBlocks += record.numBlocks
+ kindUsableSize += record.usableSize
+
+ # Second iteration: print.
+ if numRecords == 0:
+ out('# no {:} heap blocks\n'.format(recordKind))
+
+ kindCumulativeUsableSize = 0
+ for i, record in enumerate(sortedRecords, start=1):
+ # Stop printing at the |maxRecord|th record.
+ if i == maxRecord:
+ out('# {:}: stopping after {:,d} heap block records\n'.
+ format(RecordKind, i))
+ break
+
+ kindCumulativeUsableSize += record.usableSize
+
+ out(RecordKind + ' {')
+ out(' {:} block{:} in heap block record {:,d} of {:,d}'.
+ format(number(record.numBlocks),
+ plural(record.numBlocks), i, numRecords))
+ out(' {:} bytes ({:} requested / {:} slop)'.
+ format(number(record.usableSize),
+ number(record.reqSize),
+ number(record.slopSize)))
+
+ abscmp = lambda (usableSize1, _1), (usableSize2, _2): \
+ cmp(abs(usableSize1), abs(usableSize2))
+ usableSizes = sorted(record.usableSizes.items(), cmp=abscmp,
+ reverse=True)
+
+ hasSingleBlock = len(usableSizes) == 1 and usableSizes[0][1] == 1
+
+ if not hasSingleBlock:
+ out(' Individual block sizes: ', end='')
+ if len(usableSizes) == 0:
+ out('(no change)', end='')
+ else:
+ isFirst = True
+ for usableSize, count in usableSizes:
+ if not isFirst:
+ out('; ', end='')
+ out('{:}'.format(number(usableSize)), end='')
+ if count > 1:
+ out(' x {:,d}'.format(count), end='')
+ isFirst = False
+ out()
+
+ out(' {:4.2f}% of the heap ({:4.2f}% cumulative)'.
+ format(perc(record.usableSize, heapUsableSize),
+ perc(kindCumulativeUsableSize, heapUsableSize)))
+ if mode in ['live', 'cumulative']:
+ pass
+ elif mode == 'dark-matter':
+ out(' {:4.2f}% of {:} ({:4.2f}% cumulative)'.
+ format(perc(record.usableSize, kindUsableSize),
+ recordKind,
+ perc(kindCumulativeUsableSize, kindUsableSize)))
+ out(' Allocated at {')
+ printStack(record.allocatedAtDesc)
+ out(' }')
+ if mode in ['live', 'cumulative']:
+ pass
+ elif mode == 'dark-matter':
+ for n, reportedAtDesc in enumerate(record.reportedAtDescs):
+ again = 'again ' if n > 0 else ''
+ out(' Reported {:}at {{'.format(again))
+ printStack(reportedAtDesc)
+ out(' }')
+ out('}\n')
+
+ return (kindUsableSize, kindBlocks)
+
+
+ def printInvocation(n, dmdEnvVar, mode):
+ out('Invocation{:} {{'.format(n))
+ if dmdEnvVar == None:
+ out(' $DMD is undefined')
+ else:
+ out(' $DMD = \'' + dmdEnvVar + '\'')
+ out(' Mode = \'' + mode + '\'')
+ out('}\n')
+
+ # Print command line. Strip dirs so the output is deterministic, which is
+ # needed for testing.
+ out(separator, end='')
+ out('# ' + ' '.join(map(os.path.basename, sys.argv)) + '\n')
+
+ # Print invocation(s).
+ if type(dmdEnvVar) is not tuple:
+ printInvocation('', dmdEnvVar, mode)
+ else:
+ printInvocation(' 1', dmdEnvVar[0], mode)
+ printInvocation(' 2', dmdEnvVar[1], mode)
+
+ # Print records.
+ if mode in ['live', 'cumulative']:
+ liveOrCumulativeUsableSize, liveOrCumulativeBlocks = \
+ printRecords(mode, liveOrCumulativeRecords, heapUsableSize)
+ elif mode == 'dark-matter':
+ twiceReportedUsableSize, twiceReportedBlocks = \
+ printRecords('twice-reported', twiceReportedRecords, heapUsableSize)
+
+ unreportedUsableSize, unreportedBlocks = \
+ printRecords('unreported', unreportedRecords, heapUsableSize)
+
+ onceReportedUsableSize, onceReportedBlocks = \
+ printRecords('once-reported', onceReportedRecords, heapUsableSize)
+
+ # Print summary.
+ out(separator)
+ out('Summary {')
+ if mode in ['live', 'cumulative']:
+ out(' Total: {:} bytes in {:} blocks'.
+ format(number(liveOrCumulativeUsableSize),
+ number(liveOrCumulativeBlocks)))
+ elif mode == 'dark-matter':
+ fmt = ' {:15} {:>12} bytes ({:6.2f}%) in {:>7} blocks ({:6.2f}%)'
+ out(fmt.
+ format('Total:',
+ number(heapUsableSize),
+ 100,
+ number(heapBlocks),
+ 100))
+ out(fmt.
+ format('Unreported:',
+ number(unreportedUsableSize),
+ perc(unreportedUsableSize, heapUsableSize),
+ number(unreportedBlocks),
+ perc(unreportedBlocks, heapBlocks)))
+ out(fmt.
+ format('Once-reported:',
+ number(onceReportedUsableSize),
+ perc(onceReportedUsableSize, heapUsableSize),
+ number(onceReportedBlocks),
+ perc(onceReportedBlocks, heapBlocks)))
+ out(fmt.
+ format('Twice-reported:',
+ number(twiceReportedUsableSize),
+ perc(twiceReportedUsableSize, heapUsableSize),
+ number(twiceReportedBlocks),
+ perc(twiceReportedBlocks, heapBlocks)))
+ out('}\n')
+
+
+#############################
+# Pretty printer for DMD JSON
+#############################
+
+def prettyPrintDmdJson(out, j):
+ out.write('{\n')
+
+ out.write(' "version": {0},\n'.format(j['version']))
+ out.write(' "invocation": ')
+ json.dump(j['invocation'], out, sort_keys=True)
+ out.write(',\n')
+
+ out.write(' "blockList": [')
+ first = True
+ for b in j['blockList']:
+ out.write('' if first else ',')
+ out.write('\n ')
+ json.dump(b, out, sort_keys=True)
+ first = False
+ out.write('\n ],\n')
+
+ out.write(' "traceTable": {')
+ first = True
+ for k, l in j['traceTable'].iteritems():
+ out.write('' if first else ',')
+ out.write('\n "{0}": {1}'.format(k, json.dumps(l)))
+ first = False
+ out.write('\n },\n')
+
+ out.write(' "frameTable": {')
+ first = True
+ for k, v in j['frameTable'].iteritems():
+ out.write('' if first else ',')
+ out.write('\n "{0}": {1}'.format(k, json.dumps(v)))
+ first = False
+ out.write('\n }\n')
+
+ out.write('}\n')
+
+
+##################################################################
+# Code for clamping addresses using conservative pointer analysis.
+##################################################################
+
+# Start is the address of the first byte of the block, while end is
+# the address of the first byte after the final byte in the block.
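+# For example (made-up numbers), a block at 0x1000 with length 0x20 has
+# start == 0x1000 and end == 0x1020.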
+class AddrRange:
+ def __init__(self, block, length):
+ self.block = block
+ self.start = int(block, 16)
+ self.length = length
+ self.end = self.start + self.length
+
+ assert self.start > 0
+ assert length >= 0
+
+
+class ClampStats:
+ def __init__(self):
+ # Number of pointers already pointing to the start of a block.
+ self.startBlockPtr = 0
+
+ # Number of pointers pointing to the middle of a block. These
+ # are clamped to the start of the block they point into.
+ self.midBlockPtr = 0
+
+ # Number of null pointers.
+ self.nullPtr = 0
+
+ # Number of non-null pointers that didn't point into the middle
+ # of any blocks. These are clamped to null.
+ self.nonNullNonBlockPtr = 0
+
+
+ def clampedBlockAddr(self, sameAddress):
+ if sameAddress:
+ self.startBlockPtr += 1
+ else:
+ self.midBlockPtr += 1
+
+ def nullAddr(self):
+ self.nullPtr += 1
+
+ def clampedNonBlockAddr(self):
+ self.nonNullNonBlockPtr += 1
+
+ def log(self):
+ sys.stderr.write('Results:\n')
+ sys.stderr.write(' Number of pointers already pointing to start of blocks: ' + str(self.startBlockPtr) + '\n')
+ sys.stderr.write(' Number of pointers clamped to start of blocks: ' + str(self.midBlockPtr) + '\n')
+ sys.stderr.write(' Number of non-null pointers not pointing into blocks clamped to null: ' + str(self.nonNullNonBlockPtr) + '\n')
+ sys.stderr.write(' Number of null pointers: ' + str(self.nullPtr) + '\n')
+
+
+# Search the block ranges array for a block that address points into.
+# The search is carried out in an array of starting addresses for each block
+# because it is faster.
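+#
+# A made-up example: with blocks [0x1000, 0x1020) and [0x2000, 0x2040), the
+# address 0x1010 clamps to the block starting at 0x1000, 0x2000 is already a
+# block start and is kept, and 0x1030 (between the two blocks) clamps to null
+# ('0').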
+def clampAddress(blockRanges, blockStarts, clampStats, address):
+ i = bisect_right(blockStarts, address)
+
+ # Any addresses completely out of the range should have been eliminated already.
+ assert i > 0
+ r = blockRanges[i - 1]
+ assert r.start <= address
+
+ if address >= r.end:
+ assert address < blockRanges[i].start
+ clampStats.clampedNonBlockAddr()
+ return '0'
+
+ clampStats.clampedBlockAddr(r.start == address)
+ return r.block
+
+
+def clampBlockList(args, inputFileName, isZipped, opener):
+ # XXX This isn't very efficient because we end up reading and writing
+ # the file multiple times.
+ with opener(inputFileName, 'rb') as f:
+ j = json.load(f)
+
+ if j['version'] != outputVersion:
+ raise Exception("'version' property isn't '{:d}'".format(outputVersion))
+
+ # Check that the invocation is reasonable for contents clamping.
+ invocation = j['invocation']
+ if invocation['mode'] != 'scan':
+ raise Exception("Log was taken in mode " + invocation['mode'] + " not scan")
+
+ sys.stderr.write('Creating block range list.\n')
+ blockList = j['blockList']
+ blockRanges = []
+ for block in blockList:
+ blockRanges.append(AddrRange(block['addr'], block['req']))
+ blockRanges.sort(key=lambda r: r.start)
+
+ # Make sure there are no overlapping blocks.
+ prevRange = blockRanges[0]
+ for currRange in blockRanges[1:]:
+ assert prevRange.end <= currRange.start
+ prevRange = currRange
+
+ sys.stderr.write('Clamping block contents.\n')
+ clampStats = ClampStats()
+ firstAddr = blockRanges[0].start
+ lastAddr = blockRanges[-1].end
+
+ blockStarts = []
+ for r in blockRanges:
+ blockStarts.append(r.start)
+
+ for block in blockList:
+ # Small blocks don't have any contents.
+ if not 'contents' in block:
+ continue
+
+ cont = block['contents']
+ for i in range(len(cont)):
+ address = int(cont[i], 16)
+
+ if address == 0:
+ clampStats.nullAddr()
+ continue
+
+ # If the address is before the first block or after the last
+ # block then it can't be within a block.
+ if address < firstAddr or address >= lastAddr:
+ clampStats.clampedNonBlockAddr()
+ cont[i] = '0'
+ continue
+
+ cont[i] = clampAddress(blockRanges, blockStarts, clampStats, address)
+
+ # Remove any trailing nulls.
+ while len(cont) and cont[-1] == '0':
+ cont.pop()
+
+ if args.print_clamp_stats:
+ clampStats.log()
+
+ sys.stderr.write('Saving file.\n')
+ tmpFile = tempfile.NamedTemporaryFile(delete=False)
+ tmpFilename = tmpFile.name
+ if isZipped:
+ tmpFile = gzip.GzipFile(filename='', fileobj=tmpFile)
+ prettyPrintDmdJson(tmpFile, j)
+ tmpFile.close()
+ shutil.move(tmpFilename, inputFileName)
+
+
+def main():
+ args = parseCommandLine()
+ digest = getDigestFromFile(args, args.input_file)
+ if args.input_file2:
+ digest2 = getDigestFromFile(args, args.input_file2)
+ digest = diffDigests(args, digest, digest2)
+ printDigest(args, digest)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/memory/replace/dmd/moz.build b/memory/replace/dmd/moz.build
new file mode 100644
index 000000000..9fdec727d
--- /dev/null
+++ b/memory/replace/dmd/moz.build
@@ -0,0 +1,39 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'DMD.h',
+]
+
+SOURCES += [
+ '../../../mfbt/HashFunctions.cpp',
+ '../../../mfbt/JSONWriter.cpp',
+ '../../../mfbt/Poison.cpp',
+ '../../../mozglue/misc/StackWalk.cpp',
+ 'DMD.cpp',
+]
+
+SOURCES += [
+ '../../../nsprpub/lib/libc/src/strcpy.c',
+]
+
+SharedLibrary('dmd')
+
+DEFINES['MOZ_NO_MOZALLOC'] = True
+DEFINES['IMPL_MFBT'] = True
+DEFINES['XPCOM_GLUE'] = True
+
+if CONFIG['MOZ_OPTIMIZE']:
+ DEFINES['MOZ_OPTIMIZE'] = True
+
+DISABLE_STL_WRAPPING = True
+
+if CONFIG['OS_ARCH'] == 'WINNT':
+ OS_LIBS += [
+ 'dbghelp',
+ ]
+
+TEST_DIRS += ['test']
diff --git a/memory/replace/dmd/test/SmokeDMD.cpp b/memory/replace/dmd/test/SmokeDMD.cpp
new file mode 100644
index 000000000..acf76267f
--- /dev/null
+++ b/memory/replace/dmd/test/SmokeDMD.cpp
@@ -0,0 +1,379 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This program is used by the DMD xpcshell test. It is run under DMD and
+// produces some output. The xpcshell test then post-processes and checks this
+// output.
+//
+// Note that this file does not have "Test" or "test" in its name, because that
+// will cause the build system to not record breakpad symbols for it, which
+// will stop the post-processing (which includes stack fixing) from working
+// correctly.
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/JSONWriter.h"
+#include "mozilla/UniquePtr.h"
+#include "DMD.h"
+
+using mozilla::JSONWriter;
+using mozilla::MakeUnique;
+using namespace mozilla::dmd;
+
+DMDFuncs::Singleton DMDFuncs::sSingleton;
+
+class FpWriteFunc : public mozilla::JSONWriteFunc
+{
+public:
+ explicit FpWriteFunc(const char* aFilename)
+ {
+ mFp = fopen(aFilename, "w");
+ if (!mFp) {
+ fprintf(stderr, "SmokeDMD: can't create %s file: %s\n",
+ aFilename, strerror(errno));
+ exit(1);
+ }
+ }
+
+ ~FpWriteFunc() { fclose(mFp); }
+
+ void Write(const char* aStr) { fputs(aStr, mFp); }
+
+private:
+ FILE* mFp;
+};
+
+// This stops otherwise-unused variables from being optimized away.
+static void
+UseItOrLoseIt(void* aPtr, int aSeven)
+{
+ char buf[64];
+ int n = sprintf(buf, "%p\n", aPtr);
+ if (n == 20 + aSeven) {
+ fprintf(stderr, "well, that is surprising");
+ }
+}
+
+// This function checks that heap blocks that have the same stack trace but
+// different (or no) reporters get aggregated separately.
+void Foo(int aSeven)
+{
+ char* a[6];
+ for (int i = 0; i < aSeven - 1; i++) {
+ a[i] = (char*) malloc(128 - 16*i);
+ }
+
+ // Oddly, some versions of clang will cause identical stack traces to be
+ // generated for adjacent calls to Report(), which breaks the test. Inserting
+ // the UseItOrLoseIt() calls in between is enough to prevent this.
+
+ Report(a[2]); // reported
+
+ UseItOrLoseIt(a[2], aSeven);
+
+ for (int i = 0; i < aSeven - 5; i++) {
+ Report(a[i]); // reported
+ }
+
+ UseItOrLoseIt(a[2], aSeven);
+
+ Report(a[3]); // reported
+
+ // a[4], a[5] unreported
+}
+
+void
+TestEmpty(const char* aTestName, const char* aMode)
+{
+ char filename[128];
+ sprintf(filename, "complete-%s-%s.json", aTestName, aMode);
+ auto f = MakeUnique<FpWriteFunc>(filename);
+
+ char options[128];
+ sprintf(options, "--mode=%s --stacks=full", aMode);
+ ResetEverything(options);
+
+ // Zero for everything.
+ Analyze(Move(f));
+}
+
+void
+TestFull(const char* aTestName, int aNum, const char* aMode, int aSeven)
+{
+ char filename[128];
+ sprintf(filename, "complete-%s%d-%s.json", aTestName, aNum, aMode);
+ auto f = MakeUnique<FpWriteFunc>(filename);
+
+ // The --show-dump-stats=yes is there just to give that option some basic
+ // testing, e.g. ensure it doesn't crash. It's hard to test much beyond that.
+ char options[128];
+ sprintf(options, "--mode=%s --stacks=full --show-dump-stats=yes", aMode);
+ ResetEverything(options);
+
+ // Analyze 1: 1 freed, 9 out of 10 unreported.
+ // Analyze 2: still present and unreported.
+ int i;
+ char* a = nullptr;
+ for (i = 0; i < aSeven + 3; i++) {
+ a = (char*) malloc(100);
+ UseItOrLoseIt(a, aSeven);
+ }
+ free(a);
+
+ // A no-op.
+ free(nullptr);
+
+ // Note: 16 bytes is the smallest requested size that gives consistent
+ // behaviour across all platforms with jemalloc.
+ // Analyze 1: reported.
+ // Analyze 2: thrice-reported.
+ char* a2 = (char*) malloc(16);
+ Report(a2);
+
+ // Analyze 1: reported.
+ // Analyze 2: reportedness carries over, due to ReportOnAlloc.
+ char* b = (char*) malloc(10);
+ ReportOnAlloc(b);
+
+ // ReportOnAlloc, then freed.
+ // Analyze 1: freed, irrelevant.
+ // Analyze 2: freed, irrelevant.
+ char* b2 = (char*) malloc(16);
+ ReportOnAlloc(b2);
+ free(b2);
+
+ // Analyze 1: reported 4 times.
+ // Analyze 2: freed, irrelevant.
+ char* c = (char*) calloc(10, 3);
+ Report(c);
+ for (int i = 0; i < aSeven - 4; i++) {
+ Report(c);
+ }
+
+ // Analyze 1: ignored.
+ // Analyze 2: irrelevant.
+ Report((void*)(intptr_t)i);
+
+ // jemalloc rounds this up to 8192.
+ // Analyze 1: reported.
+ // Analyze 2: freed.
+ char* e = (char*) malloc(4096);
+ e = (char*) realloc(e, 7169);
+ Report(e);
+
+ // First realloc is like malloc; second realloc is shrinking.
+ // Analyze 1: reported.
+ // Analyze 2: re-reported.
+ char* e2 = (char*) realloc(nullptr, 1024);
+ e2 = (char*) realloc(e2, 512);
+ Report(e2);
+
+ // First realloc is like malloc; second realloc creates a min-sized block.
+ // XXX: on Windows, second realloc frees the block.
+ // Analyze 1: reported.
+ // Analyze 2: freed, irrelevant.
+ char* e3 = (char*) realloc(nullptr, 1023);
+//e3 = (char*) realloc(e3, 0);
+ MOZ_ASSERT(e3);
+ Report(e3);
+
+ // Analyze 1: freed, irrelevant.
+ // Analyze 2: freed, irrelevant.
+ char* f1 = (char*) malloc(64);
+ free(f1);
+
+ // Analyze 1: ignored.
+ // Analyze 2: irrelevant.
+ Report((void*)(intptr_t)0x0);
+
+ // Analyze 1: mixture of reported and unreported.
+ // Analyze 2: all unreported.
+ Foo(aSeven);
+
+ // Analyze 1: twice-reported.
+ // Analyze 2: twice-reported.
+ char* g1 = (char*) malloc(77);
+ ReportOnAlloc(g1);
+ ReportOnAlloc(g1);
+
+ // Analyze 1: mixture of reported and unreported.
+ // Analyze 2: all unreported.
+ // Nb: this Foo() call is deliberately not adjacent to the previous one. See
+ // the comment about adjacent calls in Foo() for more details.
+ Foo(aSeven);
+
+ // Analyze 1: twice-reported.
+ // Analyze 2: once-reported.
+ char* g2 = (char*) malloc(78);
+ Report(g2);
+ ReportOnAlloc(g2);
+
+ // Analyze 1: twice-reported.
+ // Analyze 2: once-reported.
+ char* g3 = (char*) malloc(79);
+ ReportOnAlloc(g3);
+ Report(g3);
+
+ // All the odd-ball ones.
+ // Analyze 1: all unreported.
+ // Analyze 2: all freed, irrelevant.
+ // XXX: no memalign on Mac
+//void* w = memalign(64, 65); // rounds up to 128
+//UseItOrLoseIt(w, aSeven);
+
+ // XXX: posix_memalign doesn't work on B2G
+//void* x;
+//posix_memalign(&y, 128, 129); // rounds up to 256
+//UseItOrLoseIt(x, aSeven);
+
+ // XXX: valloc doesn't work on Windows.
+//void* y = valloc(1); // rounds up to 4096
+//UseItOrLoseIt(y, aSeven);
+
+ // XXX: C11 only
+//void* z = aligned_alloc(64, 256);
+//UseItOrLoseIt(z, aSeven);
+
+ if (aNum == 1) {
+ // Analyze 1.
+ Analyze(Move(f));
+ }
+
+ ClearReports();
+
+ //---------
+
+ Report(a2);
+ Report(a2);
+ free(c);
+ free(e);
+ Report(e2);
+ free(e3);
+//free(w);
+//free(x);
+//free(y);
+//free(z);
+
+ // Do some allocations that will only show up in cumulative mode.
+ for (int i = 0; i < 100; i++) {
+ free(malloc(128));
+ }
+
+ if (aNum == 2) {
+ // Analyze 2.
+ Analyze(Move(f));
+ }
+}
+
+void
+TestPartial(const char* aTestName, const char* aMode, int aSeven)
+{
+ char filename[128];
+ sprintf(filename, "complete-%s-%s.json", aTestName, aMode);
+ auto f = MakeUnique<FpWriteFunc>(filename);
+
+ char options[128];
+ sprintf(options, "--mode=%s", aMode);
+ ResetEverything(options);
+
+ int kTenThousand = aSeven + 9993;
+ char* s;
+
+ // The output of this function is deterministic but it relies on the
+ // probability and seeds given to the FastBernoulliTrial instance in
+ // ResetBernoulli(). If they change, the output will change too.
+
+ // Expected fraction with stacks: (1 - (1 - 0.003) ** 16) = 0.0469.
+ // So we expect about 0.0469 * 10000 == 469.
+ // We actually get 511.
+ for (int i = 0; i < kTenThousand; i++) {
+ s = (char*) malloc(16);
+ UseItOrLoseIt(s, aSeven);
+ }
+
+ // Expected fraction with stacks: (1 - (1 - 0.003) ** 128) = 0.3193.
+ // So we expect about 0.3193 * 10000 == 3193.
+ // We actually get 3136.
+ for (int i = 0; i < kTenThousand; i++) {
+ s = (char*) malloc(128);
+ UseItOrLoseIt(s, aSeven);
+ }
+
+ // Expected fraction with stacks: (1 - (1 - 0.003) ** 1024) = 0.9539.
+ // So we expect about 0.9539 * 10000 == 9539.
+ // We actually get 9531.
+ for (int i = 0; i < kTenThousand; i++) {
+ s = (char*) malloc(1024);
+ UseItOrLoseIt(s, aSeven);
+ }
+
+ Analyze(Move(f));
+}
+
+void
+TestScan(int aSeven)
+{
+ auto f = MakeUnique<FpWriteFunc>("basic-scan.json");
+
+ ResetEverything("--mode=scan");
+
+ uintptr_t* p = (uintptr_t*) malloc(6 * sizeof(uintptr_t*));
+ UseItOrLoseIt(p, aSeven);
+
+ // Hard-coded values checked by scan-test.py
+ p[0] = 0x123; // outside a block, small value
+ p[1] = 0x0; // null
+ p[2] = (uintptr_t)((uint8_t*)p - 1); // pointer outside a block, but nearby
+ p[3] = (uintptr_t)p; // pointer to start of a block
+ p[4] = (uintptr_t)((uint8_t*)p + 1); // pointer into a block
+ p[5] = 0x0; // trailing null
+
+ Analyze(Move(f));
+}
+
+void
+RunTests()
+{
+ // This test relies on the compiler not doing various optimizations, such as
+ // eliding unused malloc() calls or unrolling loops with fixed iteration
+ // counts. So we compile it with -O0 (or equivalent), which probably prevents
+ // that. We also use the following variable for various loop iteration
+ // counts, just in case compilers might unroll very small loops even with
+ // -O0.
+ int seven = 7;
+
+ // Make sure that DMD is actually running; it is initialized on the first
+ // allocation.
+ int* x = (int*) malloc(100);
+ UseItOrLoseIt(x, seven);
+ MOZ_RELEASE_ASSERT(IsRunning());
+
+ // Please keep this in sync with run_test in test_dmd.js.
+
+ TestEmpty("empty", "live");
+ TestEmpty("empty", "dark-matter");
+ TestEmpty("empty", "cumulative");
+
+ TestFull("full", 1, "live", seven);
+ TestFull("full", 1, "dark-matter", seven);
+
+ TestFull("full", 2, "dark-matter", seven);
+ TestFull("full", 2, "cumulative", seven);
+
+ TestPartial("partial", "live", seven);
+
+ TestScan(seven);
+}
+
+int main()
+{
+ RunTests();
+
+ return 0;
+}
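
The sampling arithmetic quoted in the TestPartial() comments above follows from the FastBernoulliTrial probability of 0.003 set up by ResetBernoulli(): treating each byte of an allocation as an independent trial, an N-byte block gets a stack trace with probability 1 - (1 - 0.003) ** N. A minimal Python sketch of that calculation (illustration only, not part of the test suite):

    # Reproduces the "Expected fraction with stacks" numbers in TestPartial():
    # 16 bytes -> ~0.0469, 128 bytes -> ~0.3193, 1024 bytes -> ~0.9539.
    p = 0.003
    for n in (16, 128, 1024):
        frac = 1 - (1 - p) ** n
        print("%4d bytes: %.4f -> expect ~%d of 10,000 allocations"
              % (n, frac, round(frac * 10000)))

The "We actually get ..." counts differ slightly from these expectations because the trials are pseudo-random, but they are deterministic for the fixed seeds mentioned in the comment.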
diff --git a/memory/replace/dmd/test/basic-scan-32-expected.txt b/memory/replace/dmd/test/basic-scan-32-expected.txt
new file mode 100644
index 000000000..9f6f4db32
--- /dev/null
+++ b/memory/replace/dmd/test/basic-scan-32-expected.txt
@@ -0,0 +1,25 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o basic-scan-32-actual.txt --clamp-contents basic-scan.json
+
+Invocation {
+ $DMD = '--mode=scan'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 1 block in heap block record 1 of 1
+ 32 bytes (24 requested / 8 slop)
+ 100.00% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 32 bytes in 1 blocks
+}
+
diff --git a/memory/replace/dmd/test/basic-scan-64-expected.txt b/memory/replace/dmd/test/basic-scan-64-expected.txt
new file mode 100644
index 000000000..59effc07b
--- /dev/null
+++ b/memory/replace/dmd/test/basic-scan-64-expected.txt
@@ -0,0 +1,25 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o basic-scan-64-actual.txt --clamp-contents basic-scan.json
+
+Invocation {
+ $DMD = '--mode=scan'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 1 block in heap block record 1 of 1
+ 48 bytes (48 requested / 0 slop)
+ 100.00% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 48 bytes in 1 blocks
+}
+
diff --git a/memory/replace/dmd/test/complete-empty-cumulative-expected.txt b/memory/replace/dmd/test/complete-empty-cumulative-expected.txt
new file mode 100644
index 000000000..2486015d0
--- /dev/null
+++ b/memory/replace/dmd/test/complete-empty-cumulative-expected.txt
@@ -0,0 +1,18 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-empty-cumulative-actual.txt complete-empty-cumulative.json
+
+Invocation {
+ $DMD = '--mode=cumulative --stacks=full'
+ Mode = 'cumulative'
+}
+
+#-----------------------------------------------------------------
+
+# no cumulative heap blocks
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 0 bytes in 0 blocks
+}
+
diff --git a/memory/replace/dmd/test/complete-empty-dark-matter-expected.txt b/memory/replace/dmd/test/complete-empty-dark-matter-expected.txt
new file mode 100644
index 000000000..0020cddde
--- /dev/null
+++ b/memory/replace/dmd/test/complete-empty-dark-matter-expected.txt
@@ -0,0 +1,29 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-empty-dark-matter-actual.txt complete-empty-dark-matter.json
+
+Invocation {
+ $DMD = '--mode=dark-matter --stacks=full'
+ Mode = 'dark-matter'
+}
+
+#-----------------------------------------------------------------
+
+# no twice-reported heap blocks
+
+#-----------------------------------------------------------------
+
+# no unreported heap blocks
+
+#-----------------------------------------------------------------
+
+# no once-reported heap blocks
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 0 bytes (100.00%) in 0 blocks (100.00%)
+ Unreported: 0 bytes ( 0.00%) in 0 blocks ( 0.00%)
+ Once-reported: 0 bytes ( 0.00%) in 0 blocks ( 0.00%)
+ Twice-reported: 0 bytes ( 0.00%) in 0 blocks ( 0.00%)
+}
+
diff --git a/memory/replace/dmd/test/complete-empty-live-expected.txt b/memory/replace/dmd/test/complete-empty-live-expected.txt
new file mode 100644
index 000000000..d0d172196
--- /dev/null
+++ b/memory/replace/dmd/test/complete-empty-live-expected.txt
@@ -0,0 +1,18 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-empty-live-actual.txt complete-empty-live.json
+
+Invocation {
+ $DMD = '--mode=live --stacks=full'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+# no live heap blocks
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 0 bytes in 0 blocks
+}
+
diff --git a/memory/replace/dmd/test/complete-full1-dark-matter-expected.txt b/memory/replace/dmd/test/complete-full1-dark-matter-expected.txt
new file mode 100644
index 000000000..2c7d6b634
--- /dev/null
+++ b/memory/replace/dmd/test/complete-full1-dark-matter-expected.txt
@@ -0,0 +1,265 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-full1-dark-matter-actual.txt complete-full1-dark-matter.json
+
+Invocation {
+ $DMD = '--mode=dark-matter --stacks=full --show-dump-stats=yes'
+ Mode = 'dark-matter'
+}
+
+#-----------------------------------------------------------------
+
+Twice-reported {
+ 1 block in heap block record 1 of 4
+ 80 bytes (79 requested / 1 slop)
+ 0.66% of the heap (0.66% cumulative)
+ 29.41% of twice-reported (29.41% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+ Reported again at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Twice-reported {
+ 1 block in heap block record 2 of 4
+ 80 bytes (78 requested / 2 slop)
+ 0.66% of the heap (1.32% cumulative)
+ 29.41% of twice-reported (58.82% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+ Reported again at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Twice-reported {
+ 1 block in heap block record 3 of 4
+ 80 bytes (77 requested / 3 slop)
+ 0.66% of the heap (1.98% cumulative)
+ 29.41% of twice-reported (88.24% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+ Reported again at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Twice-reported {
+ 1 block in heap block record 4 of 4
+ 32 bytes (30 requested / 2 slop)
+ 0.26% of the heap (2.25% cumulative)
+ 11.76% of twice-reported (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+ Reported again at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Unreported {
+ 9 blocks in heap block record 1 of 3
+ 1,008 bytes (900 requested / 108 slop)
+ Individual block sizes: 112 x 9
+ 8.33% of the heap (8.33% cumulative)
+ 81.82% of unreported (81.82% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Unreported {
+ 2 blocks in heap block record 2 of 3
+ 112 bytes (112 requested / 0 slop)
+ Individual block sizes: 64; 48
+ 0.93% of the heap (9.26% cumulative)
+ 9.09% of unreported (90.91% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Unreported {
+ 2 blocks in heap block record 3 of 3
+ 112 bytes (112 requested / 0 slop)
+ Individual block sizes: 64; 48
+ 0.93% of the heap (10.19% cumulative)
+ 9.09% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Once-reported {
+ 1 block in heap block record 1 of 11
+ 8,192 bytes (7,169 requested / 1,023 slop)
+ 67.72% of the heap (67.72% cumulative)
+ 77.34% of once-reported (77.34% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 2 of 11
+ 1,024 bytes (1,023 requested / 1 slop)
+ 8.47% of the heap (76.19% cumulative)
+ 9.67% of once-reported (87.01% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 3 of 11
+ 512 bytes (512 requested / 0 slop)
+ 4.23% of the heap (80.42% cumulative)
+ 4.83% of once-reported (91.84% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 2 blocks in heap block record 4 of 11
+ 240 bytes (240 requested / 0 slop)
+ Individual block sizes: 128; 112
+ 1.98% of the heap (82.41% cumulative)
+ 2.27% of once-reported (94.11% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 2 blocks in heap block record 5 of 11
+ 240 bytes (240 requested / 0 slop)
+ Individual block sizes: 128; 112
+ 1.98% of the heap (84.39% cumulative)
+ 2.27% of once-reported (96.37% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 6 of 11
+ 96 bytes (96 requested / 0 slop)
+ 0.79% of the heap (85.19% cumulative)
+ 0.91% of once-reported (97.28% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 7 of 11
+ 96 bytes (96 requested / 0 slop)
+ 0.79% of the heap (85.98% cumulative)
+ 0.91% of once-reported (98.19% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 8 of 11
+ 80 bytes (80 requested / 0 slop)
+ 0.66% of the heap (86.64% cumulative)
+ 0.76% of once-reported (98.94% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 9 of 11
+ 80 bytes (80 requested / 0 slop)
+ 0.66% of the heap (87.30% cumulative)
+ 0.76% of once-reported (99.70% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 10 of 11
+ 16 bytes (16 requested / 0 slop)
+ 0.13% of the heap (87.43% cumulative)
+ 0.15% of once-reported (99.85% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 11 of 11
+ 16 bytes (10 requested / 6 slop)
+ 0.13% of the heap (87.57% cumulative)
+ 0.15% of once-reported (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 12,096 bytes (100.00%) in 30 blocks (100.00%)
+ Unreported: 1,232 bytes ( 10.19%) in 13 blocks ( 43.33%)
+ Once-reported: 10,592 bytes ( 87.57%) in 13 blocks ( 43.33%)
+ Twice-reported: 272 bytes ( 2.25%) in 4 blocks ( 13.33%)
+}
+
diff --git a/memory/replace/dmd/test/complete-full1-live-expected.txt b/memory/replace/dmd/test/complete-full1-live-expected.txt
new file mode 100644
index 000000000..eaa1883e1
--- /dev/null
+++ b/memory/replace/dmd/test/complete-full1-live-expected.txt
@@ -0,0 +1,127 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-full1-live-actual.txt complete-full1-live.json
+
+Invocation {
+ $DMD = '--mode=live --stacks=full --show-dump-stats=yes'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 1 block in heap block record 1 of 12
+ 8,192 bytes (7,169 requested / 1,023 slop)
+ 67.72% of the heap (67.72% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 2 of 12
+ 1,024 bytes (1,023 requested / 1 slop)
+ 8.47% of the heap (76.19% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 9 blocks in heap block record 3 of 12
+ 1,008 bytes (900 requested / 108 slop)
+ Individual block sizes: 112 x 9
+ 8.33% of the heap (84.52% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 6 blocks in heap block record 4 of 12
+ 528 bytes (528 requested / 0 slop)
+ Individual block sizes: 128; 112; 96; 80; 64; 48
+ 4.37% of the heap (88.89% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 6 blocks in heap block record 5 of 12
+ 528 bytes (528 requested / 0 slop)
+ Individual block sizes: 128; 112; 96; 80; 64; 48
+ 4.37% of the heap (93.25% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 6 of 12
+ 512 bytes (512 requested / 0 slop)
+ 4.23% of the heap (97.49% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 7 of 12
+ 80 bytes (79 requested / 1 slop)
+ 0.66% of the heap (98.15% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 8 of 12
+ 80 bytes (78 requested / 2 slop)
+ 0.66% of the heap (98.81% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 9 of 12
+ 80 bytes (77 requested / 3 slop)
+ 0.66% of the heap (99.47% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 10 of 12
+ 32 bytes (30 requested / 2 slop)
+ 0.26% of the heap (99.74% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 11 of 12
+ 16 bytes (16 requested / 0 slop)
+ 0.13% of the heap (99.87% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 1 block in heap block record 12 of 12
+ 16 bytes (10 requested / 6 slop)
+ 0.13% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 12,096 bytes in 30 blocks
+}
+
diff --git a/memory/replace/dmd/test/complete-full2-cumulative-expected.txt b/memory/replace/dmd/test/complete-full2-cumulative-expected.txt
new file mode 100644
index 000000000..5a225b9b8
--- /dev/null
+++ b/memory/replace/dmd/test/complete-full2-cumulative-expected.txt
@@ -0,0 +1,173 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-full2-cumulative-actual.txt complete-full2-cumulative.json
+
+Invocation {
+ $DMD = '--mode=cumulative --stacks=full --show-dump-stats=yes'
+ Mode = 'cumulative'
+}
+
+#-----------------------------------------------------------------
+
+Cumulative {
+ 100 blocks in heap block record 1 of 17
+ 12,800 bytes (12,800 requested / 0 slop)
+ Individual block sizes: 128 x 100
+ 42.37% of the heap (42.37% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 2 of 17
+ 8,192 bytes (7,169 requested / 1,023 slop)
+ 27.12% of the heap (69.49% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 3 of 17
+ 4,096 bytes (4,096 requested / 0 slop)
+ 13.56% of the heap (83.05% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 10 blocks in heap block record 4 of 17
+ 1,120 bytes (1,000 requested / 120 slop)
+ Individual block sizes: 112 x 10
+ 3.71% of the heap (86.76% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 5 of 17
+ 1,024 bytes (1,024 requested / 0 slop)
+ 3.39% of the heap (90.15% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 6 of 17
+ 1,024 bytes (1,023 requested / 1 slop)
+ 3.39% of the heap (93.54% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 6 blocks in heap block record 7 of 17
+ 528 bytes (528 requested / 0 slop)
+ Individual block sizes: 128; 112; 96; 80; 64; 48
+ 1.75% of the heap (95.29% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 6 blocks in heap block record 8 of 17
+ 528 bytes (528 requested / 0 slop)
+ Individual block sizes: 128; 112; 96; 80; 64; 48
+ 1.75% of the heap (97.03% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 9 of 17
+ 512 bytes (512 requested / 0 slop)
+ 1.69% of the heap (98.73% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 10 of 17
+ 80 bytes (79 requested / 1 slop)
+ 0.26% of the heap (98.99% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 11 of 17
+ 80 bytes (78 requested / 2 slop)
+ 0.26% of the heap (99.26% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 12 of 17
+ 80 bytes (77 requested / 3 slop)
+ 0.26% of the heap (99.52% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 13 of 17
+ 64 bytes (64 requested / 0 slop)
+ 0.21% of the heap (99.74% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 14 of 17
+ 32 bytes (30 requested / 2 slop)
+ 0.11% of the heap (99.84% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 15 of 17
+ 16 bytes (16 requested / 0 slop)
+ 0.05% of the heap (99.89% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 16 of 17
+ 16 bytes (16 requested / 0 slop)
+ 0.05% of the heap (99.95% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Cumulative {
+ 1 block in heap block record 17 of 17
+ 16 bytes (10 requested / 6 slop)
+ 0.05% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 30,208 bytes in 135 blocks
+}
+
diff --git a/memory/replace/dmd/test/complete-full2-dark-matter-expected.txt b/memory/replace/dmd/test/complete-full2-dark-matter-expected.txt
new file mode 100644
index 000000000..5f9585a8c
--- /dev/null
+++ b/memory/replace/dmd/test/complete-full2-dark-matter-expected.txt
@@ -0,0 +1,140 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-full2-dark-matter-actual.txt complete-full2-dark-matter.json
+
+Invocation {
+ $DMD = '--mode=dark-matter --stacks=full --show-dump-stats=yes'
+ Mode = 'dark-matter'
+}
+
+#-----------------------------------------------------------------
+
+Twice-reported {
+ 1 block in heap block record 1 of 2
+ 80 bytes (77 requested / 3 slop)
+ 2.81% of the heap (2.81% cumulative)
+ 83.33% of twice-reported (83.33% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+ Reported again at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Twice-reported {
+ 1 block in heap block record 2 of 2
+ 16 bytes (16 requested / 0 slop)
+ 0.56% of the heap (3.37% cumulative)
+ 16.67% of twice-reported (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+ Reported again at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Unreported {
+ 9 blocks in heap block record 1 of 3
+ 1,008 bytes (900 requested / 108 slop)
+ Individual block sizes: 112 x 9
+ 35.39% of the heap (35.39% cumulative)
+ 48.84% of unreported (48.84% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Unreported {
+ 6 blocks in heap block record 2 of 3
+ 528 bytes (528 requested / 0 slop)
+ Individual block sizes: 128; 112; 96; 80; 64; 48
+ 18.54% of the heap (53.93% cumulative)
+ 25.58% of unreported (74.42% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Unreported {
+ 6 blocks in heap block record 3 of 3
+ 528 bytes (528 requested / 0 slop)
+ Individual block sizes: 128; 112; 96; 80; 64; 48
+ 18.54% of the heap (72.47% cumulative)
+ 25.58% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Once-reported {
+ 1 block in heap block record 1 of 4
+ 512 bytes (512 requested / 0 slop)
+ 17.98% of the heap (17.98% cumulative)
+ 74.42% of once-reported (74.42% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 2 of 4
+ 80 bytes (79 requested / 1 slop)
+ 2.81% of the heap (20.79% cumulative)
+ 11.63% of once-reported (86.05% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 3 of 4
+ 80 bytes (78 requested / 2 slop)
+ 2.81% of the heap (23.60% cumulative)
+ 11.63% of once-reported (97.67% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Once-reported {
+ 1 block in heap block record 4 of 4
+ 16 bytes (10 requested / 6 slop)
+ 0.56% of the heap (24.16% cumulative)
+ 2.33% of once-reported (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+ Reported at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 2,848 bytes (100.00%) in 27 blocks (100.00%)
+ Unreported: 2,064 bytes ( 72.47%) in 21 blocks ( 77.78%)
+ Once-reported: 688 bytes ( 24.16%) in 4 blocks ( 14.81%)
+ Twice-reported: 96 bytes ( 3.37%) in 2 blocks ( 7.41%)
+}
+
diff --git a/memory/replace/dmd/test/complete-partial-live-expected.txt b/memory/replace/dmd/test/complete-partial-live-expected.txt
new file mode 100644
index 000000000..e7f27b0ee
--- /dev/null
+++ b/memory/replace/dmd/test/complete-partial-live-expected.txt
@@ -0,0 +1,56 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o complete-partial-live-actual.txt complete-partial-live.json
+
+Invocation {
+ $DMD = '--mode=live'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 9,531 blocks in heap block record 1 of 4
+ 9,759,744 bytes (9,759,744 requested / 0 slop)
+ Individual block sizes: 1,024 x 9,531
+ 83.56% of the heap (83.56% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 16,822 blocks in heap block record 2 of 4
+ 1,510,672 bytes (1,510,672 requested / 0 slop)
+ Individual block sizes: 1,024 x 469; 128 x 6,864; 16 x 9,489
+ 12.93% of the heap (96.49% cumulative)
+ Allocated at {
+ #01: (no stack trace recorded due to --stacks=partial)
+ }
+}
+
+Live {
+ 3,136 blocks in heap block record 3 of 4
+ 401,408 bytes (401,408 requested / 0 slop)
+ Individual block sizes: 128 x 3,136
+ 3.44% of the heap (99.93% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+Live {
+ 511 blocks in heap block record 4 of 4
+ 8,176 bytes (8,176 requested / 0 slop)
+ Individual block sizes: 16 x 511
+ 0.07% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: ... DMD.cpp ...
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 11,680,000 bytes in 30,000 blocks
+}
+
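
The record sizes above line up with the "We actually get ..." counts in TestPartial(): for each allocation size, the blocks that received a stack trace (records 1, 3 and 4) plus the blocks folded into the partial-stacks record 2 sum to 10,000. A quick consistency check in Python, using only the numbers printed above:

    # Record 2 ("no stack trace recorded") holds the remainder for each size.
    with_stack    = {1024: 9531, 128: 3136, 16: 511}
    without_stack = {1024: 469,  128: 6864, 16: 9489}
    for size in (16, 128, 1024):
        assert with_stack[size] + without_stack[size] == 10000
    total_blocks = sum(with_stack.values()) + sum(without_stack.values())
    assert total_blocks == 30000   # matches the Summary line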
diff --git a/memory/replace/dmd/test/moz.build b/memory/replace/dmd/test/moz.build
new file mode 100644
index 000000000..11cab5c4c
--- /dev/null
+++ b/memory/replace/dmd/test/moz.build
@@ -0,0 +1,26 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+GeckoSimplePrograms([
+ 'SmokeDMD',
+], linkage=None)
+
+# See the comment at the top of SmokeDMD.cpp:RunTests().
+if CONFIG['OS_ARCH'] == 'WINNT':
+ CXXFLAGS += ['-Og-']
+else:
+ CXXFLAGS += ['-O0']
+
+DEFINES['MOZ_NO_MOZALLOC'] = True
+
+DISABLE_STL_WRAPPING = True
+
+XPCSHELL_TESTS_MANIFESTS += [
+ 'xpcshell.ini',
+]
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
diff --git a/memory/replace/dmd/test/scan-test.py b/memory/replace/dmd/test/scan-test.py
new file mode 100644
index 000000000..f031ae88f
--- /dev/null
+++ b/memory/replace/dmd/test/scan-test.py
@@ -0,0 +1,83 @@
+#! /usr/bin/env python
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''Tests the JSON file emitted by DMD heap scan mode when running SmokeDMD.'''
+
+from __future__ import print_function, division
+
+import argparse
+import gzip
+import json
+import sys
+
+# The DMD output version this script handles.
+outputVersion = 5
+
+
+def parseCommandLine():
+ description = '''
+Ensure that DMD heap scan mode creates the correct output when run with SmokeDMD.
+This is only for testing. Input files can be gzipped.
+'''
+ p = argparse.ArgumentParser(description=description)
+
+ p.add_argument('--clamp-contents', action='store_true',
+ help='expect that the contents of the JSON input file have had their addresses clamped')
+
+ p.add_argument('input_file',
+ help='a file produced by DMD')
+
+ return p.parse_args(sys.argv[1:])
+
+
+def checkScanContents(contents, expected):
+ if len(contents) != len(expected):
+ raise Exception("Expected " + str(len(expected)) + " things in contents but found " + str(len(contents)))
+
+ for i in range(len(expected)):
+ if contents[i] != expected[i]:
+ raise Exception("Expected to find " + expected[i] + " at offset " + str(i) + " but found " + contents[i])
+
+
+def main():
+ args = parseCommandLine()
+
+ # Handle gzipped input if necessary.
+ isZipped = args.input_file.endswith('.gz')
+ opener = gzip.open if isZipped else open
+
+ with opener(args.input_file, 'rb') as f:
+ j = json.load(f)
+
+ if j['version'] != outputVersion:
+ raise Exception("'version' property isn't '{:d}'".format(outputVersion))
+
+ invocation = j['invocation']
+
+ mode = invocation['mode']
+ if mode != 'scan':
+ raise Exception("bad 'mode' property: '{:s}'".format(mode))
+
+ blockList = j['blockList']
+
+ if len(blockList) != 1:
+ raise Exception("Expected only one block")
+
+ b = blockList[0]
+
+ # The expected values are based on hard-coded values in SmokeDMD.cpp.
+ if args.clamp_contents:
+ expected = ['0', '0', '0', b['addr'], b['addr']]
+ else:
+ addr = int(b['addr'], 16)
+ expected = ['123', '0', str(format(addr - 1, 'x')), b['addr'],
+ str(format(addr + 1, 'x')), '0']
+
+ checkScanContents(b['contents'], expected)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/memory/replace/dmd/test/script-diff-dark-matter-expected.txt b/memory/replace/dmd/test/script-diff-dark-matter-expected.txt
new file mode 100644
index 000000000..382f4eee5
--- /dev/null
+++ b/memory/replace/dmd/test/script-diff-dark-matter-expected.txt
@@ -0,0 +1,127 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-diff-dark-matter-actual.txt script-diff-dark-matter1.json script-diff-dark-matter2.json
+
+Invocation 1 {
+ $DMD = '--mode=dark-matter'
+ Mode = 'dark-matter'
+}
+
+Invocation 2 {
+ $DMD is undefined
+ Mode = 'dark-matter'
+}
+
+#-----------------------------------------------------------------
+
+Twice-reported {
+ -1 blocks in heap block record 1 of 1
+ -1,088 bytes (-1,064 requested / -24 slop)
+ Individual block sizes: -1,024; -127; 63
+ 15.46% of the heap (15.46% cumulative)
+ 100.00% of twice-reported (100.00% cumulative)
+ Allocated at {
+ #01: F (F.cpp:99)
+ }
+ Reported at {
+ #01: R1 (R1.cpp:99)
+ }
+ Reported again at {
+ #01: R2 (R2.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Unreported {
+ 4 blocks in heap block record 1 of 5
+ 16,384 bytes (16,384 requested / 0 slop)
+ Individual block sizes: 4,096 x 4
+ -232.76% of the heap (-232.76% cumulative)
+ 371.01% of unreported (371.01% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ }
+}
+
+Unreported {
+ 7 blocks in heap block record 2 of 5
+ -11,968 bytes (-12,016 requested / 48 slop)
+ Individual block sizes: -15,360; 2,048; 512 x 2; 128; -127; 64 x 4; 63
+ 170.02% of the heap (-62.74% cumulative)
+ -271.01% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: F (F.cpp:99)
+ }
+}
+
+Unreported {
+ 0 blocks in heap block record 3 of 5
+ 0 bytes (-384 requested / 384 slop)
+ Individual block sizes: (no change)
+ -0.00% of the heap (-62.74% cumulative)
+ 0.00% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: C (C.cpp:99)
+ }
+}
+
+Unreported {
+ -2 blocks in heap block record 4 of 5
+ 0 bytes (0 requested / 0 slop)
+ Individual block sizes: 8,192 x 2; -4,096 x 4
+ -0.00% of the heap (-62.74% cumulative)
+ 0.00% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: B (B.cpp:99)
+ }
+}
+
+Unreported {
+ 0 blocks in heap block record 5 of 5
+ 0 bytes (0 requested / 0 slop)
+ Individual block sizes: 20,480; -16,384; -8,192; 4,096
+ -0.00% of the heap (-62.74% cumulative)
+ 0.00% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: (no stack trace recorded due to --stacks=partial)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Once-reported {
+ -3 blocks in heap block record 1 of 2
+ -10,240 bytes (-10,192 requested / -48 slop)
+ Individual block sizes: -4,096 x 2; -2,048
+ 145.48% of the heap (145.48% cumulative)
+ 98.77% of once-reported (98.77% cumulative)
+ Allocated at {
+ #01: D (D.cpp:99)
+ }
+ Reported at {
+ #01: R1 (R1.cpp:99)
+ }
+}
+
+Once-reported {
+ -1 blocks in heap block record 2 of 2
+ -127 bytes (-151 requested / 24 slop)
+ 1.80% of the heap (147.28% cumulative)
+ 1.23% of once-reported (100.00% cumulative)
+ Allocated at {
+ #01: F (F.cpp:99)
+ }
+ Reported at {
+ #01: R1 (R1.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: -7,039 bytes (100.00%) in 4 blocks (100.00%)
+ Unreported: 4,416 bytes (-62.74%) in 9 blocks (225.00%)
+ Once-reported: -10,367 bytes (147.28%) in -4 blocks (-100.00%)
+ Twice-reported: -1,088 bytes ( 15.46%) in -1 blocks (-25.00%)
+}
+
diff --git a/memory/replace/dmd/test/script-diff-dark-matter1.json b/memory/replace/dmd/test/script-diff-dark-matter1.json
new file mode 100644
index 000000000..c8edafebe
--- /dev/null
+++ b/memory/replace/dmd/test/script-diff-dark-matter1.json
@@ -0,0 +1,51 @@
+{
+ "version": 5,
+ "invocation": {
+ "dmdEnvVar": "--mode=dark-matter",
+ "mode": "dark-matter"
+ },
+ "blockList": [
+ {"req": 4096, "alloc": "A", "num": 4},
+
+ {"req": 4096, "alloc": "B", "num": 3},
+ {"req": 4096, "alloc": "B"},
+
+ {"req": 4096, "alloc": "C", "num": 2},
+ {"req": 4096, "alloc": "C", "num": 2},
+
+ {"req": 4096, "alloc": "D", "reps": ["R1"], "num": 2},
+ {"req": 2000, "slop": 48, "alloc": "D", "reps": ["R1"]},
+
+ {"req": 15360, "alloc": "F"},
+ {"req": 512, "alloc": "F", "num": 2},
+ {"req": 127, "alloc": "F"},
+ {"req": 1024, "alloc": "F", "reps": ["R1"]},
+ {"req": 127, "alloc": "F", "reps": ["R1"]},
+ {"req": 1000, "slop": 24, "alloc": "F", "reps": ["R1", "R2"]},
+ {"req": 127, "alloc": "F", "reps": ["R1", "R2"]},
+
+ {"req": 4096 },
+ {"req": 8192 },
+ {"req": 16384 }
+ ],
+ "traceTable": {
+ "A": ["AA"],
+ "B": ["BB"],
+ "C": ["CC"],
+ "D": ["DD"],
+ "E": ["EE"],
+ "F": ["FF"],
+ "R1": ["RR1"],
+ "R2": ["RR2"]
+ },
+ "frameTable": {
+ "AA": "#00: A (A.cpp:99)",
+ "BB": "#00: B (B.cpp:99)",
+ "CC": "#00: C (C.cpp:99)",
+ "DD": "#00: D (D.cpp:99)",
+ "EE": "#00: E (E.cpp:99)",
+ "FF": "#00: F (F.cpp:99)",
+ "RR1": "#00: R1 (R1.cpp:99)",
+ "RR2": "#00: R2 (R2.cpp:99)"
+ }
+}
diff --git a/memory/replace/dmd/test/script-diff-dark-matter2.json b/memory/replace/dmd/test/script-diff-dark-matter2.json
new file mode 100644
index 000000000..a001040c0
--- /dev/null
+++ b/memory/replace/dmd/test/script-diff-dark-matter2.json
@@ -0,0 +1,51 @@
+{
+ "version": 5,
+ "invocation": {
+ "dmdEnvVar": null,
+ "mode": "dark-matter"
+ },
+ "blockList": [
+ {"req": 4096, "alloc": "A", "num": 4},
+
+ {"req": 8192, "alloc": "B"},
+ {"req": 8192, "alloc": "B"},
+
+ {"req": 4000, "slop": 96, "alloc": "C", "num": 4},
+
+ {"req": 4096, "alloc": "E", "num": 4},
+
+ {"req": 2000, "slop": 48, "alloc": "F"},
+ {"req": 1000, "slop": 24, "alloc": "F", "reps": ["R1"]},
+ {"req": 512, "alloc": "F"},
+ {"req": 512, "alloc": "F"},
+ {"req": 512, "alloc": "F"},
+ {"req": 512, "alloc": "F"},
+ {"req": 128, "alloc": "F"},
+ {"req": 63, "alloc": "F", "reps": ["R1", "R2"]},
+ {"req": 64, "alloc": "F", "num": 4},
+ {"req": 63, "alloc": "F"},
+
+ {"req": 4096, "num": 2 },
+ {"req": 20480 }
+ ],
+ "traceTable": {
+ "A": ["AA"],
+ "B": ["BB"],
+ "C": ["CC"],
+ "D": ["DD"],
+ "E": ["EE"],
+ "F": ["FF"],
+ "R1": ["RR1"],
+ "R2": ["RR2"]
+ },
+ "frameTable": {
+ "AA": "#00: A (A.cpp:99)",
+ "BB": "#00: B (B.cpp:99)",
+ "CC": "#00: C (C.cpp:99)",
+ "DD": "#00: D (D.cpp:99)",
+ "EE": "#00: E (E.cpp:99)",
+ "FF": "#00: F (F.cpp:99)",
+ "RR1": "#00: R1 (R1.cpp:99)",
+ "RR2": "#00: R2 (R2.cpp:99)"
+ }
+}
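
Each record in script-diff-dark-matter-expected.txt above is the second JSON file's totals minus the first's, which is why block counts, byte sizes and percentages can be negative. As a worked example (assuming, as in these hand-written inputs, that a missing "slop" means 0 and that usable size is req plus slop), the Twice-reported record comes from the blocks allocated at F and reported at both R1 and R2:

    # File 1 has two such blocks (req/slop 1000/24 and 127/0); file 2 has one (63/0).
    file1 = [(1000, 24), (127, 0)]
    file2 = [(63, 0)]
    usable_total = lambda blocks: sum(req + slop for req, slop in blocks)
    req_total    = lambda blocks: sum(req for req, slop in blocks)
    print(len(file2) - len(file1))                    # -1 block
    print(usable_total(file2) - usable_total(file1))  # -1,088 bytes
    print(req_total(file2) - req_total(file1))        # -1,064 requested, hence -24 slop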
diff --git a/memory/replace/dmd/test/script-diff-live-expected.txt b/memory/replace/dmd/test/script-diff-live-expected.txt
new file mode 100644
index 000000000..ecd291ad8
--- /dev/null
+++ b/memory/replace/dmd/test/script-diff-live-expected.txt
@@ -0,0 +1,81 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-diff-live-actual.txt script-diff-live1.json script-diff-live2.json
+
+Invocation 1 {
+ $DMD = '--mode=live'
+ Mode = 'live'
+}
+
+Invocation 2 {
+ $DMD = '--mode=live --stacks=partial'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 4 blocks in heap block record 1 of 6
+ 16,384 bytes (16,384 requested / 0 slop)
+ Individual block sizes: 4,096 x 4
+ -232.76% of the heap (-232.76% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 2 of 6
+ -13,183 bytes (-13,231 requested / 48 slop)
+ Individual block sizes: -15,360; 2,048; -1,024; 512 x 2; 128; -127 x 3; 64 x 4; 63 x 2
+ 187.29% of the heap (-45.48% cumulative)
+ Allocated at {
+ #01: F (F.cpp:99)
+ }
+}
+
+Live {
+ -3 blocks in heap block record 3 of 6
+ -10,240 bytes (-10,192 requested / -48 slop)
+ Individual block sizes: -4,096 x 2; -2,048
+ 145.48% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: D (D.cpp:99)
+ }
+}
+
+Live {
+ 0 blocks in heap block record 4 of 6
+ 0 bytes (-384 requested / 384 slop)
+ Individual block sizes: (no change)
+ -0.00% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: C (C.cpp:99)
+ }
+}
+
+Live {
+ 0 blocks in heap block record 5 of 6
+ 0 bytes (0 requested / 0 slop)
+ Individual block sizes: 20,480; -16,384; -8,192; 4,096
+ -0.00% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: (no stack trace recorded due to --stacks=partial)
+ }
+}
+
+Live {
+ -2 blocks in heap block record 6 of 6
+ 0 bytes (0 requested / 0 slop)
+ Individual block sizes: 8,192 x 2; -4,096 x 4
+ -0.00% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: B (B.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: -7,039 bytes in 4 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-diff-live1.json b/memory/replace/dmd/test/script-diff-live1.json
new file mode 100644
index 000000000..87e07aed5
--- /dev/null
+++ b/memory/replace/dmd/test/script-diff-live1.json
@@ -0,0 +1,51 @@
+{
+ "version": 5,
+ "invocation": {
+ "dmdEnvVar": "--mode=live",
+ "mode": "live"
+ },
+ "blockList": [
+ {"req": 4096, "alloc": "A", "num": 4},
+
+ {"req": 4096, "alloc": "B", "num": 4},
+
+ {"req": 4096, "alloc": "C", "num": 4},
+
+ {"req": 4096, "alloc": "D"},
+ {"req": 4096, "alloc": "D"},
+ {"req": 2000, "slop": 48, "alloc": "D"},
+
+ {"req": 15360, "alloc": "F"},
+ {"req": 512, "alloc": "F"},
+ {"req": 512, "alloc": "F"},
+ {"req": 127, "alloc": "F"},
+ {"req": 1024, "alloc": "F"},
+ {"req": 127, "alloc": "F"},
+ {"req": 1000, "slop": 24, "alloc": "F"},
+ {"req": 127, "alloc": "F"},
+
+ {"req": 4096 },
+ {"req": 8192 },
+ {"req": 16384 }
+ ],
+ "traceTable": {
+ "A": ["AA"],
+ "B": ["BB"],
+ "C": ["CC"],
+ "D": ["DD"],
+ "E": ["EE"],
+ "F": ["FF"],
+ "R1": ["RR1"],
+ "R2": ["RR2"]
+ },
+ "frameTable": {
+ "AA": "#00: A (A.cpp:99)",
+ "BB": "#00: B (B.cpp:99)",
+ "CC": "#00: C (C.cpp:99)",
+ "DD": "#00: D (D.cpp:99)",
+ "EE": "#00: E (E.cpp:99)",
+ "FF": "#00: F (F.cpp:99)",
+ "RR1": "#00: R1 (R1.cpp:99)",
+ "RR2": "#00: R2 (R2.cpp:99)"
+ }
+}
diff --git a/memory/replace/dmd/test/script-diff-live2.json b/memory/replace/dmd/test/script-diff-live2.json
new file mode 100644
index 000000000..4c7476f4c
--- /dev/null
+++ b/memory/replace/dmd/test/script-diff-live2.json
@@ -0,0 +1,53 @@
+{
+ "version": 5,
+ "invocation": {
+ "dmdEnvVar": "--mode=live --stacks=partial",
+ "mode": "live"
+ },
+ "blockList": [
+ {"req": 4096, "alloc": "A", "num": 3},
+ {"req": 4096, "alloc": "A"},
+
+ {"req": 8192, "alloc": "B"},
+ {"req": 8192, "alloc": "B"},
+
+ {"req": 4000, "slop": 96, "alloc": "C", "num": 4},
+
+ {"req": 4096, "alloc": "E"},
+ {"req": 4096, "alloc": "E"},
+ {"req": 4096, "alloc": "E"},
+ {"req": 4096, "alloc": "E"},
+
+ {"req": 2000, "slop": 48, "alloc": "F"},
+ {"req": 1000, "slop": 24, "alloc": "F"},
+ {"req": 512, "alloc": "F", "num": 4},
+ {"req": 128, "alloc": "F"},
+ {"req": 63, "alloc": "F"},
+ {"req": 64, "alloc": "F", "num": 4},
+ {"req": 63, "alloc": "F"},
+
+ {"req": 4096 },
+ {"req": 4096 },
+ {"req": 20480 }
+ ],
+ "traceTable": {
+ "A": ["AA"],
+ "B": ["BB"],
+ "C": ["CC"],
+ "D": ["DD"],
+ "E": ["EE"],
+ "F": ["FF"],
+ "R1": ["RR1"],
+ "R2": ["RR2"]
+ },
+ "frameTable": {
+ "AA": "#00: A (A.cpp:99)",
+ "BB": "#00: B (B.cpp:99)",
+ "CC": "#00: C (C.cpp:99)",
+ "DD": "#00: D (D.cpp:99)",
+ "EE": "#00: E (E.cpp:99)",
+ "FF": "#00: F (F.cpp:99)",
+ "RR1": "#00: R1 (R1.cpp:99)",
+ "RR2": "#00: R2 (R2.cpp:99)"
+ }
+}
diff --git a/memory/replace/dmd/test/script-ignore-alloc-fns-expected.txt b/memory/replace/dmd/test/script-ignore-alloc-fns-expected.txt
new file mode 100644
index 000000000..af9a0f6e9
--- /dev/null
+++ b/memory/replace/dmd/test/script-ignore-alloc-fns-expected.txt
@@ -0,0 +1,72 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-ignore-alloc-fns-actual.txt --ignore-alloc-fns script-ignore-alloc-fns.json
+
+Invocation {
+ $DMD is undefined
+ Mode = 'dark-matter'
+}
+
+#-----------------------------------------------------------------
+
+# no twice-reported heap blocks
+
+#-----------------------------------------------------------------
+
+Unreported {
+ 1 block in heap block record 1 of 4
+ 1,048,576 bytes (1,048,576 requested / 0 slop)
+ 93.22% of the heap (93.22% cumulative)
+ 93.22% of unreported (93.22% cumulative)
+ Allocated at {
+ #01: A (A.cpp:99)
+ }
+}
+
+Unreported {
+ 1 block in heap block record 2 of 4
+ 65,536 bytes (65,536 requested / 0 slop)
+ 5.83% of the heap (99.05% cumulative)
+ 5.83% of unreported (99.05% cumulative)
+ Allocated at {
+ #01: js::jit::JitRuntime::initialize(JSContext*) (Ion.cpp:301)
+ }
+}
+
+Unreported {
+ 1 block in heap block record 3 of 4
+ 8,192 bytes (8,000 requested / 192 slop)
+ 0.73% of the heap (99.78% cumulative)
+ 0.73% of unreported (99.78% cumulative)
+ Allocated at {
+ #01: mozilla::Vector::growStorageBy(unsigned long) (Vector.h:802)
+ #02: D (D.cpp:99)
+ }
+}
+
+Unreported {
+ 1 block in heap block record 4 of 4
+ 2,500 bytes (2,500 requested / 0 slop)
+ 0.22% of the heap (100.00% cumulative)
+ 0.22% of unreported (100.00% cumulative)
+ Allocated at {
+ #01: g_type_create_instance (/usr/lib/x86_64-linux-gnu/libgobject-2.0.so.0)
+ #02: not_an_alloc_function_so_alloc_functions_below_here_will_not_be_stripped (blah)
+ #03: replace_posix_memalign (replace_malloc.h:120)
+ #04: ??? (/lib/x86_64-linux-gnu/libglib-2.0.so.0)
+ #05: another_non_alloc_function (blah)
+ }
+}
+
+#-----------------------------------------------------------------
+
+# no once-reported heap blocks
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 1,124,804 bytes (100.00%) in 4 blocks (100.00%)
+ Unreported: 1,124,804 bytes (100.00%) in 4 blocks (100.00%)
+ Once-reported: 0 bytes ( 0.00%) in 0 blocks ( 0.00%)
+ Twice-reported: 0 bytes ( 0.00%) in 0 blocks ( 0.00%)
+}
+
diff --git a/memory/replace/dmd/test/script-ignore-alloc-fns.json b/memory/replace/dmd/test/script-ignore-alloc-fns.json
new file mode 100644
index 000000000..7e9446a78
--- /dev/null
+++ b/memory/replace/dmd/test/script-ignore-alloc-fns.json
@@ -0,0 +1,46 @@
+{
+ "version": 5,
+ "invocation": {
+ "dmdEnvVar": null,
+ "mode": "dark-matter"
+ },
+ "blockList": [
+ {"req": 1048576, "alloc": "A"},
+ {"req": 65536, "alloc": "B"},
+ {"req": 8000, "slop": 192, "alloc": "C"},
+ {"req": 2500, "alloc": "D"}
+ ],
+ "traceTable": {
+ "A": ["AA", "AB", "AC", "AD"],
+ "B": ["BA", "BB", "BC"],
+ "C": ["CA", "CB", "CC", "CD"],
+ "D": ["DA", "DB", "DD", "DD", "DE", "DF", "DG", "DH", "DI", "DJ"]
+ },
+ "frameTable": {
+ "AA": "#00: replace_malloc (DMD.cpp:1106)",
+ "AB": "#00: moz_xmalloc (mozalloc.cpp:68)",
+ "AC": "#00: operator new(unsigned long) (mozalloc.h:208)",
+ "AD": "#00: A (A.cpp:99)",
+
+ "BA": "#00: replace_calloc (DMD.cpp:1125)",
+ "BB": "#00: js_calloc(unsigned long) (Utility.h:107)",
+ "BC": "#06: js::jit::JitRuntime::initialize(JSContext*) (Ion.cpp:301)",
+
+ "CA": "#00: replace_realloc (DMD.cpp:1153)",
+ "CB": "#00: bool* mozilla::MallocAllocPolicy::pod_realloc<bool>(bool*, unsigned long, unsigned long) (AllocPolicy.h:74)",
+ "CC": "#00: mozilla::Vector::growStorageBy(unsigned long) (Vector.h:802)",
+ "CD": "#00: D (D.cpp:99)",
+
+ "DA": "#00: replace_memalign (DMD.cpp:1181)",
+ "DB": "#00: replace_posix_memalign (replace_malloc.h:120)",
+ "DC": "#00: ??? (/lib/x86_64-linux-gnu/libglib-2.0.so.0)",
+ "DD": "#00: g_slice_alloc (/lib/x86_64-linux-gnu/libglib-2.0.so.0)",
+ "DE": "#00: g_slice_alloc0 (/lib/x86_64-linux-gnu/libglib-2.0.so.0)",
+ "DF": "#00: g_type_create_instance (/usr/lib/x86_64-linux-gnu/libgobject-2.0.so.0)",
+ "DG": "#00: not_an_alloc_function_so_alloc_functions_below_here_will_not_be_stripped (blah)",
+ "DH": "#00: replace_posix_memalign (replace_malloc.h:120)",
+ "DI": "#00: ??? (/lib/x86_64-linux-gnu/libglib-2.0.so.0)",
+ "DJ": "#00: another_non_alloc_function (blah)"
+ }
+}
+
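
The expected output above illustrates what --ignore-alloc-fns does: leading stack frames that belong to allocation wrappers (replace_malloc, moz_xmalloc, operator new, replace_calloc, js_calloc, replace_realloc, pod_realloc, replace_memalign, g_slice_alloc and so on) are stripped until the first frame that is not an allocation function, and everything after that point is kept, which is why the replace_posix_memalign frame that appears after the first non-allocation frame in block D's stack survives. A rough sketch of that stripping rule (the predicate here is hypothetical; dmd.py keeps its own list of allocation-function names):

    def strip_alloc_frames(frames, is_alloc_fn):
        """Drop leading frames recognized as allocation functions; keep the rest."""
        i = 0
        while i < len(frames) and is_alloc_fn(frames[i]):
            i += 1
        return frames[i:]

    # Block D's trace from script-ignore-alloc-fns.json, by function name.
    d_stack = ["replace_memalign", "replace_posix_memalign", "g_slice_alloc",
               "g_slice_alloc", "g_slice_alloc0", "g_type_create_instance",
               "not_an_alloc_function_so_alloc_functions_below_here_will_not_be_stripped",
               "replace_posix_memalign", "???", "another_non_alloc_function"]
    is_alloc = lambda f: f.startswith("replace_") or f.startswith("g_slice_alloc")
    print(strip_alloc_frames(d_stack, is_alloc))  # starts at g_type_create_instance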
diff --git a/memory/replace/dmd/test/script-max-frames-1-expected.txt b/memory/replace/dmd/test/script-max-frames-1-expected.txt
new file mode 100644
index 000000000..65a00762b
--- /dev/null
+++ b/memory/replace/dmd/test/script-max-frames-1-expected.txt
@@ -0,0 +1,26 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-max-frames-1-actual.txt --max-frames=1 script-max-frames.json
+
+Invocation {
+ $DMD = '--mode=live --stacks=full'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 4 blocks in heap block record 1 of 1
+ 4,416 bytes (4,404 requested / 12 slop)
+ Individual block sizes: 4,096; 128; 112; 80
+ 100.00% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 4,416 bytes in 4 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-max-frames-3-expected.txt b/memory/replace/dmd/test/script-max-frames-3-expected.txt
new file mode 100644
index 000000000..5df491473
--- /dev/null
+++ b/memory/replace/dmd/test/script-max-frames-3-expected.txt
@@ -0,0 +1,48 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-max-frames-3-actual.txt --max-frames=3 --no-fix-stacks script-max-frames.json
+
+Invocation {
+ $DMD = '--mode=live --stacks=full'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 2 blocks in heap block record 1 of 3
+ 4,224 bytes (4,224 requested / 0 slop)
+ Individual block sizes: 4,096; 128
+ 95.65% of the heap (95.65% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ #02: F (F.cpp:99)
+ #03: G (G.cpp:99)
+ }
+}
+
+Live {
+ 1 block in heap block record 2 of 3
+ 112 bytes (100 requested / 12 slop)
+ 2.54% of the heap (98.19% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ #02: X (X.cpp:99)
+ #03: Y (Y.cpp:99)
+ }
+}
+
+Live {
+ 1 block in heap block record 3 of 3
+ 80 bytes (80 requested / 0 slop)
+ 1.81% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 4,416 bytes in 4 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-max-frames-8-expected.txt b/memory/replace/dmd/test/script-max-frames-8-expected.txt
new file mode 100644
index 000000000..d1ba7c7f1
--- /dev/null
+++ b/memory/replace/dmd/test/script-max-frames-8-expected.txt
@@ -0,0 +1,69 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-max-frames-8-actual.txt --max-frames=8 script-max-frames.json
+
+Invocation {
+ $DMD = '--mode=live --stacks=full'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 1 block in heap block record 1 of 4
+ 4,096 bytes (4,096 requested / 0 slop)
+ 92.75% of the heap (92.75% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ #02: F (F.cpp:99)
+ #03: G (G.cpp:99)
+ #04: H (H.cpp:99)
+ #05: I (I.cpp:99)
+ #06: J (J.cpp:99)
+ #07: K (K.cpp:99)
+ #08: L (L.cpp:99)
+ }
+}
+
+Live {
+ 1 block in heap block record 2 of 4
+ 128 bytes (128 requested / 0 slop)
+ 2.90% of the heap (95.65% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ #02: F (F.cpp:99)
+ #03: G (G.cpp:99)
+ #04: R (R.cpp:99)
+ #05: S (S.cpp:99)
+ #06: T (T.cpp:99)
+ #07: U (U.cpp:99)
+ #08: V (V.cpp:99)
+ }
+}
+
+Live {
+ 1 block in heap block record 3 of 4
+ 112 bytes (100 requested / 12 slop)
+ 2.54% of the heap (98.19% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ #02: X (X.cpp:99)
+ #03: Y (Y.cpp:99)
+ #04: Z (Z.cpp:99)
+ }
+}
+
+Live {
+ 1 block in heap block record 4 of 4
+ 80 bytes (80 requested / 0 slop)
+ 1.81% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: E (E.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 4,416 bytes in 4 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-max-frames.json b/memory/replace/dmd/test/script-max-frames.json
new file mode 100644
index 000000000..690d50fa7
--- /dev/null
+++ b/memory/replace/dmd/test/script-max-frames.json
@@ -0,0 +1,43 @@
+{
+ "version": 5,
+ "invocation": {
+ "dmdEnvVar": "--mode=live --stacks=full",
+ "mode": "live"
+ },
+ "blockList": [
+ {"req": 4096, "alloc": "A"},
+ {"req": 128, "alloc": "B"},
+ {"req": 100, "slop":12, "alloc": "C"},
+ {"req": 80, "alloc": "D"}
+ ],
+ "traceTable": {
+ "A": ["E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P"],
+ "B": ["E", "F", "G", "R", "S", "T", "U", "V"],
+ "C": ["E", "X", "Y", "Z"],
+ "D": ["E"]
+ },
+ "frameTable": {
+ "E": "#00: E (E.cpp:99)",
+ "F": "#00: F (F.cpp:99)",
+ "G": "#00: G (G.cpp:99)",
+ "H": "#00: H (H.cpp:99)",
+ "I": "#00: I (I.cpp:99)",
+ "J": "#00: J (J.cpp:99)",
+ "K": "#00: K (K.cpp:99)",
+ "L": "#00: L (L.cpp:99)",
+ "M": "#00: M (M.cpp:99)",
+ "N": "#00: N (N.cpp:99)",
+ "O": "#00: O (O.cpp:99)",
+ "P": "#00: P (P.cpp:99)",
+ "Q": "#00: Q (Q.cpp:99)",
+ "R": "#00: R (R.cpp:99)",
+ "S": "#00: S (S.cpp:99)",
+ "T": "#00: T (T.cpp:99)",
+ "U": "#00: U (U.cpp:99)",
+ "V": "#00: V (V.cpp:99)",
+ "W": "#00: W (W.cpp:99)",
+ "X": "#00: X (X.cpp:99)",
+ "Y": "#00: Y (Y.cpp:99)",
+ "Z": "#00: Z (Z.cpp:99)"
+ }
+}
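
The three script-max-frames expected files above show the effect of --max-frames=N: each stack is cut down to its first N frames, and blocks whose truncated stacks become identical are merged into one record. All four blocks share frame E, so --max-frames=1 yields a single 4,416-byte record; A and B also share F and G, so --max-frames=3 merges them; --max-frames=8 keeps all four apart. A rough Python sketch of that truncate-and-group step over the JSON above (a simplification, not dmd.py's actual code):

    import json
    from collections import defaultdict

    def group_by_truncated_stack(j, max_frames):
        # Group blocks by their allocation stack truncated to max_frames;
        # record each block's usable size (req plus any slop).
        groups = defaultdict(list)
        for block in j["blockList"]:
            stack = tuple(j["traceTable"][block["alloc"]][:max_frames])
            groups[stack].append(block["req"] + block.get("slop", 0))
        return groups

    with open("script-max-frames.json") as f:
        j = json.load(f)
    for n in (1, 3, 8):
        sizes = [sorted(s, reverse=True) for s in group_by_truncated_stack(j, n).values()]
        print(n, sorted(sizes, key=sum, reverse=True))
    # 1 -> [[4096, 128, 112, 80]]; 3 -> [[4096, 128], [112], [80]]; 8 -> four records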
diff --git a/memory/replace/dmd/test/script-sort-by-num-blocks-expected.txt b/memory/replace/dmd/test/script-sort-by-num-blocks-expected.txt
new file mode 100644
index 000000000..8de03d953
--- /dev/null
+++ b/memory/replace/dmd/test/script-sort-by-num-blocks-expected.txt
@@ -0,0 +1,46 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-sort-by-num-blocks-actual.txt --sort-by=num-blocks script-sort-by.json.gz
+
+Invocation {
+ $DMD = '--mode=live'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 8 blocks in heap block record 1 of 3
+ 16,384 bytes (8,200 requested / 8,184 slop)
+ Individual block sizes: 2,048 x 8
+ 33.32% of the heap (33.32% cumulative)
+ Allocated at {
+ #01: C (C.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 2 of 3
+ 16,400 bytes (12,016 requested / 4,384 slop)
+ Individual block sizes: 4,096 x 4; 16
+ 33.35% of the heap (66.67% cumulative)
+ Allocated at {
+ #01: B (B.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 3 of 3
+ 16,392 bytes (16,392 requested / 0 slop)
+ Individual block sizes: 4,096 x 4; 8
+ 33.33% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: A (A.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 49,176 bytes in 18 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-sort-by-req-expected.txt b/memory/replace/dmd/test/script-sort-by-req-expected.txt
new file mode 100644
index 000000000..3ab21ba8f
--- /dev/null
+++ b/memory/replace/dmd/test/script-sort-by-req-expected.txt
@@ -0,0 +1,46 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-sort-by-req-actual.txt --sort-by=req --no-fix-stacks script-sort-by.json.gz
+
+Invocation {
+ $DMD = '--mode=live'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 5 blocks in heap block record 1 of 3
+ 16,392 bytes (16,392 requested / 0 slop)
+ Individual block sizes: 4,096 x 4; 8
+ 33.33% of the heap (33.33% cumulative)
+ Allocated at {
+ #01: A (A.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 2 of 3
+ 16,400 bytes (12,016 requested / 4,384 slop)
+ Individual block sizes: 4,096 x 4; 16
+ 33.35% of the heap (66.68% cumulative)
+ Allocated at {
+ #01: B (B.cpp:99)
+ }
+}
+
+Live {
+ 8 blocks in heap block record 3 of 3
+ 16,384 bytes (8,200 requested / 8,184 slop)
+ Individual block sizes: 2,048 x 8
+ 33.32% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: C (C.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 49,176 bytes in 18 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-sort-by-slop-expected.txt b/memory/replace/dmd/test/script-sort-by-slop-expected.txt
new file mode 100644
index 000000000..c325c7ed4
--- /dev/null
+++ b/memory/replace/dmd/test/script-sort-by-slop-expected.txt
@@ -0,0 +1,46 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-sort-by-slop-actual.txt --sort-by=slop script-sort-by.json.gz
+
+Invocation {
+ $DMD = '--mode=live'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 8 blocks in heap block record 1 of 3
+ 16,384 bytes (8,200 requested / 8,184 slop)
+ Individual block sizes: 2,048 x 8
+ 33.32% of the heap (33.32% cumulative)
+ Allocated at {
+ #01: C (C.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 2 of 3
+ 16,400 bytes (12,016 requested / 4,384 slop)
+ Individual block sizes: 4,096 x 4; 16
+ 33.35% of the heap (66.67% cumulative)
+ Allocated at {
+ #01: B (B.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 3 of 3
+ 16,392 bytes (16,392 requested / 0 slop)
+ Individual block sizes: 4,096 x 4; 8
+ 33.33% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: A (A.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 49,176 bytes in 18 blocks
+}
+
diff --git a/memory/replace/dmd/test/script-sort-by-usable-expected.txt b/memory/replace/dmd/test/script-sort-by-usable-expected.txt
new file mode 100644
index 000000000..8239a4759
--- /dev/null
+++ b/memory/replace/dmd/test/script-sort-by-usable-expected.txt
@@ -0,0 +1,46 @@
+#-----------------------------------------------------------------
+# dmd.py --filter-stacks-for-testing -o script-sort-by-usable-actual.txt --sort-by=usable script-sort-by.json.gz
+
+Invocation {
+ $DMD = '--mode=live'
+ Mode = 'live'
+}
+
+#-----------------------------------------------------------------
+
+Live {
+ 5 blocks in heap block record 1 of 3
+ 16,400 bytes (12,016 requested / 4,384 slop)
+ Individual block sizes: 4,096 x 4; 16
+ 33.35% of the heap (33.35% cumulative)
+ Allocated at {
+ #01: B (B.cpp:99)
+ }
+}
+
+Live {
+ 5 blocks in heap block record 2 of 3
+ 16,392 bytes (16,392 requested / 0 slop)
+ Individual block sizes: 4,096 x 4; 8
+ 33.33% of the heap (66.68% cumulative)
+ Allocated at {
+ #01: A (A.cpp:99)
+ }
+}
+
+Live {
+ 8 blocks in heap block record 3 of 3
+ 16,384 bytes (8,200 requested / 8,184 slop)
+ Individual block sizes: 2,048 x 8
+ 33.32% of the heap (100.00% cumulative)
+ Allocated at {
+ #01: C (C.cpp:99)
+ }
+}
+
+#-----------------------------------------------------------------
+
+Summary {
+ Total: 49,176 bytes in 18 blocks
+}
+
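
The four script-sort-by expected files above contain the same three records and differ only in ordering: --sort-by chooses which per-record total drives the descending sort (usable bytes, requested bytes, slop bytes, or block count), and in the num-blocks output the tie between the two 5-block records appears to be broken by usable size. A small Python sketch of those orderings, using the totals printed above:

    # Per-record totals from the script-sort-by expected files.
    records = [
        ("A", {"num-blocks": 5, "usable": 16392, "req": 16392, "slop": 0}),
        ("B", {"num-blocks": 5, "usable": 16400, "req": 12016, "slop": 4384}),
        ("C", {"num-blocks": 8, "usable": 16384, "req": 8200,  "slop": 8184}),
    ]
    for key in ("usable", "req", "slop", "num-blocks"):
        order = sorted(records, key=lambda r: (r[1][key], r[1]["usable"]), reverse=True)
        print(key, "->", " ".join(name for name, _ in order))
    # usable -> B A C; req -> A B C; slop -> C B A; num-blocks -> C B A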
diff --git a/memory/replace/dmd/test/script-sort-by.json.gz b/memory/replace/dmd/test/script-sort-by.json.gz
new file mode 100644
index 000000000..fa7da08c2
--- /dev/null
+++ b/memory/replace/dmd/test/script-sort-by.json.gz
Binary files differ
diff --git a/memory/replace/dmd/test/test_dmd.js b/memory/replace/dmd/test/test_dmd.js
new file mode 100644
index 000000000..b9d4b90dd
--- /dev/null
+++ b/memory/replace/dmd/test/test_dmd.js
@@ -0,0 +1,226 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*-*/
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+var {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/FileUtils.jsm");
+
+// The xpcshell test harness sets PYTHON so we can read it here.
+var gEnv = Cc["@mozilla.org/process/environment;1"]
+ .getService(Ci.nsIEnvironment);
+var gPythonName = gEnv.get("PYTHON");
+
+// If we're testing locally, the executable file is in "CurProcD". Otherwise,
+// it is in another location that we have to find.
+function getExecutable(aFilename) {
+ let file = FileUtils.getFile("CurProcD", [aFilename]);
+ if (!file.exists()) {
+ file = FileUtils.getFile("CurWorkD", []);
+ while (file.path.includes("xpcshell")) {
+ file = file.parent;
+ }
+ file.append("bin");
+ file.append(aFilename);
+ }
+ return file;
+}
+
+var gIsWindows = Cc["@mozilla.org/xre/app-info;1"]
+ .getService(Ci.nsIXULRuntime).OS === "WINNT";
+var gDmdTestFile = getExecutable("SmokeDMD" + (gIsWindows ? ".exe" : ""));
+
+var gDmdScriptFile = getExecutable("dmd.py");
+
+var gScanTestFile = FileUtils.getFile("CurWorkD", ["scan-test.py"]);
+
+function readFile(aFile) {
+ let fstream = Cc["@mozilla.org/network/file-input-stream;1"]
+ .createInstance(Ci.nsIFileInputStream);
+ let cstream = Cc["@mozilla.org/intl/converter-input-stream;1"]
+ .createInstance(Ci.nsIConverterInputStream);
+ fstream.init(aFile, -1, 0, 0);
+ cstream.init(fstream, "UTF-8", 0, 0);
+
+ let data = "";
+ let str = {};
+ let read = 0;
+ do {
+ // Read as much as we can and put it in str.value.
+ read = cstream.readString(0xffffffff, str);
+ data += str.value;
+ } while (read != 0);
+
+ cstream.close(); // this closes fstream
+ return data.replace(/\r/g, ""); // normalize line endings
+}
+
+function runProcess(aExeFile, aArgs) {
+ let process = Cc["@mozilla.org/process/util;1"]
+ .createInstance(Components.interfaces.nsIProcess);
+ process.init(aExeFile);
+ process.run(/* blocking = */true, aArgs, aArgs.length);
+ return process.exitValue;
+}
+
+function test(aPrefix, aArgs) {
+ // DMD writes the JSON files to CurWorkD, so we do likewise here with
+ // |actualFile| for consistency. It is removed once we've finished.
+ let expectedFile = FileUtils.getFile("CurWorkD", [aPrefix + "-expected.txt"]);
+ let actualFile = FileUtils.getFile("CurWorkD", [aPrefix + "-actual.txt"]);
+
+ // Run dmd.py on the JSON file, producing |actualFile|.
+
+ let args = [
+ gDmdScriptFile.path,
+ "--filter-stacks-for-testing",
+ "-o", actualFile.path
+ ].concat(aArgs);
+
+ runProcess(new FileUtils.File(gPythonName), args);
+
+ // Compare |expectedFile| with |actualFile|. We produce nice diffs with
+ // /usr/bin/diff on systems that have it (Mac and Linux). Otherwise (Windows)
+ // we do a string compare of the file contents and then print them both if
+ // they don't match.
+
+ let success;
+ try {
+ let rv = runProcess(new FileUtils.File("/usr/bin/diff"),
+ ["-u", expectedFile.path, actualFile.path]);
+ success = rv == 0;
+
+ } catch (e) {
+ let expectedData = readFile(expectedFile);
+ let actualData = readFile(actualFile);
+ success = expectedData === actualData;
+ if (!success) {
+ expectedData = expectedData.split("\n");
+ actualData = actualData.split("\n");
+ for (let i = 0; i < expectedData.length; i++) {
+ print("EXPECTED:" + expectedData[i]);
+ }
+ for (let i = 0; i < actualData.length; i++) {
+ print(" ACTUAL:" + actualData[i]);
+ }
+ }
+ }
+
+ ok(success, aPrefix);
+
+ actualFile.remove(true);
+}
+
+// Run scan-test.py on the JSON file and see if it succeeds.
+function scanTest(aJsonFilePath, aExtraArgs) {
+ let args = [
+ gScanTestFile.path,
+ aJsonFilePath,
+ ].concat(aExtraArgs);
+
+ return runProcess(new FileUtils.File(gPythonName), args) == 0;
+}
+
+function run_test() {
+ let jsonFile, jsonFile2;
+
+ // These tests do complete end-to-end testing of DMD, i.e. both the C++ code
+ // that generates the JSON output, and the script that post-processes that
+ // output.
+ //
+ // Run these synchronously, because test() updates the complete*.json files
+ // in-place (to fix stacks) when it runs dmd.py, and that's not safe to do
+ // asynchronously.
+
+ gEnv.set(gEnv.get("DMD_PRELOAD_VAR"), gEnv.get("DMD_PRELOAD_VALUE"));
+
+ runProcess(gDmdTestFile, []);
+
+ function test2(aTestName, aMode) {
+ let name = "complete-" + aTestName + "-" + aMode;
+ jsonFile = FileUtils.getFile("CurWorkD", [name + ".json"]);
+ test(name, [jsonFile.path]);
+ jsonFile.remove(true);
+ }
+
+ // Please keep this in sync with RunTests() in SmokeDMD.cpp.
+
+ test2("empty", "live");
+ test2("empty", "dark-matter");
+ test2("empty", "cumulative");
+
+ test2("full1", "live");
+ test2("full1", "dark-matter");
+
+ test2("full2", "dark-matter");
+ test2("full2", "cumulative");
+
+ test2("partial", "live");
+
+ // Heap scan testing.
+ jsonFile = FileUtils.getFile("CurWorkD", ["basic-scan.json"]);
+ ok(scanTest(jsonFile.path), "Basic scan test");
+
+ let is64Bit = Components.classes["@mozilla.org/xre/app-info;1"]
+ .getService(Components.interfaces.nsIXULRuntime).is64Bit;
+ let basicScanFileName = "basic-scan-" + (is64Bit ? "64" : "32");
+ test(basicScanFileName, ["--clamp-contents", jsonFile.path]);
+ ok(scanTest(jsonFile.path, ["--clamp-contents"]), "Scan with address clamping");
+
+ // Run the generic test a second time to ensure that the first time produced
+ // valid JSON output. "--clamp-contents" is passed in so we don't have to have
+ // more variants of the files.
+ test(basicScanFileName, ["--clamp-contents", jsonFile.path]);
+ jsonFile.remove(true);
+
+ // These tests only test the post-processing script. They use hand-written
+ // JSON files as input. Ideally the JSON files would contain comments
+ // explaining how they work, but JSON doesn't allow comments, so I've put
+ // explanations here.
+
+ // This just tests that stack traces of various lengths are truncated
+ // appropriately. The number of records in the output is different for each
+ // of the tested values.
+ jsonFile = FileUtils.getFile("CurWorkD", ["script-max-frames.json"]);
+ test("script-max-frames-8",
+ ["--max-frames=8", jsonFile.path]);
+ test("script-max-frames-3",
+ ["--max-frames=3", "--no-fix-stacks", jsonFile.path]);
+ test("script-max-frames-1",
+ ["--max-frames=1", jsonFile.path]);
+
+ // This file has three records that are shown in a different order for each
+ // of the different sort values. It also tests the handling of gzipped JSON
+ // files.
+ jsonFile = FileUtils.getFile("CurWorkD", ["script-sort-by.json.gz"]);
+ test("script-sort-by-usable",
+ ["--sort-by=usable", jsonFile.path]);
+ test("script-sort-by-req",
+ ["--sort-by=req", "--no-fix-stacks", jsonFile.path]);
+ test("script-sort-by-slop",
+ ["--sort-by=slop", jsonFile.path]);
+ test("script-sort-by-num-blocks",
+ ["--sort-by=num-blocks", jsonFile.path]);
+
+ // This file has several real stack traces taken from Firefox execution, each
+ // of which tests a different allocator function (or functions).
+ jsonFile = FileUtils.getFile("CurWorkD", ["script-ignore-alloc-fns.json"]);
+ test("script-ignore-alloc-fns",
+ ["--ignore-alloc-fns", jsonFile.path]);
+
+ // This tests "live"-mode diffs.
+ jsonFile = FileUtils.getFile("CurWorkD", ["script-diff-live1.json"]);
+ jsonFile2 = FileUtils.getFile("CurWorkD", ["script-diff-live2.json"]);
+ test("script-diff-live",
+ [jsonFile.path, jsonFile2.path]);
+
+ // This tests "dark-matter"-mode diffs.
+ jsonFile = FileUtils.getFile("CurWorkD", ["script-diff-dark-matter1.json"]);
+ jsonFile2 = FileUtils.getFile("CurWorkD", ["script-diff-dark-matter2.json"]);
+ test("script-diff-dark-matter",
+ [jsonFile.path, jsonFile2.path]);
+}
diff --git a/memory/replace/dmd/test/xpcshell.ini b/memory/replace/dmd/test/xpcshell.ini
new file mode 100644
index 000000000..adb82147b
--- /dev/null
+++ b/memory/replace/dmd/test/xpcshell.ini
@@ -0,0 +1,35 @@
+[DEFAULT]
+support-files =
+ basic-scan-32-expected.txt
+ basic-scan-64-expected.txt
+ complete-empty-live-expected.txt
+ complete-empty-dark-matter-expected.txt
+ complete-empty-cumulative-expected.txt
+ complete-full1-live-expected.txt
+ complete-full1-dark-matter-expected.txt
+ complete-full2-dark-matter-expected.txt
+ complete-full2-cumulative-expected.txt
+ complete-partial-live-expected.txt
+ scan-test.py
+ script-max-frames.json
+ script-max-frames-8-expected.txt
+ script-max-frames-3-expected.txt
+ script-max-frames-1-expected.txt
+ script-sort-by.json.gz
+ script-sort-by-usable-expected.txt
+ script-sort-by-req-expected.txt
+ script-sort-by-slop-expected.txt
+ script-sort-by-num-blocks-expected.txt
+ script-ignore-alloc-fns.json
+ script-ignore-alloc-fns-expected.txt
+ script-diff-live1.json
+ script-diff-live2.json
+ script-diff-live-expected.txt
+ script-diff-dark-matter1.json
+ script-diff-dark-matter2.json
+ script-diff-dark-matter-expected.txt
+
+# Bug 1077230 explains why this test is disabled on Mac 10.6.
+[test_dmd.js]
+dmd = true
+skip-if = !(os=='linux' || os=='win' || (os=='mac' && os_version!='10.6'))
diff --git a/memory/replace/dummy/dummy_replace_malloc.c b/memory/replace/dummy/dummy_replace_malloc.c
new file mode 100644
index 000000000..e2f703826
--- /dev/null
+++ b/memory/replace/dummy/dummy_replace_malloc.c
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Types.h"
+
+/*
+ * Dummy functions for linking purposes on OS X with older Xcode.
+ * See details in configure.in, under "Replace-malloc Mac linkage quirks".
+ */
+#define MALLOC_FUNCS MALLOC_FUNCS_ALL
+#define MALLOC_DECL(name, ...) \
+ MOZ_EXPORT void replace_ ## name() { }
+
+#include "malloc_decls.h"
diff --git a/memory/replace/dummy/moz.build b/memory/replace/dummy/moz.build
new file mode 100644
index 000000000..5006db801
--- /dev/null
+++ b/memory/replace/dummy/moz.build
@@ -0,0 +1,14 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+DIST_INSTALL = False
+
+SOURCES += [
+ 'dummy_replace_malloc.c',
+]
+
+SharedLibrary('dummy_replace_malloc')
+
+DISABLE_STL_WRAPPING = True
diff --git a/memory/replace/jemalloc/moz.build b/memory/replace/jemalloc/moz.build
new file mode 100644
index 000000000..5e907af55
--- /dev/null
+++ b/memory/replace/jemalloc/moz.build
@@ -0,0 +1,31 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ '../../build/jemalloc_config.cpp',
+ '../../build/mozjemalloc_compat.c',
+]
+
+# Android doesn't have pthread_atfork, so just implement a dummy function.
+# This shouldn't cause much of a problem, as the use of fork is pretty
+# limited on Android.
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
+ SOURCES += [
+ 'pthread_atfork.c',
+ ]
+
+SharedLibrary('replace_jemalloc')
+
+DEFINES['MOZ_JEMALLOC4'] = True
+DEFINES['MOZ_REPLACE_JEMALLOC'] = True
+
+LOCAL_INCLUDES += ['!../../jemalloc/src/include']
+if CONFIG['_MSC_VER']:
+ LOCAL_INCLUDES += ['/memory/jemalloc/src/include/msvc_compat']
+ if not CONFIG['HAVE_INTTYPES_H']:
+ LOCAL_INCLUDES += ['/memory/jemalloc/src/include/msvc_compat/C99']
+
+DISABLE_STL_WRAPPING = True
diff --git a/memory/replace/jemalloc/pthread_atfork.c b/memory/replace/jemalloc/pthread_atfork.c
new file mode 100644
index 000000000..6e737f072
--- /dev/null
+++ b/memory/replace/jemalloc/pthread_atfork.c
@@ -0,0 +1,10 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+int pthread_atfork(void (*prefork)(void),
+ void (*postfork_parent)(void),
+ void (*postfork_child)(void))
+{
+ return 0;
+}
diff --git a/memory/replace/logalloc/FdPrintf.cpp b/memory/replace/logalloc/FdPrintf.cpp
new file mode 100644
index 000000000..c34dddcaa
--- /dev/null
+++ b/memory/replace/logalloc/FdPrintf.cpp
@@ -0,0 +1,132 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdarg>
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+#include <cstring>
+#include "mozilla/Assertions.h"
+#include "mozilla/Unused.h"
+
+/* Template class allowing a limited number of increments on a value */
+template <typename T>
+class CheckedIncrement
+{
+public:
+ CheckedIncrement(T aValue, size_t aMaxIncrement)
+ : mValue(aValue), mMaxIncrement(aMaxIncrement)
+ {}
+
+ T operator ++(int)
+ {
+ if (!mMaxIncrement) {
+ MOZ_CRASH("overflow detected");
+ }
+ mMaxIncrement--;
+ return mValue++;
+ }
+
+ T& operator ++()
+ {
+ (*this)++;
+ return mValue;
+ }
+
+ operator T() { return mValue; }
+
+private:
+ T mValue;
+ size_t mMaxIncrement;
+};
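+
+/* For illustration: FdPrintf below wraps its output buffer in
+ * CheckedIncrement<char*>, so writing more than the allowed number of
+ * characters through b++ hits MOZ_CRASH("overflow detected") instead of
+ * silently overflowing the buffer. */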
+
+void
+FdPrintf(intptr_t aFd, const char* aFormat, ...)
+{
+ if (aFd == 0) {
+ return;
+ }
+ char buf[256];
+ CheckedIncrement<char*> b(buf, sizeof(buf));
+ CheckedIncrement<const char*> f(aFormat, strlen(aFormat) + 1);
+ va_list ap;
+ va_start(ap, aFormat);
+ while (true) {
+ switch (*f) {
+ case '\0':
+ goto out;
+
+ case '%':
+ switch (*++f) {
+ case 'z': {
+ if (*(++f) == 'u') {
+ size_t i = va_arg(ap, size_t);
+ size_t x = 1;
+ // Compute the number of digits.
+ while (x <= i / 10) {
+ x *= 10;
+ }
+ // Write the digits into the buffer.
+ do {
+ *(b++) = "0123456789"[(i / x) % 10];
+ x /= 10;
+ } while (x > 0);
+ } else {
+ // Write out the format specifier if it's unknown.
+ *(b++) = '%';
+ *(b++) = 'z';
+ *(b++) = *f;
+ }
+ break;
+ }
+
+ case 'p': {
+ intptr_t ptr = va_arg(ap, intptr_t);
+ *(b++) = '0';
+ *(b++) = 'x';
+ int x = sizeof(intptr_t) * 8;
+ bool wrote_msb = false;
+ do {
+ x -= 4;
+ size_t hex_digit = ptr >> x & 0xf;
+ if (hex_digit || wrote_msb) {
+ *(b++) = "0123456789abcdef"[hex_digit];
+ wrote_msb = true;
+ }
+ } while (x > 0);
+ if (!wrote_msb) {
+ *(b++) = '0';
+ }
+ break;
+ }
+
+ default:
+ // Write out the format specifier if it's unknown.
+ *(b++) = '%';
+ *(b++) = *f;
+ break;
+ }
+ break;
+
+ default:
+ *(b++) = *f;
+ break;
+ }
+ f++;
+ }
+out:
+#ifdef _WIN32
+ // See comment in FdPrintf.h as to why WriteFile is used.
+ DWORD written;
+ WriteFile(reinterpret_cast<HANDLE>(aFd), buf, b - buf, &written, nullptr);
+#else
+ MOZ_UNUSED(write(aFd, buf, b - buf));
+#endif
+ va_end(ap);
+}
diff --git a/memory/replace/logalloc/FdPrintf.h b/memory/replace/logalloc/FdPrintf.h
new file mode 100644
index 000000000..9ad3e54a6
--- /dev/null
+++ b/memory/replace/logalloc/FdPrintf.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FdPrintf_h__
+#define __FdPrintf_h__
+
+/* We can't use libc's (f)printf because it would reenter replace_malloc,
+ * so we use a custom, simplified version instead.
+ * Only %p and %z are supported.
+ * /!\ This function uses a fixed-size internal buffer. The caller is
+ * expected to not use a format string that may overflow it.
+ * The aFd argument is a file descriptor on UNIX and a native win32 file
+ * handle on Windows (from CreateFile). The reason we can't use the Windows
+ * POSIX APIs is that they don't support O_APPEND in a multi-process-safe
+ * way, while CreateFile does.
+ */
+extern void FdPrintf(intptr_t aFd, const char* aFormat, ...)
+#ifdef __GNUC__
+__attribute__((format(printf, 2, 3)))
+#endif
+;
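+
+/* A minimal usage sketch (illustrative; the descriptor and values are made
+ * up):
+ *
+ *   void* ptr = malloc(42);
+ *   FdPrintf(fd, "malloc(%zu)=%p\n", size_t(42), ptr);
+ *
+ * would write something like "malloc(42)=0x7f0c33502040" to fd. */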
+
+#endif /* __FdPrintf_h__ */
diff --git a/memory/replace/logalloc/LogAlloc.cpp b/memory/replace/logalloc/LogAlloc.cpp
new file mode 100644
index 000000000..f9b375048
--- /dev/null
+++ b/memory/replace/logalloc/LogAlloc.cpp
@@ -0,0 +1,276 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdlib>
+#include <cstdio>
+#include <fcntl.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#include <io.h>
+#include <process.h>
+#else
+#include <unistd.h>
+#include <pthread.h>
+#endif
+
+#include "replace_malloc.h"
+#include "FdPrintf.h"
+
+#include "base/lock.h"
+
+static const malloc_table_t* sFuncs = nullptr;
+static intptr_t sFd = 0;
+static bool sStdoutOrStderr = false;
+
+static Lock sLock;
+
+static void
+prefork() {
+ sLock.Acquire();
+}
+
+static void
+postfork() {
+ sLock.Release();
+}
+
+static size_t
+GetPid()
+{
+ return size_t(getpid());
+}
+
+static size_t
+GetTid()
+{
+#if defined(_WIN32)
+ return size_t(GetCurrentThreadId());
+#else
+ return size_t(pthread_self());
+#endif
+}
+
+#ifdef ANDROID
+/* See mozglue/android/APKOpen.cpp */
+extern "C" MOZ_EXPORT __attribute__((weak))
+void* __dso_handle;
+
+/* Android doesn't have pthread_atfork defined in pthread.h */
+extern "C" MOZ_EXPORT
+int pthread_atfork(void (*)(void), void (*)(void), void (*)(void));
+#endif
+
+class LogAllocBridge : public ReplaceMallocBridge
+{
+ virtual void InitDebugFd(mozilla::DebugFdRegistry& aRegistry) override {
+ if (!sStdoutOrStderr) {
+ aRegistry.RegisterHandle(sFd);
+ }
+ }
+};
+
+void
+replace_init(const malloc_table_t* aTable)
+{
+ sFuncs = aTable;
+
+#ifndef _WIN32
+ /* When another thread has acquired a lock before forking, the child
+ * process will inherit the lock state but the thread, being nonexistent
+   * in the child process, will never release it, leading to a deadlock
+ * whenever the child process gets the lock. We thus need to ensure no
+ * other thread is holding the lock before forking, by acquiring it
+ * ourselves, and releasing it after forking, both in the parent and child
+ * processes.
+ * Windows doesn't have this problem since there is no fork().
+   * The real allocator, however, might be doing the same thing (jemalloc
+   * does). pthread_atfork `prepare` handlers (the first argument) are
+   * processed in the reverse order they were registered, and replace_init
+   * runs before the real allocator has had any chance to initialize and
+   * call pthread_atfork itself. This leads to its prefork running before
+   * ours, creating a race condition that can lead to a deadlock like
+   * the following:
+ * - thread A forks.
+ * - libc calls real allocator's prefork, so thread A holds the real
+ * allocator lock.
+ * - thread B calls malloc, which calls our replace_malloc.
+ * - consequently, thread B holds our lock.
+ * - thread B then proceeds to call the real allocator's malloc, and
+ * waits for the real allocator's lock, which thread A holds.
+ * - libc calls our prefork, so thread A waits for our lock, which
+ * thread B holds.
+ * To avoid this race condition, the real allocator's prefork must be
+ * called after ours, which means it needs to be registered before ours.
+ * So trick the real allocator into initializing itself without more side
+ * effects by calling malloc with a size it can't possibly allocate. */
+ sFuncs->malloc(-1);
+ pthread_atfork(prefork, postfork, postfork);
+#endif
+
+ /* Initialize output file descriptor from the MALLOC_LOG environment
+ * variable. Numbers up to 9999 are considered as a preopened file
+ * descriptor number. Other values are considered as a file name. */
+ char* log = getenv("MALLOC_LOG");
+ if (log && *log) {
+ int fd = 0;
+ const char *fd_num = log;
+ while (*fd_num) {
+ /* Reject non digits. */
+ if (*fd_num < '0' || *fd_num > '9') {
+ fd = -1;
+ break;
+ }
+ fd = fd * 10 + (*fd_num - '0');
+ /* Reject values >= 10000. */
+ if (fd >= 10000) {
+ fd = -1;
+ break;
+ }
+ fd_num++;
+ }
+ if (fd == 1 || fd == 2) {
+ sStdoutOrStderr = true;
+ }
+#ifdef _WIN32
+ // See comment in FdPrintf.h as to why CreateFile is used.
+ HANDLE handle;
+ if (fd > 0) {
+ handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+ } else {
+ handle = CreateFileA(log, FILE_APPEND_DATA, FILE_SHARE_READ |
+ FILE_SHARE_WRITE, nullptr, OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, nullptr);
+ }
+ if (handle != INVALID_HANDLE_VALUE) {
+ sFd = reinterpret_cast<intptr_t>(handle);
+ }
+#else
+ if (fd == -1) {
+ fd = open(log, O_WRONLY | O_CREAT | O_APPEND, 0644);
+ }
+ if (fd > 0) {
+ sFd = fd;
+ }
+#endif
+ }
+}
+
+ReplaceMallocBridge*
+replace_get_bridge()
+{
+ static LogAllocBridge bridge;
+ return &bridge;
+}
+
+/* Do a simple, text-form log of all calls to replace-malloc functions.
+ * Use locking to guarantee that an allocation that did happen is logged
+ * before any other allocation/free happens.
+ * TODO: Add a thread id to the log: different allocators, or even different
+ * configurations of jemalloc behave differently when allocations are coming
+ * from different threads. Reproducing those multi-threaded workloads would be
+ * useful to test those differences.
+ */
+
+void*
+replace_malloc(size_t aSize)
+{
+ AutoLock lock(sLock);
+ void* ptr = sFuncs->malloc(aSize);
+ if (ptr) {
+ FdPrintf(sFd, "%zu %zu malloc(%zu)=%p\n", GetPid(), GetTid(), aSize, ptr);
+ }
+ return ptr;
+}
+
+int
+replace_posix_memalign(void** aPtr, size_t aAlignment, size_t aSize)
+{
+ AutoLock lock(sLock);
+ int ret = sFuncs->posix_memalign(aPtr, aAlignment, aSize);
+ if (ret == 0) {
+ FdPrintf(sFd, "%zu %zu posix_memalign(%zu,%zu)=%p\n", GetPid(), GetTid(),
+ aAlignment, aSize, *aPtr);
+ }
+ return ret;
+}
+
+void*
+replace_aligned_alloc(size_t aAlignment, size_t aSize)
+{
+ AutoLock lock(sLock);
+ void* ptr = sFuncs->aligned_alloc(aAlignment, aSize);
+ if (ptr) {
+ FdPrintf(sFd, "%zu %zu aligned_alloc(%zu,%zu)=%p\n", GetPid(), GetTid(),
+ aAlignment, aSize, ptr);
+ }
+ return ptr;
+}
+
+void*
+replace_calloc(size_t aNum, size_t aSize)
+{
+ AutoLock lock(sLock);
+ void* ptr = sFuncs->calloc(aNum, aSize);
+ if (ptr) {
+ FdPrintf(sFd, "%zu %zu calloc(%zu,%zu)=%p\n", GetPid(), GetTid(), aNum,
+ aSize, ptr);
+ }
+ return ptr;
+}
+
+void*
+replace_realloc(void* aPtr, size_t aSize)
+{
+ AutoLock lock(sLock);
+ void* new_ptr = sFuncs->realloc(aPtr, aSize);
+ if (new_ptr || !aSize) {
+ FdPrintf(sFd, "%zu %zu realloc(%p,%zu)=%p\n", GetPid(), GetTid(), aPtr,
+ aSize, new_ptr);
+ }
+ return new_ptr;
+}
+
+void
+replace_free(void* aPtr)
+{
+ AutoLock lock(sLock);
+ if (aPtr) {
+ FdPrintf(sFd, "%zu %zu free(%p)\n", GetPid(), GetTid(), aPtr);
+ }
+ sFuncs->free(aPtr);
+}
+
+void*
+replace_memalign(size_t aAlignment, size_t aSize)
+{
+ AutoLock lock(sLock);
+ void* ptr = sFuncs->memalign(aAlignment, aSize);
+ if (ptr) {
+ FdPrintf(sFd, "%zu %zu memalign(%zu,%zu)=%p\n", GetPid(), GetTid(),
+ aAlignment, aSize, ptr);
+ }
+ return ptr;
+}
+
+void*
+replace_valloc(size_t aSize)
+{
+ AutoLock lock(sLock);
+ void* ptr = sFuncs->valloc(aSize);
+ if (ptr) {
+ FdPrintf(sFd, "%zu %zu valloc(%zu)=%p\n", GetPid(), GetTid(), aSize, ptr);
+ }
+ return ptr;
+}
+
+void
+replace_jemalloc_stats(jemalloc_stats_t* aStats)
+{
+ AutoLock lock(sLock);
+ sFuncs->jemalloc_stats(aStats);
+ FdPrintf(sFd, "%zu %zu jemalloc_stats()\n", GetPid(), GetTid());
+}
diff --git a/memory/replace/logalloc/README b/memory/replace/logalloc/README
new file mode 100644
index 000000000..ce0d82be8
--- /dev/null
+++ b/memory/replace/logalloc/README
@@ -0,0 +1,107 @@
+Logalloc is a replace-malloc library for Firefox (see
+memory/build/replace_malloc.h) that dumps a log of memory allocations to a
+given file descriptor or file name. That log can then be replayed against
+Firefox's default memory allocator independently or through another
+replace-malloc library, allowing the testing of other allocators under the
+exact same workload.
+
+To get an allocation log the following environment variables need to be set
+when starting Firefox:
+- on Linux:
+ LD_PRELOAD=/path/to/liblogalloc.so
+- on Mac OSX:
+ DYLD_INSERT_LIBRARIES=/path/to/liblogalloc.dylib
+- on Windows:
+ MOZ_REPLACE_MALLOC_LIB=/path/to/logalloc.dll
+- on Android:
+ MOZ_REPLACE_MALLOC_LIB=/path/to/liblogalloc.so
+ (see https://wiki.mozilla.org/Mobile/Fennec/Android#Arguments_and_Environment_Variables
+ for how to pass environment variables to Firefox for Android)
+
+- on all platforms:
+ MALLOC_LOG=/path/to/log-file
+ or
+ MALLOC_LOG=number
+
+When MALLOC_LOG is a number below 10000, it is treated as the number of a
+file descriptor that is passed to Firefox when it is started. Otherwise,
+it is treated as a file name.
+
+As those allocation logs can grow large quite quickly, it can be useful
+to pipe the output to a compression tool.
+
+MALLOC_LOG=1 would send the log to Firefox's stdout, and MALLOC_LOG=2 would
+send it to its stderr. Since in both cases the log could be mixed with other
+output from Firefox, it is usually better to use another file descriptor
+through shell redirections, such as:
+
+ MALLOC_LOG=3 firefox 3>&1 1>&2 | gzip -c > log.gz
+
+(3>&1 copies the `| gzip` pipe file descriptor to file descriptor #3, 1>&2
+then copies stderr to stdout. This leads to: fd1 and fd2 sending to stderr
+of the parent process (the shell), and fd3 sending to gzip.)
+
+Each line of the allocations log is formatted as follows:
+ <pid> <tid> <function>([<args>])[=<result>]
+where <args> is a comma separated list of values. The number of <args> and
+the presence of <result> depend on the <function>.
+
+Example log:
+ 18545 18545 malloc(32)=0x7f90495120e0
+ 18545 18545 calloc(1,148)=0x7f9049537480
+ 18545 18545 realloc(0x7f90495120e0,64)=0x7f9049536680
+ 18545 18545 posix_memalign(256,240)=0x7f9049583300
+ 18545 18545 jemalloc_stats()
+ 18545 18545 free(0x7f9049536680)
+
+This log can be replayed with the logalloc-replay tool in
+memory/replace/logalloc/replay. However, as the goal of that tool is to
+reproduce the recorded memory allocations, it needs to avoid as much as
+possible doing its own allocations for bookkeeping. Reading the logs as
+they are would require data structures and memory allocations. As a
+consequence, the logs need to be preprocessed beforehand.
+
+The logalloc_munge.py script is responsible for that preprocessing. It simply
+takes a raw log on its stdin, and outputs the preprocessed log on its stdout.
+It replaces pointer addresses with indexes the logalloc-replay tool can use
+in a large (almost) linear array of allocation tracking slots (prefixed with
+'#'). It also replaces the pids with numbers starting from 1 (so that the
+first pid seen becomes 1, the second becomes 2, etc.).
+
+The above example log would become the following, once preprocessed:
+ 1 1 malloc(32)=#1
+ 1 1 calloc(1,148)=#2
+ 1 1 realloc(#1,64)=#1
+ 1 1 posix_memalign(256,240)=#3
+ 1 1 jemalloc_stats()
+ 1 1 free(#1)
+
+The logalloc-replay tool then takes the preprocessed log on its stdin and
+replays the allocations printed there, but will only replay those with the
+same process id as the first line (which normally is 1).
+
+As the log files are simple text files, though, it is easy to separate out
+the logs of the different processes with e.g. grep, and to feed each
+process's log to logalloc-replay.
+
+The logalloc-replay program won't output anything unless jemalloc_stats
+records appear in the log. You can expect those to be recorded when going
+to about:memory in Firefox, but they can also be added after preprocessing.
+
+Here is an example of what one can do:
+
+ gunzip -c log.gz | python logalloc_munge.py | \
+ awk '$1 == "2" { print $0 } !(NR % 10000) { print "2 1 jemalloc_stats()" }' | \
+ ./logalloc-replay
+
+The above command replays the allocations of process #2, with some stats
+output every 10000 records.
+
+Since the logalloc-replay tool is itself hooked with replace-malloc, it is
+possible to set LD_PRELOAD/DYLD_INSERT_LIBRARIES/MOZ_REPLACE_MALLOC_LIB and
+replay a log through a different allocator. For example:
+
+ LD_PRELOAD=libreplace_jemalloc.so logalloc-replay < log
+
+This will replay the log against jemalloc4 (which, as of this writing, is
+what libreplace_jemalloc.so contains).
diff --git a/memory/replace/logalloc/moz.build b/memory/replace/logalloc/moz.build
new file mode 100644
index 000000000..e549dca76
--- /dev/null
+++ b/memory/replace/logalloc/moz.build
@@ -0,0 +1,41 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SharedLibrary('logalloc')
+
+SOURCES += [
+ 'FdPrintf.cpp',
+ 'LogAlloc.cpp',
+]
+
+DISABLE_STL_WRAPPING = True
+USE_STATIC_LIBS = True
+DEFINES['MOZ_NO_MOZALLOC'] = True
+# Avoid Lock_impl code depending on mozilla::Logger.
+DEFINES['NDEBUG'] = True
+DEFINES['DEBUG'] = False
+
+# Use locking code from the chromium stack.
+if CONFIG['OS_TARGET'] == 'WINNT':
+ SOURCES += [
+ '../../../ipc/chromium/src/base/lock_impl_win.cc',
+ ]
+else:
+ SOURCES += [
+ '../../../ipc/chromium/src/base/lock_impl_posix.cc',
+ ]
+
+include('/ipc/chromium/chromium-config.mozbuild')
+
+# Android doesn't have pthread_atfork, but we have our own in mozglue.
+if CONFIG['OS_TARGET'] == 'Android':
+ USE_LIBS += [
+ 'mozglue',
+ ]
+
+DIRS += [
+ 'replay',
+]
diff --git a/memory/replace/logalloc/replay/Makefile.in b/memory/replace/logalloc/replay/Makefile.in
new file mode 100644
index 000000000..47e8c43ca
--- /dev/null
+++ b/memory/replace/logalloc/replay/Makefile.in
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+include $(topsrcdir)/mozglue/build/replace_malloc.mk
+
+ifndef CROSS_COMPILE
+
+ifeq ($(OS_TARGET),WINNT)
+LOGALLOC = MOZ_REPLACE_MALLOC_LIB=$(CURDIR)/../logalloc.dll
+else
+ifeq ($(OS_TARGET),Darwin)
+LOGALLOC = DYLD_INSERT_LIBRARIES=$(CURDIR)/../liblogalloc.dylib
+else
+LOGALLOC = LD_PRELOAD=$(CURDIR)/../$(DLL_PREFIX)logalloc$(DLL_SUFFIX)
+endif
+endif
+
+expected_output.log: $(srcdir)/replay.log
+# The logalloc-replay program will only replay entries from the first pid,
+# so the expected output only contains entries beginning with "1 "
+ grep "^1 " $< > $@
+
+check:: $(srcdir)/replay.log expected_output.log
+# Test with MALLOC_LOG as a file descriptor number
+ MALLOC_LOG=1 $(LOGALLOC) ./$(PROGRAM) < $< | $(PYTHON) $(srcdir)/logalloc_munge.py | diff -w - expected_output.log
+# Test with MALLOC_LOG as a file name
+ $(RM) test_output.log
+ MALLOC_LOG=test_output.log $(LOGALLOC) ./$(PROGRAM) < $<
+ $(PYTHON) $(srcdir)/logalloc_munge.py < test_output.log | diff -w - expected_output.log
+
+endif
diff --git a/memory/replace/logalloc/replay/Replay.cpp b/memory/replace/logalloc/replay/Replay.cpp
new file mode 100644
index 000000000..30fcd21e5
--- /dev/null
+++ b/memory/replace/logalloc/replay/Replay.cpp
@@ -0,0 +1,558 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#define MOZ_MEMORY_IMPL
+#include "mozmemory_wrap.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#include <io.h>
+typedef int ssize_t;
+#else
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+#include <algorithm>
+#include <cstdio>
+#include <cstring>
+
+#include "mozilla/Assertions.h"
+#include "FdPrintf.h"
+
+static void
+die(const char* message)
+{
+ /* Here, it doesn't matter that fprintf may allocate memory. */
+ fprintf(stderr, "%s\n", message);
+ exit(1);
+}
+
+/* We don't want to be using malloc() to allocate our internal tracking
+ * data, because that would change the parameters of what is being measured,
+ * so we want to use data types that directly use mmap/VirtualAlloc. */
+template <typename T, size_t Len>
+class MappedArray
+{
+public:
+ MappedArray(): mPtr(nullptr) {}
+
+ ~MappedArray()
+ {
+ if (mPtr) {
+#ifdef _WIN32
+ VirtualFree(mPtr, sizeof(T) * Len, MEM_RELEASE);
+#else
+ munmap(mPtr, sizeof(T) * Len);
+#endif
+ }
+ }
+
+ T& operator[] (size_t aIndex) const
+ {
+ if (mPtr) {
+ return mPtr[aIndex];
+ }
+
+#ifdef _WIN32
+ mPtr = reinterpret_cast<T*>(VirtualAlloc(nullptr, sizeof(T) * Len,
+ MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE));
+ if (mPtr == nullptr) {
+ die("VirtualAlloc error");
+ }
+#else
+ mPtr = reinterpret_cast<T*>(mmap(nullptr, sizeof(T) * Len,
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
+ if (mPtr == MAP_FAILED) {
+ die("Mmap error");
+ }
+#endif
+ return mPtr[aIndex];
+ }
+
+private:
+ mutable T* mPtr;
+};
+
+/* Type for records of allocations. */
+struct MemSlot
+{
+ void* mPtr;
+ size_t mSize;
+};
+
+/* An almost infinite list of slots.
+ * In essence, this is a linked list of arrays of groups of slots.
+ * Each group is 1MB. On 64-bits, one group allows to store 64k allocations.
+ * Each MemSlotList instance can store 1023 such groups, which means more
+ * than 65M allocations. In case more would be needed, we chain to another
+ * MemSlotList, and so on.
+ * Using 1023 groups makes the MemSlotList itself page sized on 32-bits
+ * and 2 pages-sized on 64-bits.
+ */
+class MemSlotList
+{
+ static const size_t kGroups = 1024 - 1;
+ static const size_t kGroupSize = (1024 * 1024) / sizeof(MemSlot);
+
+ MappedArray<MemSlot, kGroupSize> mSlots[kGroups];
+ MappedArray<MemSlotList, 1> mNext;
+
+public:
+ MemSlot& operator[] (size_t aIndex) const
+ {
+ if (aIndex < kGroupSize * kGroups) {
+ return mSlots[aIndex / kGroupSize][aIndex % kGroupSize];
+ }
+ aIndex -= kGroupSize * kGroups;
+ return mNext[0][aIndex];
+ }
+};
+
+/* Helper class for memory buffers */
+class Buffer
+{
+public:
+ Buffer() : mBuf(nullptr), mLength(0) {}
+
+ Buffer(const void* aBuf, size_t aLength)
+ : mBuf(reinterpret_cast<const char*>(aBuf)), mLength(aLength)
+ {}
+
+ /* Constructor for string literals. */
+ template <size_t Size>
+ explicit Buffer(const char (&aStr)[Size])
+ : mBuf(aStr), mLength(Size - 1)
+ {}
+
+  /* Returns a sub-buffer up to but not including the given aNeedle character.
+ * The "parent" buffer itself is altered to begin after the aNeedle
+ * character.
+ * If the aNeedle character is not found, return the entire buffer, and empty
+ * the "parent" buffer. */
+ Buffer SplitChar(char aNeedle)
+ {
+ char* buf = const_cast<char*>(mBuf);
+ char* c = reinterpret_cast<char*>(memchr(buf, aNeedle, mLength));
+ if (!c) {
+ return Split(mLength);
+ }
+
+ Buffer result = Split(c - buf);
+ // Remove the aNeedle character itself.
+ Split(1);
+ return result;
+ }
+
+  /* Returns a sub-buffer of at most aLength characters. The "parent" buffer
+   * loses those aLength characters from its beginning. If the "parent" buffer
+   * is smaller than aLength, then its length is used instead. */
+ Buffer Split(size_t aLength)
+ {
+ Buffer result(mBuf, std::min(aLength, mLength));
+ mLength -= result.mLength;
+ mBuf += result.mLength;
+ return result;
+ }
+
+ /* Move the buffer (including its content) to the memory address of the aOther
+ * buffer. */
+ void Slide(Buffer aOther)
+ {
+ memmove(const_cast<char*>(aOther.mBuf), mBuf, mLength);
+ mBuf = aOther.mBuf;
+ }
+
+ /* Returns whether the two involved buffers have the same content. */
+ bool operator ==(Buffer aOther)
+ {
+ return mLength == aOther.mLength && (mBuf == aOther.mBuf ||
+ !strncmp(mBuf, aOther.mBuf, mLength));
+ }
+
+ /* Returns whether the buffer is empty. */
+ explicit operator bool() { return mLength; }
+
+ /* Returns the memory location of the buffer. */
+ const char* get() { return mBuf; }
+
+ /* Returns the memory location of the end of the buffer (technically, the
+ * first byte after the buffer). */
+ const char* GetEnd() { return mBuf + mLength; }
+
+ /* Extend the buffer over the content of the other buffer, assuming it is
+ * adjacent. */
+ void Extend(Buffer aOther)
+ {
+ MOZ_ASSERT(aOther.mBuf == GetEnd());
+ mLength += aOther.mLength;
+ }
+
+private:
+ const char* mBuf;
+ size_t mLength;
+};
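+
+/* For illustration, given a Buffer b over the text "malloc(42)=#1":
+ *   b.SplitChar('(') returns "malloc" and leaves b as "42)=#1";
+ *   b.SplitChar(')') returns "42" and leaves b as "=#1".
+ * This is essentially how main() below tokenizes each log line. */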
+
+/* Helper class to read from a file descriptor line by line. */
+class FdReader {
+public:
+ explicit FdReader(int aFd)
+ : mFd(aFd)
+ , mData(&mRawBuf, 0)
+ , mBuf(&mRawBuf, sizeof(mRawBuf))
+ {}
+
+  /* Read a line from the file descriptor and return it as a Buffer instance */
+ Buffer ReadLine()
+ {
+ while (true) {
+ Buffer result = mData.SplitChar('\n');
+
+ /* There are essentially three different cases here:
+ * - '\n' was found "early". In this case, the end of the result buffer
+ * is before the beginning of the mData buffer (since SplitChar
+ * amputated it).
+ * - '\n' was found as the last character of mData. In this case, mData
+ * is empty, but still points at the end of mBuf. result points to what
+ * used to be in mData, without the last character.
+ * - '\n' was not found. In this case too, mData is empty and points at
+ * the end of mBuf. But result points to the entire buffer that used to
+ * be pointed by mData.
+ * Only in the latter case do both result and mData's end match, and it's
+ * the only case where we need to refill the buffer.
+ */
+ if (result.GetEnd() != mData.GetEnd()) {
+ return result;
+ }
+
+ /* Since SplitChar emptied mData, make it point to what it had before. */
+ mData = result;
+
+ /* And move it to the beginning of the read buffer. */
+ mData.Slide(mBuf);
+
+ FillBuffer();
+
+ if (!mData) {
+ return Buffer();
+ }
+ }
+ }
+
+private:
+ /* Fill the read buffer. */
+ void FillBuffer()
+ {
+ size_t size = mBuf.GetEnd() - mData.GetEnd();
+ Buffer remainder(mData.GetEnd(), size);
+
+ ssize_t len = 1;
+ while (remainder && len > 0) {
+ len = ::read(mFd, const_cast<char*>(remainder.get()), size);
+ if (len < 0) {
+ die("Read error");
+ }
+ size -= len;
+ mData.Extend(remainder.Split(len));
+ }
+ }
+
+ /* File descriptor to read from. */
+ int mFd;
+ /* Part of data that was read from the file descriptor but not returned with
+ * ReadLine yet. */
+ Buffer mData;
+ /* Buffer representation of mRawBuf */
+ Buffer mBuf;
+ /* read() buffer */
+ char mRawBuf[4096];
+};
+
+MOZ_BEGIN_EXTERN_C
+
+/* Function declarations for all the replace_malloc _impl functions.
+ * See memory/build/replace_malloc.c */
+#define MALLOC_DECL(name, return_type, ...) \
+ return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
+#include "malloc_decls.h"
+
+#define MALLOC_DECL(name, return_type, ...) \
+ return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
+#include "malloc_decls.h"
+
+/* mozjemalloc relies on DllMain to initialize, but DllMain is not invoked
+ * for executables, so manually invoke mozjemalloc initialization. */
+#if defined(_WIN32) && !defined(MOZ_JEMALLOC4)
+void malloc_init_hard(void);
+#endif
+
+#ifdef ANDROID
+/* mozjemalloc uses MozTagAnonymousMemory, which doesn't have an inline
+ * implementation on Android */
+void
+MozTagAnonymousMemory(const void* aPtr, size_t aLength, const char* aTag) {}
+
+/* mozjemalloc and jemalloc use pthread_atfork, which Android doesn't have.
+ * While gecko has one in libmozglue, the replay program can't use that.
+ * Since we're not going to fork anyway, make it a dummy function. */
+int
+pthread_atfork(void (*aPrepare)(void), void (*aParent)(void),
+ void (*aChild)(void))
+{
+ return 0;
+}
+#endif
+
+MOZ_END_EXTERN_C
+
+size_t parseNumber(Buffer aBuf)
+{
+ if (!aBuf) {
+ die("Malformed input");
+ }
+
+ size_t result = 0;
+ for (const char* c = aBuf.get(), *end = aBuf.GetEnd(); c < end; c++) {
+ if (*c < '0' || *c > '9') {
+ die("Malformed input");
+ }
+ result *= 10;
+ result += *c - '0';
+ }
+ return result;
+}
+
+/* Class to handle dispatching the replay function calls to replace-malloc. */
+class Replay
+{
+public:
+ Replay(): mOps(0) {
+#ifdef _WIN32
+ // See comment in FdPrintf.h as to why native win32 handles are used.
+ mStdErr = reinterpret_cast<intptr_t>(GetStdHandle(STD_ERROR_HANDLE));
+#else
+ mStdErr = fileno(stderr);
+#endif
+ }
+
+ MemSlot& operator[] (size_t index) const
+ {
+ return mSlots[index];
+ }
+
+ void malloc(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ size_t size = parseNumber(aArgs);
+ aSlot.mPtr = ::malloc_impl(size);
+ aSlot.mSize = size;
+ Commit(aSlot);
+ }
+
+ void posix_memalign(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ size_t alignment = parseNumber(aArgs.SplitChar(','));
+ size_t size = parseNumber(aArgs);
+ void* ptr;
+ if (::posix_memalign_impl(&ptr, alignment, size) == 0) {
+ aSlot.mPtr = ptr;
+ aSlot.mSize = size;
+ } else {
+ aSlot.mPtr = nullptr;
+ aSlot.mSize = 0;
+ }
+ Commit(aSlot);
+ }
+
+ void aligned_alloc(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ size_t alignment = parseNumber(aArgs.SplitChar(','));
+ size_t size = parseNumber(aArgs);
+ aSlot.mPtr = ::aligned_alloc_impl(alignment, size);
+ aSlot.mSize = size;
+ Commit(aSlot);
+ }
+
+ void calloc(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ size_t num = parseNumber(aArgs.SplitChar(','));
+ size_t size = parseNumber(aArgs);
+ aSlot.mPtr = ::calloc_impl(num, size);
+ aSlot.mSize = size * num;
+ Commit(aSlot);
+ }
+
+ void realloc(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ Buffer dummy = aArgs.SplitChar('#');
+ if (dummy) {
+ die("Malformed input");
+ }
+ size_t slot_id = parseNumber(aArgs.SplitChar(','));
+ size_t size = parseNumber(aArgs);
+ MemSlot& old_slot = (*this)[slot_id];
+ void* old_ptr = old_slot.mPtr;
+ old_slot.mPtr = nullptr;
+ old_slot.mSize = 0;
+ aSlot.mPtr = ::realloc_impl(old_ptr, size);
+ aSlot.mSize = size;
+ Commit(aSlot);
+ }
+
+ void free(Buffer& aArgs)
+ {
+ mOps++;
+ Buffer dummy = aArgs.SplitChar('#');
+ if (dummy) {
+ die("Malformed input");
+ }
+ size_t slot_id = parseNumber(aArgs);
+ MemSlot& slot = (*this)[slot_id];
+ ::free_impl(slot.mPtr);
+ slot.mPtr = nullptr;
+ slot.mSize = 0;
+ }
+
+ void memalign(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ size_t alignment = parseNumber(aArgs.SplitChar(','));
+ size_t size = parseNumber(aArgs);
+ aSlot.mPtr = ::memalign_impl(alignment, size);
+ aSlot.mSize = size;
+ Commit(aSlot);
+ }
+
+ void valloc(MemSlot& aSlot, Buffer& aArgs)
+ {
+ mOps++;
+ size_t size = parseNumber(aArgs);
+ aSlot.mPtr = ::valloc_impl(size);
+ aSlot.mSize = size;
+ Commit(aSlot);
+ }
+
+ void jemalloc_stats(Buffer& aArgs)
+ {
+ if (aArgs) {
+ die("Malformed input");
+ }
+ jemalloc_stats_t stats;
+ ::jemalloc_stats_impl(&stats);
+ FdPrintf(mStdErr,
+ "#%zu mapped: %zu; allocated: %zu; waste: %zu; dirty: %zu; "
+ "bookkeep: %zu; binunused: %zu\n", mOps, stats.mapped,
+ stats.allocated, stats.waste, stats.page_cache,
+ stats.bookkeeping, stats.bin_unused);
+    /* TODO: Add more data, like actual RSS as measured by the OS, but
+     * compensated for the replay tool's internal data. */
+ }
+
+private:
+ void Commit(MemSlot& aSlot)
+ {
+ memset(aSlot.mPtr, 0x5a, aSlot.mSize);
+ }
+
+ intptr_t mStdErr;
+ size_t mOps;
+ MemSlotList mSlots;
+};
+
+
+int
+main()
+{
+ size_t first_pid = 0;
+ FdReader reader(0);
+ Replay replay;
+
+#if defined(_WIN32) && !defined(MOZ_JEMALLOC4)
+ malloc_init_hard();
+#endif
+
+ /* Read log from stdin and dispatch function calls to the Replay instance.
+ * The log format is essentially:
+   *   <pid> <tid> <function>([<args>])[=<result>]
+ * <args> is a comma separated list of arguments.
+ *
+ * The logs are expected to be preprocessed so that allocations are
+   * assigned a tracking slot. The input is trusted not to have crazy
+ * values for these slot numbers.
+ *
+ * <result>, as well as some of the args to some of the function calls are
+ * such slot numbers.
+ */
+ while (true) {
+ Buffer line = reader.ReadLine();
+
+ if (!line) {
+ break;
+ }
+
+ size_t pid = parseNumber(line.SplitChar(' '));
+ if (!first_pid) {
+ first_pid = pid;
+ }
+
+    /* The log may contain data for several processes; only entries for the
+     * very first one that appears are replayed. */
+ if (first_pid != pid) {
+ continue;
+ }
+
+ /* The log contains thread ids for manual analysis, but we just ignore them
+ * for now. */
+ parseNumber(line.SplitChar(' '));
+
+ Buffer func = line.SplitChar('(');
+ Buffer args = line.SplitChar(')');
+
+ /* jemalloc_stats and free are functions with no result. */
+ if (func == Buffer("jemalloc_stats")) {
+ replay.jemalloc_stats(args);
+ continue;
+ } else if (func == Buffer("free")) {
+ replay.free(args);
+ continue;
+ }
+
+ /* Parse result value and get the corresponding slot. */
+ Buffer dummy = line.SplitChar('=');
+ Buffer dummy2 = line.SplitChar('#');
+ if (dummy || dummy2) {
+ die("Malformed input");
+ }
+
+ size_t slot_id = parseNumber(line);
+ MemSlot& slot = replay[slot_id];
+
+ if (func == Buffer("malloc")) {
+ replay.malloc(slot, args);
+ } else if (func == Buffer("posix_memalign")) {
+ replay.posix_memalign(slot, args);
+ } else if (func == Buffer("aligned_alloc")) {
+ replay.aligned_alloc(slot, args);
+ } else if (func == Buffer("calloc")) {
+ replay.calloc(slot, args);
+ } else if (func == Buffer("realloc")) {
+ replay.realloc(slot, args);
+ } else if (func == Buffer("memalign")) {
+ replay.memalign(slot, args);
+ } else if (func == Buffer("valloc")) {
+ replay.valloc(slot, args);
+ } else {
+ die("Malformed input");
+ }
+ }
+
+ return 0;
+}
diff --git a/memory/replace/logalloc/replay/logalloc_munge.py b/memory/replace/logalloc/replay/logalloc_munge.py
new file mode 100644
index 000000000..a244c3c3d
--- /dev/null
+++ b/memory/replace/logalloc/replay/logalloc_munge.py
@@ -0,0 +1,147 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+This script takes a log from the replace-malloc logalloc library on stdin
+and munges it so that it can be used with the logalloc-replay tool.
+
+Given the following output:
+ 13663 malloc(42)=0x7f0c33502040
+ 13663 malloc(24)=0x7f0c33503040
+ 13663 free(0x7f0c33502040)
+The resulting output is:
+ 1 malloc(42)=#1
+ 1 malloc(24)=#2
+ 1 free(#1)
+
+See README for more details.
+"""
+
+from __future__ import print_function
+import sys
+from collections import (
+ defaultdict,
+ deque,
+)
+
+class IdMapping(object):
+ """Class to map values to ids.
+
+    Each value is associated with an increasing id, starting from 1.
+ When a value is removed, its id is recycled and will be reused for
+ subsequent values.
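+
+    For illustration (hypothetical addresses):
+        m = IdMapping()
+        m[0x7f12340]        # -> 1 (first value seen)
+        m[0x7f56780]        # -> 2
+        del m[0x7f12340]    # id 1 is recycled
+        m[0x7f9abc0]        # -> 1 (reuses the recycled id)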
+ """
+ def __init__(self):
+ self.id = 1
+ self._values = {}
+ self._recycle = deque()
+
+ def __getitem__(self, value):
+ if value not in self._values:
+ if self._recycle:
+ self._values[value] = self._recycle.popleft()
+ else:
+ self._values[value] = self.id
+ self.id += 1
+ return self._values[value]
+
+ def __delitem__(self, value):
+ if value == 0:
+ return
+ self._recycle.append(self._values[value])
+ del self._values[value]
+
+ def __contains__(self, value):
+ return value == 0 or value in self._values
+
+
+class Ignored(Exception): pass
+
+
+def split_log_line(line):
+ try:
+ # The format for each line is:
+ # <pid> [<tid>] <function>([<args>])[=<result>]
+ #
+ # The original format didn't include the tid, so we try to parse
+ # lines whether they have one or not.
+ pid, func_call = line.split(' ', 1)
+ call, result = func_call.split(')')
+ func, args = call.split('(')
+ args = args.split(',') if args else []
+ if result:
+ if result[0] != '=':
+ raise Ignored('Malformed input')
+ result = result[1:]
+ if ' ' in func:
+ tid, func = func.split(' ', 1)
+ else:
+ tid = pid
+ return pid, tid, func, args, result
+ except:
+ raise Ignored('Malformed input')
+
+
+NUM_ARGUMENTS = {
+ 'jemalloc_stats': 0,
+ 'free': 1,
+ 'malloc': 1,
+ 'posix_memalign': 2,
+ 'aligned_alloc': 2,
+ 'calloc': 2,
+ 'realloc': 2,
+ 'memalign': 2,
+ 'valloc': 1,
+}
+
+
+def main():
+ pids = IdMapping()
+ processes = defaultdict(lambda: { 'pointers': IdMapping(),
+ 'tids': IdMapping() })
+ for line in sys.stdin:
+ line = line.strip()
+
+ try:
+ pid, tid, func, args, result = split_log_line(line)
+
+ # Replace pid with an id.
+ pid = pids[int(pid)]
+
+ process = processes[pid]
+ tid = process['tids'][int(tid)]
+
+ pointers = process['pointers']
+
+ if func not in NUM_ARGUMENTS:
+ raise Ignored('Unknown function')
+
+ if len(args) != NUM_ARGUMENTS[func]:
+ raise Ignored('Malformed input')
+
+ if func in ('jemalloc_stats', 'free') and result:
+ raise Ignored('Malformed input')
+
+ if func in ('free', 'realloc'):
+ ptr = int(args[0], 16)
+ if ptr and ptr not in pointers:
+ raise Ignored('Did not see an alloc for pointer')
+ args[0] = "#%d" % pointers[ptr]
+ del pointers[ptr]
+
+ if result:
+ result = int(result, 16)
+ if not result:
+ raise Ignored('Result is NULL')
+ result = "#%d" % pointers[result]
+
+ print('%d %d %s(%s)%s' % (pid, tid, func, ','.join(args),
+ '=%s' % result if result else ''))
+
+ except Exception as e:
+ print('Ignored "%s": %s' % (line, e.message), file=sys.stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/memory/replace/logalloc/replay/moz.build b/memory/replace/logalloc/replay/moz.build
new file mode 100644
index 000000000..bb976908f
--- /dev/null
+++ b/memory/replace/logalloc/replay/moz.build
@@ -0,0 +1,27 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Program('logalloc-replay')
+
+SOURCES += [
+ '../FdPrintf.cpp',
+ '/mfbt/Assertions.cpp',
+ 'Replay.cpp',
+]
+
+LOCAL_INCLUDES += [
+ '..',
+]
+
+# Link replace-malloc and the default allocator.
+USE_LIBS += [
+ 'memory',
+]
+
+# The memory library defines this, so it's needed here too.
+DEFINES['IMPL_MFBT'] = True
+
+DISABLE_STL_WRAPPING = True
diff --git a/memory/replace/logalloc/replay/replay.log b/memory/replace/logalloc/replay/replay.log
new file mode 100644
index 000000000..c56dfab12
--- /dev/null
+++ b/memory/replace/logalloc/replay/replay.log
@@ -0,0 +1,17 @@
+1 1 malloc(42)=#1
+1 1 malloc(24)=#2
+2 2 malloc(42)=#1
+1 1 free(#1)
+1 1 posix_memalign(4096,1024)=#1
+1 1 calloc(4,42)=#3
+1 1 free(#2)
+1 1 realloc(#3,84)=#2
+1 1 aligned_alloc(512,1024)=#3
+1 1 memalign(512,1024)=#4
+1 1 valloc(1024)=#5
+1 1 jemalloc_stats()
+1 1 free(#5)
+1 1 free(#4)
+1 1 free(#3)
+1 1 free(#2)
+1 1 free(#1)
diff --git a/memory/replace/moz.build b/memory/replace/moz.build
new file mode 100644
index 000000000..111b8456b
--- /dev/null
+++ b/memory/replace/moz.build
@@ -0,0 +1,20 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIRS += [
+ 'logalloc',
+ 'replace',
+]
+
+# Build jemalloc3 as a replace-malloc lib when building with mozjemalloc
+if not CONFIG['MOZ_JEMALLOC4']:
+ DIRS += ['jemalloc']
+
+if CONFIG['MOZ_REPLACE_MALLOC_LINKAGE'] == 'dummy library':
+ DIRS += ['dummy']
+
+if CONFIG['MOZ_DMD']:
+ DIRS += ['dmd']
diff --git a/memory/replace/replace/ReplaceMalloc.cpp b/memory/replace/replace/ReplaceMalloc.cpp
new file mode 100644
index 000000000..67373092b
--- /dev/null
+++ b/memory/replace/replace/ReplaceMalloc.cpp
@@ -0,0 +1,253 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "replace_malloc.h"
+#include <errno.h>
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Atomics.h"
+
+/* Replace-malloc library allowing different kinds of dispatch.
+ * The long term goal is to allow multiple replace-malloc libraries
+ * to be loaded and coexist properly.
+ * This is however a limited version to fulfil more immediate needs.
+ */
+static const malloc_table_t* gFuncs = nullptr;
+static mozilla::Atomic<const malloc_hook_table_t*> gHookTable(nullptr);
+
+class GenericReplaceMallocBridge : public ReplaceMallocBridge
+{
+ virtual const malloc_table_t*
+ RegisterHook(const char* aName, const malloc_table_t* aTable,
+ const malloc_hook_table_t* aHookTable) override
+ {
+ // Can't register a hook before replace_init is called.
+ if (!gFuncs) {
+ return nullptr;
+ }
+
+ // Expect a name to be given.
+ if (!aName) {
+ return nullptr;
+ }
+
+ // Giving a malloc_table_t is not supported yet.
+ if (aTable) {
+ return nullptr;
+ }
+
+ if (aHookTable) {
+ // Expect at least a malloc and a free hook.
+ if (!aHookTable->malloc_hook || !aHookTable->free_hook) {
+ return nullptr;
+ }
+ gHookTable = const_cast<malloc_hook_table_t*>(aHookTable);
+ return gFuncs;
+ }
+ gHookTable = nullptr;
+
+ return nullptr;
+ }
+};
+
+void
+replace_init(const malloc_table_t* aTable)
+{
+ gFuncs = aTable;
+}
+
+ReplaceMallocBridge*
+replace_get_bridge()
+{
+ static GenericReplaceMallocBridge bridge;
+ return &bridge;
+}
+
+void*
+replace_malloc(size_t aSize)
+{
+ void* ptr = gFuncs->malloc(aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ return hook_table->malloc_hook(ptr, aSize);
+ }
+ return ptr;
+}
+
+int
+replace_posix_memalign(void** aPtr, size_t aAlignment, size_t aSize)
+{
+ int ret = gFuncs->posix_memalign(aPtr, aAlignment, aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ if (hook_table->posix_memalign_hook) {
+ return hook_table->posix_memalign_hook(ret, aPtr, aAlignment, aSize);
+ }
+ void* ptr = hook_table->malloc_hook(*aPtr, aSize);
+ if (!ptr && *aPtr) {
+ *aPtr = ptr;
+ ret = ENOMEM;
+ }
+ }
+ return ret;
+}
+
+void*
+replace_aligned_alloc(size_t aAlignment, size_t aSize)
+{
+ void* ptr = gFuncs->aligned_alloc(aAlignment, aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ if (hook_table->aligned_alloc_hook) {
+ return hook_table->aligned_alloc_hook(ptr, aAlignment, aSize);
+ }
+ return hook_table->malloc_hook(ptr, aSize);
+ }
+ return ptr;
+}
+
+void*
+replace_calloc(size_t aNum, size_t aSize)
+{
+ void* ptr = gFuncs->calloc(aNum, aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ if (hook_table->calloc_hook) {
+ return hook_table->calloc_hook(ptr, aNum, aSize);
+ }
+ mozilla::CheckedInt<size_t> size = mozilla::CheckedInt<size_t>(aNum) * aSize;
+ if (size.isValid()) {
+ return hook_table->malloc_hook(ptr, size.value());
+ }
+ /* If the multiplication above overflows, calloc will have failed, so ptr
+ * is null. But the hook might still be interested in knowing about the
+ * allocation attempt. The choice made is to indicate the overflow with
+ * the biggest value of a size_t, which is not that bad an indicator:
+ * there are only 5 prime factors to 2^32 - 1 and 7 prime factors to
+ * 2^64 - 1 and none of them is going to come directly out of sizeof().
+     * IOW, the likelihood of aNum * aSize being exactly SIZE_MAX is low
+     * enough, and SIZE_MAX still conveys that the attempted allocation was
+     * too big anyway. */
+ return hook_table->malloc_hook(ptr, SIZE_MAX);
+ }
+ return ptr;
+}
+
+void*
+replace_realloc(void* aPtr, size_t aSize)
+{
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ if (hook_table->realloc_hook_before) {
+ hook_table->realloc_hook_before(aPtr);
+ } else {
+ hook_table->free_hook(aPtr);
+ }
+ }
+ void* new_ptr = gFuncs->realloc(aPtr, aSize);
+ /* The hook table might have changed since before realloc was called,
+ * either because of unregistration or registration of a new table.
+ * We however go with consistency and use the same hook table as the
+ * one that was used before the call to realloc. */
+ if (hook_table) {
+ if (hook_table->realloc_hook) {
+ /* aPtr is likely invalid when reaching here, it is only given for
+ * tracking purposes, and should not be dereferenced. */
+ return hook_table->realloc_hook(new_ptr, aPtr, aSize);
+ }
+ return hook_table->malloc_hook(new_ptr, aSize);
+ }
+ return new_ptr;
+}
+
+void
+replace_free(void* aPtr)
+{
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ hook_table->free_hook(aPtr);
+ }
+ gFuncs->free(aPtr);
+}
+
+void*
+replace_memalign(size_t aAlignment, size_t aSize)
+{
+ void* ptr = gFuncs->memalign(aAlignment, aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ if (hook_table->memalign_hook) {
+ return hook_table->memalign_hook(ptr, aAlignment, aSize);
+ }
+ return hook_table->malloc_hook(ptr, aSize);
+ }
+ return ptr;
+}
+
+void*
+replace_valloc(size_t aSize)
+{
+ void* ptr = gFuncs->valloc(aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table) {
+ if (hook_table->valloc_hook) {
+ return hook_table->valloc_hook(ptr, aSize);
+ }
+ return hook_table->malloc_hook(ptr, aSize);
+ }
+ return ptr;
+}
+
+size_t
+replace_malloc_usable_size(usable_ptr_t aPtr)
+{
+ size_t ret = gFuncs->malloc_usable_size(aPtr);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table && hook_table->malloc_usable_size_hook) {
+ return hook_table->malloc_usable_size_hook(ret, aPtr);
+ }
+ return ret;
+}
+
+size_t
+replace_malloc_good_size(size_t aSize)
+{
+ size_t ret = gFuncs->malloc_good_size(aSize);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table && hook_table->malloc_good_size_hook) {
+ return hook_table->malloc_good_size_hook(ret, aSize);
+ }
+ return ret;
+}
+
+void
+replace_jemalloc_stats(jemalloc_stats_t* aStats)
+{
+ gFuncs->jemalloc_stats(aStats);
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table && hook_table->jemalloc_stats_hook) {
+ hook_table->jemalloc_stats_hook(aStats);
+ }
+}
+
+void
+replace_jemalloc_purge_freed_pages(void)
+{
+ gFuncs->jemalloc_purge_freed_pages();
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table && hook_table->jemalloc_purge_freed_pages_hook) {
+ hook_table->jemalloc_purge_freed_pages_hook();
+ }
+}
+
+void
+replace_jemalloc_free_dirty_pages(void)
+{
+ gFuncs->jemalloc_free_dirty_pages();
+ const malloc_hook_table_t* hook_table = gHookTable;
+ if (hook_table && hook_table->jemalloc_free_dirty_pages_hook) {
+ hook_table->jemalloc_free_dirty_pages_hook();
+ }
+}
diff --git a/memory/replace/replace/moz.build b/memory/replace/replace/moz.build
new file mode 100644
index 000000000..494b552a1
--- /dev/null
+++ b/memory/replace/replace/moz.build
@@ -0,0 +1,13 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SharedLibrary('replace_malloc')
+
+SOURCES += [
+ 'ReplaceMalloc.cpp',
+]
+
+DISABLE_STL_WRAPPING = True
diff --git a/memory/volatile/VolatileBuffer.h b/memory/volatile/VolatileBuffer.h
new file mode 100644
index 000000000..ebb471332
--- /dev/null
+++ b/memory/volatile/VolatileBuffer.h
@@ -0,0 +1,173 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozalloc_VolatileBuffer_h
+#define mozalloc_VolatileBuffer_h
+
+#include "mozilla/mozalloc.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/RefCounted.h"
+
+/* VolatileBuffer
+ *
+ * This class represents a piece of memory that can potentially be reclaimed
+ * by the OS when not in use. As long as there are one or more
+ * VolatileBufferPtrs holding on to a VolatileBuffer, the memory will remain
+ * available. However, when there are no VolatileBufferPtrs holding a
+ * VolatileBuffer, the OS can purge the pages if it wants to. The OS can make
+ * better decisions about what pages to purge than we can.
+ *
+ * VolatileBuffers may not always be volatile: if the allocation is too
+ * small, if the OS doesn't support the feature, or if the OS chooses not
+ * to, the buffer is allocated on the heap instead.
+ *
+ * VolatileBuffer allocations are fallible. They are intended for uses where
+ * one may allocate large buffers for caching data. Init() must be called
+ * exactly once.
+ *
+ * After getting a reference to a VolatileBuffer through a VolatileBufferPtr,
+ * WasBufferPurged() can be used to check whether the OS purged any pages in
+ * the buffer. The OS cannot purge a buffer immediately after a VolatileBuffer
+ * is initialized: at least one VolatileBufferPtr must have been created and
+ * released before the buffer can be purged, so the first use of
+ * VolatileBufferPtr does not need to check WasBufferPurged().
+ *
+ * When a buffer is purged, some or all of the buffer is zeroed out. This
+ * API cannot tell which parts of the buffer were lost.
+ *
+ * VolatileBuffer and VolatileBufferPtr are threadsafe.
+ */
+
+namespace mozilla {
+
+class VolatileBuffer
+{
+ friend class VolatileBufferPtr_base;
+public:
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(VolatileBuffer)
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VolatileBuffer)
+
+ VolatileBuffer();
+
+ /* aAlignment must be a multiple of the pointer size */
+ bool Init(size_t aSize, size_t aAlignment = sizeof(void*));
+
+ size_t HeapSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
+ size_t NonHeapSizeOfExcludingThis() const;
+ bool OnHeap() const;
+
+protected:
+ bool Lock(void** aBuf);
+ void Unlock();
+
+private:
+ ~VolatileBuffer();
+
+ /**
+ * Protects mLockCount, mFirstLock, and changes to the volatility of our
+ * buffer. Other member variables are read-only except in Init() and the
+ * destructor.
+ */
+ Mutex mMutex;
+
+ void* mBuf;
+ size_t mSize;
+ int mLockCount;
+#if defined(ANDROID)
+ int mFd;
+#elif defined(XP_DARWIN)
+ bool mHeap;
+#elif defined(XP_WIN)
+ bool mHeap;
+ bool mFirstLock;
+#endif
+};
+
+class VolatileBufferPtr_base {
+public:
+ explicit VolatileBufferPtr_base(VolatileBuffer* vbuf)
+ : mVBuf(vbuf)
+ , mMapping(nullptr)
+ , mPurged(false)
+ {
+ Lock();
+ }
+
+ ~VolatileBufferPtr_base() {
+ Unlock();
+ }
+
+ bool WasBufferPurged() const {
+ return mPurged;
+ }
+
+protected:
+ RefPtr<VolatileBuffer> mVBuf;
+ void* mMapping;
+
+ void Set(VolatileBuffer* vbuf) {
+ Unlock();
+ mVBuf = vbuf;
+ Lock();
+ }
+
+private:
+ bool mPurged;
+
+ void Lock() {
+ if (mVBuf) {
+ mPurged = !mVBuf->Lock(&mMapping);
+ } else {
+ mMapping = nullptr;
+ mPurged = false;
+ }
+ }
+
+ void Unlock() {
+ if (mVBuf) {
+ mVBuf->Unlock();
+ }
+ }
+};
+
+template <class T>
+class VolatileBufferPtr : public VolatileBufferPtr_base
+{
+public:
+ explicit VolatileBufferPtr(VolatileBuffer* vbuf) : VolatileBufferPtr_base(vbuf) {}
+ VolatileBufferPtr() : VolatileBufferPtr_base(nullptr) {}
+
+ VolatileBufferPtr(VolatileBufferPtr&& aOther)
+ : VolatileBufferPtr_base(aOther.mVBuf)
+ {
+ aOther.Set(nullptr);
+ }
+
+ operator T*() const {
+ return (T*) mMapping;
+ }
+
+ VolatileBufferPtr& operator=(VolatileBuffer* aVBuf)
+ {
+ Set(aVBuf);
+ return *this;
+ }
+
+ VolatileBufferPtr& operator=(VolatileBufferPtr&& aOther)
+ {
+ MOZ_ASSERT(this != &aOther, "Self-moves are prohibited");
+ Set(aOther.mVBuf);
+ aOther.Set(nullptr);
+ return *this;
+ }
+
+private:
+ VolatileBufferPtr(VolatileBufferPtr const& vbufptr) = delete;
+};
+
+} // namespace mozilla
+
+#endif /* mozalloc_VolatileBuffer_h */
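The header comment above describes the intended lock/check/regenerate pattern; as a usage sketch (not part of the patch; CacheRoundTrip is a hypothetical helper, and the header is assumed to be exported as mozilla/VolatileBuffer.h, as the test below includes it):

#include "mozilla/VolatileBuffer.h"

#include <string.h>

using namespace mozilla;

// Writes aData into a volatile buffer, unlocks it, then re-locks it and
// checks whether the contents survived. Returns false when the cache must
// be regenerated (allocation failed or the OS purged the pages).
static bool CacheRoundTrip(const char* aData, size_t aLen)
{
  RefPtr<VolatileBuffer> buf = new VolatileBuffer();
  if (!buf->Init(aLen)) { // Fallible by design; callers must check.
    return false;
  }

  {
    VolatileBufferPtr<char> writer(buf); // Locks the pages for this scope.
    memcpy(writer, aData, aLen);
  } // Unlocked here: the OS may purge the pages from now on.

  VolatileBufferPtr<char> reader(buf); // Re-locks the pages.
  if (reader.WasBufferPurged()) {
    return false; // Some or all bytes were zeroed out.
  }
  return memcmp(reader, aData, aLen) == 0;
}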
diff --git a/memory/volatile/VolatileBufferAshmem.cpp b/memory/volatile/VolatileBufferAshmem.cpp
new file mode 100644
index 000000000..09904d6ef
--- /dev/null
+++ b/memory/volatile/VolatileBufferAshmem.cpp
@@ -0,0 +1,140 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VolatileBuffer.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/mozalloc.h"
+
+#include <fcntl.h>
+#include <linux/ashmem.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifdef MOZ_MEMORY
+extern "C" int posix_memalign(void** memptr, size_t alignment, size_t size);
+#endif
+
+#define MIN_VOLATILE_ALLOC_SIZE 8192
+
+namespace mozilla {
+
+VolatileBuffer::VolatileBuffer()
+ : mMutex("VolatileBuffer")
+ , mBuf(nullptr)
+ , mSize(0)
+ , mLockCount(0)
+ , mFd(-1)
+{
+}
+
+bool
+VolatileBuffer::Init(size_t aSize, size_t aAlignment)
+{
+ MOZ_ASSERT(!mSize && !mBuf, "Init called twice");
+ MOZ_ASSERT(!(aAlignment % sizeof(void *)),
+ "Alignment must be multiple of pointer size");
+
+ mSize = aSize;
+ if (aSize < MIN_VOLATILE_ALLOC_SIZE) {
+ goto heap_alloc;
+ }
+
+ mFd = open("/" ASHMEM_NAME_DEF, O_RDWR);
+ if (mFd < 0) {
+ goto heap_alloc;
+ }
+
+ if (ioctl(mFd, ASHMEM_SET_SIZE, mSize) < 0) {
+ goto heap_alloc;
+ }
+
+ mBuf = mmap(nullptr, mSize, PROT_READ | PROT_WRITE, MAP_SHARED, mFd, 0);
+ if (mBuf != MAP_FAILED) {
+ return true;
+ }
+
+heap_alloc:
+ mBuf = nullptr;
+ if (mFd >= 0) {
+ close(mFd);
+ mFd = -1;
+ }
+
+#ifdef MOZ_MEMORY
+ posix_memalign(&mBuf, aAlignment, aSize);
+#else
+ mBuf = memalign(aAlignment, aSize);
+#endif
+ return !!mBuf;
+}
+
+VolatileBuffer::~VolatileBuffer()
+{
+ MOZ_ASSERT(mLockCount == 0, "Being destroyed with non-zero lock count?");
+
+ if (OnHeap()) {
+ free(mBuf);
+ } else {
+ munmap(mBuf, mSize);
+ close(mFd);
+ }
+}
+
+bool
+VolatileBuffer::Lock(void** aBuf)
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mBuf, "Attempting to lock an uninitialized VolatileBuffer");
+
+ *aBuf = mBuf;
+ if (++mLockCount > 1 || OnHeap()) {
+ return true;
+ }
+
+ // Zero offset and zero length means we want to pin/unpin the entire thing.
+ struct ashmem_pin pin = { 0, 0 };
+ return ioctl(mFd, ASHMEM_PIN, &pin) == ASHMEM_NOT_PURGED;
+}
+
+void
+VolatileBuffer::Unlock()
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mLockCount > 0, "VolatileBuffer unlocked too many times!");
+ if (--mLockCount || OnHeap()) {
+ return;
+ }
+
+ struct ashmem_pin pin = { 0, 0 };
+ ioctl(mFd, ASHMEM_UNPIN, &pin);
+}
+
+bool
+VolatileBuffer::OnHeap() const
+{
+ return mFd < 0;
+}
+
+size_t
+VolatileBuffer::HeapSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+{
+ return OnHeap() ? aMallocSizeOf(mBuf) : 0;
+}
+
+size_t
+VolatileBuffer::NonHeapSizeOfExcludingThis() const
+{
+ if (OnHeap()) {
+ return 0;
+ }
+
+ return (mSize + (PAGE_SIZE - 1)) & PAGE_MASK;
+}
+
+} // namespace mozilla
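Stripped of the class machinery, the ashmem protocol used above is set-size, map, unpin, pin-and-check; a stand-alone sketch under the same ioctls (Android-only; RoundTrip is a hypothetical helper):

#include <fcntl.h>
#include <linux/ashmem.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

// Creates an ashmem region, unpins it so the kernel may reclaim the pages,
// then pins it again and reports whether the contents survived.
static bool RoundTrip(size_t aSize)
{
  int fd = open("/" ASHMEM_NAME_DEF, O_RDWR);
  if (fd < 0) {
    return false;
  }
  if (ioctl(fd, ASHMEM_SET_SIZE, aSize) < 0) {
    close(fd);
    return false;
  }

  void* buf = mmap(nullptr, aSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (buf == MAP_FAILED) {
    close(fd);
    return false;
  }

  struct ashmem_pin range = { 0, 0 }; // Zero offset/length = whole region.
  ioctl(fd, ASHMEM_UNPIN, &range);    // The kernel may purge from here on.

  bool survived = ioctl(fd, ASHMEM_PIN, &range) == ASHMEM_NOT_PURGED;

  munmap(buf, aSize);
  close(fd);
  return survived;
}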
diff --git a/memory/volatile/VolatileBufferFallback.cpp b/memory/volatile/VolatileBufferFallback.cpp
new file mode 100644
index 000000000..f4bfe39c6
--- /dev/null
+++ b/memory/volatile/VolatileBufferFallback.cpp
@@ -0,0 +1,91 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VolatileBuffer.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/mozalloc.h"
+
+#ifdef MOZ_MEMORY
+extern "C" int posix_memalign(void** memptr, size_t alignment, size_t size);
+#endif
+
+namespace mozilla {
+
+VolatileBuffer::VolatileBuffer()
+ : mMutex("VolatileBuffer")
+ , mBuf(nullptr)
+ , mSize(0)
+ , mLockCount(0)
+{
+}
+
+bool VolatileBuffer::Init(size_t aSize, size_t aAlignment)
+{
+ MOZ_ASSERT(!mSize && !mBuf, "Init called twice");
+ MOZ_ASSERT(!(aAlignment % sizeof(void *)),
+ "Alignment must be multiple of pointer size");
+
+ mSize = aSize;
+#if defined(MOZ_MEMORY)
+ if (posix_memalign(&mBuf, aAlignment, aSize) != 0) {
+ return false;
+ }
+#elif defined(HAVE_POSIX_MEMALIGN)
+ if (moz_posix_memalign(&mBuf, aAlignment, aSize) != 0) {
+ return false;
+ }
+#else
+#error "No memalign implementation found"
+#endif
+ return !!mBuf;
+}
+
+VolatileBuffer::~VolatileBuffer()
+{
+ MOZ_ASSERT(mLockCount == 0, "Being destroyed with non-zero lock count?");
+
+ free(mBuf);
+}
+
+bool
+VolatileBuffer::Lock(void** aBuf)
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mBuf, "Attempting to lock an uninitialized VolatileBuffer");
+
+ *aBuf = mBuf;
+ mLockCount++;
+
+ return true;
+}
+
+void
+VolatileBuffer::Unlock()
+{
+ MutexAutoLock lock(mMutex);
+
+ mLockCount--;
+ MOZ_ASSERT(mLockCount >= 0, "VolatileBuffer unlocked too many times!");
+}
+
+bool
+VolatileBuffer::OnHeap() const
+{
+ return true;
+}
+
+size_t
+VolatileBuffer::HeapSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+{
+ return aMallocSizeOf(mBuf);
+}
+
+size_t
+VolatileBuffer::NonHeapSizeOfExcludingThis() const
+{
+ return 0;
+}
+
+} // namespace mozilla
diff --git a/memory/volatile/VolatileBufferOSX.cpp b/memory/volatile/VolatileBufferOSX.cpp
new file mode 100644
index 000000000..af39bcae1
--- /dev/null
+++ b/memory/volatile/VolatileBufferOSX.cpp
@@ -0,0 +1,129 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VolatileBuffer.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/mozalloc.h"
+
+#include <mach/mach.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#define MIN_VOLATILE_ALLOC_SIZE 8192
+
+namespace mozilla {
+
+VolatileBuffer::VolatileBuffer()
+ : mMutex("VolatileBuffer")
+ , mBuf(nullptr)
+ , mSize(0)
+ , mLockCount(0)
+ , mHeap(false)
+{
+}
+
+bool
+VolatileBuffer::Init(size_t aSize, size_t aAlignment)
+{
+ MOZ_ASSERT(!mSize && !mBuf, "Init called twice");
+ MOZ_ASSERT(!(aAlignment % sizeof(void *)),
+ "Alignment must be multiple of pointer size");
+
+ mSize = aSize;
+
+ kern_return_t ret = 0;
+ if (aSize < MIN_VOLATILE_ALLOC_SIZE) {
+ goto heap_alloc;
+ }
+
+ ret = vm_allocate(mach_task_self(),
+ (vm_address_t*)&mBuf,
+ mSize,
+ VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE);
+ if (ret == KERN_SUCCESS) {
+ return true;
+ }
+
+heap_alloc:
+ (void)moz_posix_memalign(&mBuf, aAlignment, aSize);
+ mHeap = true;
+ return !!mBuf;
+}
+
+VolatileBuffer::~VolatileBuffer()
+{
+ MOZ_ASSERT(mLockCount == 0, "Being destroyed with non-zero lock count?");
+
+ if (OnHeap()) {
+ free(mBuf);
+ } else {
+ vm_deallocate(mach_task_self(), (vm_address_t)mBuf, mSize);
+ }
+}
+
+bool
+VolatileBuffer::Lock(void** aBuf)
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mBuf, "Attempting to lock an uninitialized VolatileBuffer");
+
+ *aBuf = mBuf;
+ if (++mLockCount > 1 || OnHeap()) {
+ return true;
+ }
+
+ int state = VM_PURGABLE_NONVOLATILE;
+ kern_return_t ret =
+ vm_purgable_control(mach_task_self(),
+ (vm_address_t)mBuf,
+ VM_PURGABLE_SET_STATE,
+ &state);
+ return ret == KERN_SUCCESS && !(state & VM_PURGABLE_EMPTY);
+}
+
+void
+VolatileBuffer::Unlock()
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mLockCount > 0, "VolatileBuffer unlocked too many times!");
+ if (--mLockCount || OnHeap()) {
+ return;
+ }
+
+ int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT;
+ DebugOnly<kern_return_t> ret =
+ vm_purgable_control(mach_task_self(),
+ (vm_address_t)mBuf,
+ VM_PURGABLE_SET_STATE,
+ &state);
+ MOZ_ASSERT(ret == KERN_SUCCESS, "Failed to set buffer as purgable");
+}
+
+bool
+VolatileBuffer::OnHeap() const
+{
+ return mHeap;
+}
+
+size_t
+VolatileBuffer::HeapSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+{
+ return OnHeap() ? aMallocSizeOf(mBuf) : 0;
+}
+
+size_t
+VolatileBuffer::NonHeapSizeOfExcludingThis() const
+{
+ if (OnHeap()) {
+ return 0;
+ }
+
+ unsigned long pagemask = getpagesize() - 1;
+ return (mSize + pagemask) & ~pagemask;
+}
+
+} // namespace mozilla
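The Mach purgeable-memory calls above implement the same lock/unlock protocol; condensed into a stand-alone sketch (PurgeableRoundTrip is a hypothetical helper):

#include <mach/mach.h>

// Allocates purgeable pages, marks them volatile so the kernel may empty
// them, then marks them non-volatile again and reports whether the contents
// survived.
static bool PurgeableRoundTrip(vm_size_t aSize)
{
  vm_address_t addr = 0;
  if (vm_allocate(mach_task_self(), &addr, aSize,
                  VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE) != KERN_SUCCESS) {
    return false;
  }

  int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT;
  vm_purgable_control(mach_task_self(), addr, VM_PURGABLE_SET_STATE, &state);
  // While volatile, the kernel may empty the pages at any time.

  state = VM_PURGABLE_NONVOLATILE;
  kern_return_t ret =
    vm_purgable_control(mach_task_self(), addr, VM_PURGABLE_SET_STATE, &state);
  bool survived = ret == KERN_SUCCESS && !(state & VM_PURGABLE_EMPTY);

  vm_deallocate(mach_task_self(), addr, aSize);
  return survived;
}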
diff --git a/memory/volatile/VolatileBufferWindows.cpp b/memory/volatile/VolatileBufferWindows.cpp
new file mode 100644
index 000000000..b12e0eccb
--- /dev/null
+++ b/memory/volatile/VolatileBufferWindows.cpp
@@ -0,0 +1,160 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VolatileBuffer.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/mozalloc.h"
+#include "mozilla/WindowsVersion.h"
+
+#include <windows.h>
+
+#ifdef MOZ_MEMORY
+extern "C" int posix_memalign(void** memptr, size_t alignment, size_t size);
+#endif
+
+#ifndef MEM_RESET_UNDO
+#define MEM_RESET_UNDO 0x1000000
+#endif
+
+#define MIN_VOLATILE_ALLOC_SIZE 8192
+
+namespace mozilla {
+
+VolatileBuffer::VolatileBuffer()
+ : mMutex("VolatileBuffer")
+ , mBuf(nullptr)
+ , mSize(0)
+ , mLockCount(0)
+ , mHeap(false)
+ , mFirstLock(true)
+{
+}
+
+bool
+VolatileBuffer::Init(size_t aSize, size_t aAlignment)
+{
+ MOZ_ASSERT(!mSize && !mBuf, "Init called twice");
+ MOZ_ASSERT(!(aAlignment % sizeof(void *)),
+ "Alignment must be multiple of pointer size");
+
+ mSize = aSize;
+ if (aSize < MIN_VOLATILE_ALLOC_SIZE) {
+ goto heap_alloc;
+ }
+
+ static bool sUndoSupported = IsWin8OrLater();
+ if (!sUndoSupported) {
+ goto heap_alloc;
+ }
+
+ mBuf = VirtualAllocEx(GetCurrentProcess(),
+ nullptr,
+ mSize,
+ MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+ if (mBuf) {
+ return true;
+ }
+
+heap_alloc:
+#ifdef MOZ_MEMORY
+ posix_memalign(&mBuf, aAlignment, aSize);
+#else
+ mBuf = _aligned_malloc(aSize, aAlignment);
+#endif
+ mHeap = true;
+ return !!mBuf;
+}
+
+VolatileBuffer::~VolatileBuffer()
+{
+ MOZ_ASSERT(mLockCount == 0, "Being destroyed with non-zero lock count?");
+
+ if (OnHeap()) {
+#ifdef MOZ_MEMORY
+ free(mBuf);
+#else
+ _aligned_free(mBuf);
+#endif
+ } else {
+ VirtualFreeEx(GetCurrentProcess(), mBuf, 0, MEM_RELEASE);
+ }
+}
+
+bool
+VolatileBuffer::Lock(void** aBuf)
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mBuf, "Attempting to lock an uninitialized VolatileBuffer");
+
+ *aBuf = mBuf;
+ if (++mLockCount > 1 || OnHeap()) {
+ return true;
+ }
+
+ // MEM_RESET_UNDO's behavior is undefined when called on memory that
+ // hasn't been MEM_RESET.
+ if (mFirstLock) {
+ mFirstLock = false;
+ return true;
+ }
+
+ void* addr = VirtualAllocEx(GetCurrentProcess(),
+ mBuf,
+ mSize,
+ MEM_RESET_UNDO,
+ PAGE_READWRITE);
+ return !!addr;
+}
+
+void
+VolatileBuffer::Unlock()
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mLockCount > 0, "VolatileBuffer unlocked too many times!");
+ if (--mLockCount || OnHeap()) {
+ return;
+ }
+
+ void* addr = VirtualAllocEx(GetCurrentProcess(),
+ mBuf,
+ mSize,
+ MEM_RESET,
+ PAGE_READWRITE);
+ MOZ_ASSERT(addr, "Failed to MEM_RESET");
+}
+
+bool
+VolatileBuffer::OnHeap() const
+{
+ return mHeap;
+}
+
+size_t
+VolatileBuffer::HeapSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+{
+ if (OnHeap()) {
+#ifdef MOZ_MEMORY
+ return aMallocSizeOf(mBuf);
+#else
+ return mSize;
+#endif
+ }
+
+ return 0;
+}
+
+size_t
+VolatileBuffer::NonHeapSizeOfExcludingThis() const
+{
+ if (OnHeap()) {
+ return 0;
+ }
+
+ return (mSize + 4095) & ~4095;
+}
+
+} // namespace mozilla
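The MEM_RESET / MEM_RESET_UNDO pair above works the same way; condensed into a stand-alone sketch (ResetRoundTrip is a hypothetical helper; like the code above, it assumes Windows 8 or later for MEM_RESET_UNDO):

#include <windows.h>

#ifndef MEM_RESET_UNDO
#define MEM_RESET_UNDO 0x1000000
#endif

// Commits pages, marks their contents as discardable with MEM_RESET, then
// asks for them back with MEM_RESET_UNDO; a null return from the undo call
// means the data was discarded in the meantime.
static bool ResetRoundTrip(SIZE_T aSize)
{
  void* buf = VirtualAlloc(nullptr, aSize, MEM_COMMIT | MEM_RESERVE,
                           PAGE_READWRITE);
  if (!buf) {
    return false;
  }

  // "Unlock": the contents are no longer needed; the kernel may repurpose
  // the physical pages.
  VirtualAlloc(buf, aSize, MEM_RESET, PAGE_READWRITE);

  // "Lock": ask for the contents back.
  bool survived =
    VirtualAlloc(buf, aSize, MEM_RESET_UNDO, PAGE_READWRITE) != nullptr;

  VirtualFree(buf, 0, MEM_RELEASE);
  return survived;
}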
diff --git a/memory/volatile/moz.build b/memory/volatile/moz.build
new file mode 100644
index 000000000..deacdf433
--- /dev/null
+++ b/memory/volatile/moz.build
@@ -0,0 +1,31 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+NO_VISIBILITY_FLAGS = True
+
+EXPORTS.mozilla += [
+ 'VolatileBuffer.h',
+]
+
+if CONFIG['OS_TARGET'] == 'Android':
+ UNIFIED_SOURCES += [
+ 'VolatileBufferAshmem.cpp',
+ ]
+elif CONFIG['OS_TARGET'] == 'Darwin':
+ UNIFIED_SOURCES += [
+ 'VolatileBufferOSX.cpp',
+ ]
+elif CONFIG['OS_TARGET'] == 'WINNT':
+ UNIFIED_SOURCES += [
+ 'VolatileBufferWindows.cpp',
+ ]
+else:
+ UNIFIED_SOURCES += [
+ 'VolatileBufferFallback.cpp',
+ ]
+
+FINAL_LIBRARY = 'xul'
+
+TEST_DIRS += ['tests']
diff --git a/memory/volatile/tests/TestVolatileBuffer.cpp b/memory/volatile/tests/TestVolatileBuffer.cpp
new file mode 100644
index 000000000..9a4a8781d
--- /dev/null
+++ b/memory/volatile/tests/TestVolatileBuffer.cpp
@@ -0,0 +1,102 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+#include "mozilla/VolatileBuffer.h"
+#include <string.h>
+
+#if defined(ANDROID)
+#include <fcntl.h>
+#include <linux/ashmem.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#elif defined(XP_DARWIN)
+#include <mach/mach.h>
+#endif
+
+using namespace mozilla;
+
+TEST(VolatileBufferTest, HeapVolatileBuffersWork)
+{
+ RefPtr<VolatileBuffer> heapbuf = new VolatileBuffer();
+
+ ASSERT_TRUE(heapbuf) << "Failed to create VolatileBuffer";
+ ASSERT_TRUE(heapbuf->Init(512)) << "Failed to initialize VolatileBuffer";
+
+ VolatileBufferPtr<char> ptr(heapbuf);
+
+ EXPECT_FALSE(ptr.WasBufferPurged())
+ << "Buffer should not be purged immediately after initialization";
+ EXPECT_TRUE(ptr) << "Couldn't get pointer from VolatileBufferPtr";
+}
+
+TEST(VolatileBufferTest, RealVolatileBuffersWork)
+{
+ RefPtr<VolatileBuffer> buf = new VolatileBuffer();
+
+ ASSERT_TRUE(buf) << "Failed to create VolatileBuffer";
+ ASSERT_TRUE(buf->Init(16384)) << "Failed to initialize VolatileBuffer";
+
+ const char teststr[] = "foobar";
+
+ {
+ VolatileBufferPtr<char> ptr(buf);
+
+ EXPECT_FALSE(ptr.WasBufferPurged())
+ << "Buffer should not be purged immediately after initialization";
+ EXPECT_TRUE(ptr) << "Couldn't get pointer from VolatileBufferPtr";
+
+ {
+ VolatileBufferPtr<char> ptr2(buf);
+
+ EXPECT_FALSE(ptr2.WasBufferPurged())
+ << "Failed to lock buffer again while currently locked";
+ ASSERT_TRUE(ptr2) << "Didn't get a pointer on the second lock";
+
+ strcpy(ptr2, teststr);
+ }
+ }
+
+ {
+ VolatileBufferPtr<char> ptr(buf);
+
+ EXPECT_FALSE(ptr.WasBufferPurged())
+ << "Buffer was immediately purged after unlock";
+ EXPECT_STREQ(ptr, teststr) << "Buffer failed to retain data after unlock";
+ }
+
+ // Test purging if we know how to
+#if defined(MOZ_WIDGET_GONK)
+ // This also works on Android, but we need root.
+ int fd = open("/" ASHMEM_NAME_DEF, O_RDWR);
+
+ ASSERT_GE(fd, 0) << "Failed to open ashmem device";
+ ASSERT_GE(ioctl(fd, ASHMEM_PURGE_ALL_CACHES, NULL), 0)
+ << "Failed to purge ashmem caches";
+#elif defined(XP_DARWIN)
+ int state;
+ vm_purgable_control(mach_task_self(), (vm_address_t)NULL,
+ VM_PURGABLE_PURGE_ALL, &state);
+#else
+ return;
+#endif
+
+ EXPECT_GT(buf->NonHeapSizeOfExcludingThis(), 0ul)
+ << "Buffer should not be allocated on heap";
+
+ {
+ VolatileBufferPtr<char> ptr(buf);
+
+ EXPECT_TRUE(ptr.WasBufferPurged())
+ << "Buffer should not be unpurged after forced purge";
+ EXPECT_STRNE(ptr, teststr) << "Purge did not actually purge data";
+ }
+
+ {
+ VolatileBufferPtr<char> ptr(buf);
+
+ EXPECT_FALSE(ptr.WasBufferPurged()) << "Buffer still purged after lock";
+ }
+}
diff --git a/memory/volatile/tests/moz.build b/memory/volatile/tests/moz.build
new file mode 100644
index 000000000..eea962e71
--- /dev/null
+++ b/memory/volatile/tests/moz.build
@@ -0,0 +1,11 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES = [
+ 'TestVolatileBuffer.cpp',
+]
+
+FINAL_LIBRARY = 'xul-gtest'