author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d  /memory/jemalloc/src/test/integration
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'memory/jemalloc/src/test/integration')
-rw-r--r--  memory/jemalloc/src/test/integration/MALLOCX_ARENA.c            69
-rw-r--r--  memory/jemalloc/src/test/integration/aligned_alloc.c           139
-rw-r--r--  memory/jemalloc/src/test/integration/allocated.c               125
-rw-r--r--  memory/jemalloc/src/test/integration/chunk.c                   293
-rw-r--r--  memory/jemalloc/src/test/integration/mallocx.c                 234
-rw-r--r--  memory/jemalloc/src/test/integration/overflow.c                 49
-rw-r--r--  memory/jemalloc/src/test/integration/posix_memalign.c          133
-rw-r--r--  memory/jemalloc/src/test/integration/rallocx.c                 259
-rw-r--r--  memory/jemalloc/src/test/integration/sdallocx.c                 57
-rw-r--r--  memory/jemalloc/src/test/integration/thread_arena.c             79
-rw-r--r--  memory/jemalloc/src/test/integration/thread_tcache_enabled.c   113
-rw-r--r--  memory/jemalloc/src/test/integration/xallocx.c                 497
12 files changed, 2047 insertions, 0 deletions
diff --git a/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c b/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c
new file mode 100644
index 000000000..30c203ae6
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c
@@ -0,0 +1,69 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ unsigned thread_ind = (unsigned)(uintptr_t)arg;
+ unsigned arena_ind;
+ void *p;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Error in arenas.extend");
+
+ if (thread_ind % 4 != 3) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ const char *dss_precs[] = {"disabled", "primary", "secondary"};
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Error in mallctlnametomib()");
+ mib[1] = arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
+ }
+
+ p = mallocx(1, MALLOCX_ARENA(arena_ind));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_MALLOCX_ARENA)
+{
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)(uintptr_t)i);
+ }
+
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_MALLOCX_ARENA));
+}
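
For context: the test above exercises the "arenas.extend" mallctl together with the MALLOCX_ARENA() flag. A minimal standalone sketch of that pattern, assuming a jemalloc 4.x build that exports the unprefixed non-standard API via <jemalloc/jemalloc.h> (prefixed builds rename these entry points to je_mallctl(), je_mallocx(), and so on):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* "arenas.extend" creates a new arena and returns its index. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);
	/* MALLOCX_ARENA() routes this one allocation to that arena. */
	p = mallocx(1, MALLOCX_ARENA(arena_ind));
	if (p == NULL)
		return (1);
	printf("allocated from arena %u\n", arena_ind);
	dallocx(p, 0);
	return (0);
}

Linking is typically just a matter of adding -ljemalloc.
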
diff --git a/memory/jemalloc/src/test/integration/aligned_alloc.c b/memory/jemalloc/src/test/integration/aligned_alloc.c
new file mode 100644
index 000000000..58438421d
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/aligned_alloc.c
@@ -0,0 +1,139 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void)
+{
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors)
+{
+ size_t alignment;
+ void *p;
+
+ alignment = 0;
+ set_errno(0);
+ p = aligned_alloc(alignment, 1);
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu", alignment);
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ set_errno(0);
+ p = aligned_alloc(alignment + 1, 1);
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors)
+{
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+	    "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ ps[i] = aligned_alloc(alignment, size);
+ if (ps[i] == NULL) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += malloc_usable_size(ps[i]);
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size));
+}
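
aligned_alloc() here is the C11 interface: the alignment must be a power of two, and jemalloc (like glibc) reports invalid alignments by returning NULL with errno set to EINVAL, which is what test_alignment_errors asserts. A minimal sketch of that contract, using only standard C11:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t alignment = 64;
	void *p = aligned_alloc(alignment, 128);

	if (p == NULL)
		return (1);
	/* The returned address must be a multiple of the alignment. */
	printf("aligned: %s\n",
	    ((uintptr_t)p & (alignment - 1)) == 0 ? "yes" : "no");
	free(p);
	/* Alignment 3 is not a power of two; expect NULL and EINVAL. */
	errno = 0;
	p = aligned_alloc(3, 1);
	printf("invalid alignment -> %p, errno=%d\n", p, errno);
	return (0);
}
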
diff --git a/memory/jemalloc/src/test/integration/allocated.c b/memory/jemalloc/src/test/integration/allocated.c
new file mode 100644
index 000000000..3630e80ce
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/allocated.c
@@ -0,0 +1,125 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ int err;
+ void *p;
+ uint64_t a0, a1, d0, d1;
+ uint64_t *ap0, *ap1, *dp0, *dp1;
+ size_t sz, usize;
+
+ sz = sizeof(a0);
+ if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(ap0);
+ if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*ap0, a0,
+ "\"thread.allocatedp\" should provide a pointer to internal "
+ "storage");
+
+ sz = sizeof(d0);
+ if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(dp0);
+ if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*dp0, d0,
+ "\"thread.deallocatedp\" should provide a pointer to internal "
+ "storage");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() error");
+
+ sz = sizeof(a1);
+ mallctl("thread.allocated", &a1, &sz, NULL, 0);
+ sz = sizeof(ap1);
+ mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
+ assert_u64_eq(*ap1, a1,
+ "Dereferenced \"thread.allocatedp\" value should equal "
+ "\"thread.allocated\" value");
+ assert_ptr_eq(ap0, ap1,
+ "Pointer returned by \"thread.allocatedp\" should not change");
+
+ usize = malloc_usable_size(p);
+ assert_u64_le(a0 + usize, a1,
+ "Allocated memory counter should increase by at least the amount "
+ "explicitly allocated");
+
+ free(p);
+
+ sz = sizeof(d1);
+ mallctl("thread.deallocated", &d1, &sz, NULL, 0);
+ sz = sizeof(dp1);
+ mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
+ assert_u64_eq(*dp1, d1,
+ "Dereferenced \"thread.deallocatedp\" value should equal "
+ "\"thread.deallocated\" value");
+ assert_ptr_eq(dp0, dp1,
+ "Pointer returned by \"thread.deallocatedp\" should not change");
+
+ assert_u64_le(d0 + usize, d1,
+ "Deallocated memory counter should increase by at least the amount "
+ "explicitly deallocated");
+
+ return (NULL);
+label_ENOENT:
+ assert_false(config_stats,
+ "ENOENT should only be returned if stats are disabled");
+ test_skip("\"thread.allocated\" mallctl not available");
+ return (NULL);
+}
+
+TEST_BEGIN(test_main_thread)
+{
+
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Run tests multiple times to check for bad interactions. */
+ return (test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread));
+}
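
The "thread.allocated" and "thread.deallocated" mallctls read above are per-thread byte counters that only exist when jemalloc is built with --enable-stats (hence the ENOENT skip path). A minimal sketch of reading one, under the same unprefixed-API assumption as the earlier sketch:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint64_t before, after;
	size_t sz = sizeof(uint64_t);
	void *p;

	/* Fails with ENOENT when stats support is compiled out. */
	if (mallctl("thread.allocated", &before, &sz, NULL, 0) != 0)
		return (1);
	p = malloc(4096);
	mallctl("thread.allocated", &after, &sz, NULL, 0);
	printf("counter grew by at least %zu bytes: %llu -> %llu\n",
	    malloc_usable_size(p), (unsigned long long)before,
	    (unsigned long long)after);
	free(p);
	return (0);
}
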
diff --git a/memory/jemalloc/src/test/integration/chunk.c b/memory/jemalloc/src/test/integration/chunk.c
new file mode 100644
index 000000000..ff9bf967a
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/chunk.c
@@ -0,0 +1,293 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+static chunk_hooks_t orig_hooks;
+static chunk_hooks_t old_hooks;
+
+static bool do_dalloc = true;
+static bool do_decommit;
+
+static bool did_alloc;
+static bool did_dalloc;
+static bool did_commit;
+static bool did_decommit;
+static bool did_purge;
+static bool did_split;
+static bool did_merge;
+
+#if 0
+# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
+#else
+# define TRACE_HOOK(fmt, ...)
+#endif
+
+void *
+chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, "
+ "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment,
+ *zero ? "true" : "false", *commit ? "true" : "false", arena_ind);
+ did_alloc = true;
+ return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
+ arena_ind));
+}
+
+bool
+chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n",
+ __func__, chunk, size, committed ? "true" : "false", arena_ind);
+ did_dalloc = true;
+ if (!do_dalloc)
+ return (true);
+ return (old_hooks.dalloc(chunk, size, committed, arena_ind));
+}
+
+bool
+chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+ bool err;
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ err = old_hooks.commit(chunk, size, offset, length, arena_ind);
+ did_commit = !err;
+ return (err);
+}
+
+bool
+chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+ bool err;
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ if (!do_decommit)
+ return (true);
+ err = old_hooks.decommit(chunk, size, offset, length, arena_ind);
+ did_decommit = !err;
+ return (err);
+}
+
+bool
+chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ did_purge = true;
+ return (old_hooks.purge(chunk, size, offset, length, arena_ind));
+}
+
+bool
+chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a,
+ size_b, committed ? "true" : "false", arena_ind);
+ did_split = true;
+ return (old_hooks.split(chunk, size, size_a, size_b, committed,
+ arena_ind));
+}
+
+bool
+chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b,
+ size_b, committed ? "true" : "false", arena_ind);
+ did_merge = true;
+ return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b,
+ committed, arena_ind));
+}
+
+TEST_BEGIN(test_chunk)
+{
+ void *p;
+ size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
+ unsigned arena_ind;
+ int flags;
+ size_t hooks_mib[3], purge_mib[3];
+ size_t hooks_miblen, purge_miblen;
+ chunk_hooks_t new_hooks = {
+ chunk_alloc,
+ chunk_dalloc,
+ chunk_commit,
+ chunk_decommit,
+ chunk_purge,
+ chunk_split,
+ chunk_merge
+ };
+ bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Install custom chunk hooks. */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = (size_t)arena_ind;
+ old_size = sizeof(chunk_hooks_t);
+ new_size = sizeof(chunk_hooks_t);
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
+ &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
+ orig_hooks = old_hooks;
+ assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
+ assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
+ "Unexpected dalloc error");
+ assert_ptr_ne(old_hooks.commit, chunk_commit,
+ "Unexpected commit error");
+ assert_ptr_ne(old_hooks.decommit, chunk_decommit,
+ "Unexpected decommit error");
+ assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error");
+ assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
+ assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");
+
+ /* Get large size classes. */
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.0.size failure");
+ assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.1.size failure");
+
+ /* Get huge size classes. */
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.0.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.1.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.2.size failure");
+
+ /* Test dalloc/decommit/purge cascade. */
+ purge_miblen = sizeof(purge_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ purge_mib[1] = (size_t)arena_ind;
+ do_dalloc = false;
+ do_decommit = false;
+ p = mallocx(huge0 * 2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_dalloc = false;
+ did_decommit = false;
+ did_purge = false;
+ did_split = false;
+ xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0);
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_a) {
+ assert_true(did_dalloc, "Expected dalloc");
+ assert_false(did_decommit, "Unexpected decommit");
+ assert_true(did_purge, "Expected purge");
+ }
+ assert_true(did_split, "Expected split");
+ dallocx(p, flags);
+ do_dalloc = true;
+
+ /* Test decommit/commit and observe split/merge. */
+ do_dalloc = false;
+ do_decommit = true;
+ p = mallocx(huge0 * 2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_decommit = false;
+ did_commit = false;
+ did_split = false;
+ did_merge = false;
+ xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0);
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_b)
+ assert_true(did_split, "Expected split");
+ xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2);
+ assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+ if (xallocx_success_b && xallocx_success_c)
+ assert_true(did_merge, "Expected merge");
+ dallocx(p, flags);
+ do_dalloc = true;
+ do_decommit = false;
+
+ /* Test purge for partial-chunk huge allocations. */
+ if (huge0 * 2 > huge2) {
+ /*
+ * There are at least four size classes per doubling, so a
+ * successful xallocx() from size=huge2 to size=huge1 is
+ * guaranteed to leave trailing purgeable memory.
+ */
+ p = mallocx(huge2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_purge = false;
+ assert_zu_eq(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() failure");
+ assert_true(did_purge, "Expected purge");
+ dallocx(p, flags);
+ }
+
+ /* Test decommit for large allocations. */
+ do_decommit = true;
+ p = mallocx(large1, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ did_decommit = false;
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() failure");
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ did_commit = false;
+ assert_zu_eq(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() failure");
+ assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+ dallocx(p, flags);
+ do_decommit = false;
+
+ /* Make sure non-huge allocation succeeds. */
+ p = mallocx(42, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, flags);
+
+ /* Restore chunk hooks. */
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
+ &old_hooks, new_size), 0, "Unexpected chunk_hooks error");
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
+ NULL, 0), 0, "Unexpected chunk_hooks error");
+ assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
+ "Unexpected alloc error");
+ assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
+ "Unexpected dalloc error");
+ assert_ptr_eq(old_hooks.commit, orig_hooks.commit,
+ "Unexpected commit error");
+ assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit,
+ "Unexpected decommit error");
+ assert_ptr_eq(old_hooks.purge, orig_hooks.purge,
+ "Unexpected purge error");
+ assert_ptr_eq(old_hooks.split, orig_hooks.split,
+ "Unexpected split error");
+ assert_ptr_eq(old_hooks.merge, orig_hooks.merge,
+ "Unexpected merge error");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(test_chunk));
+}
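
The chunk hooks installed above replace how an arena obtains and returns address ranges from the system. A minimal sketch of the same mallctl dance, wrapping only the alloc hook of arena 0 with a logging shim (same jemalloc 4.x assumptions; the hook signature follows the chunk_hooks_t layout used by this test):

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static chunk_hooks_t default_hooks;

static void *
logging_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{

	fprintf(stderr, "chunk alloc: size=%zu align=%zu arena=%u\n",
	    size, alignment, arena_ind);
	/* Delegate to the hooks that were installed before ours. */
	return (default_hooks.alloc(new_addr, size, alignment, zero, commit,
	    arena_ind));
}

int
main(void)
{
	chunk_hooks_t hooks;
	size_t sz = sizeof(chunk_hooks_t);

	/* Read the current hooks so the shim can delegate to them. */
	if (mallctl("arena.0.chunk_hooks", &default_hooks, &sz, NULL, 0) != 0)
		return (1);
	hooks = default_hooks;
	hooks.alloc = logging_alloc;
	if (mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sz) != 0)
		return (1);
	/* Large enough to force a fresh chunk allocation. */
	free(malloc(8 * 1024 * 1024));
	return (0);
}
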
diff --git a/memory/jemalloc/src/test/integration/mallocx.c b/memory/jemalloc/src/test/integration/mallocx.c
new file mode 100644
index 000000000..43b76ebac
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/mallocx.c
@@ -0,0 +1,234 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void)
+{
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_overflow)
+{
+ size_t hugemax;
+
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ assert_ptr_null(mallocx(hugemax+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);
+
+ assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ assert_ptr_null(mallocx(SIZE_T_MAX, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+ assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+TEST_BEGIN(test_oom)
+{
+ size_t hugemax;
+ bool oom;
+ void *ptrs[3];
+ unsigned i;
+
+ /*
+ * It should be impossible to allocate three objects that each consume
+ * nearly half the virtual address space.
+ */
+ hugemax = get_huge_size(get_nhuge()-1);
+ oom = false;
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ ptrs[i] = mallocx(hugemax, 0);
+ if (ptrs[i] == NULL)
+ oom = true;
+ }
+ assert_true(oom,
+ "Expected OOM during series of calls to mallocx(size=%zu, 0)",
+ hugemax);
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ if (ptrs[i] != NULL)
+ dallocx(ptrs[i], 0);
+ }
+ purge();
+
+#if LG_SIZEOF_PTR == 3
+ assert_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x8000000000000000ULL)),
+ "Expected OOM for mallocx()");
+ assert_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x80000000)),
+ "Expected OOM for mallocx()");
+#else
+ assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
+ "Expected OOM for mallocx()");
+#endif
+}
+TEST_END
+
+TEST_BEGIN(test_basic)
+{
+#define MAXSZ (((size_t)1) << 23)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ size_t nsz, rsz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ rsz = sallocx(p, 0);
+ assert_zu_ge(rsz, sz, "Real size smaller than expected");
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
+ nsz);
+ rsz = sallocx(p, 0);
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+ purge();
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+ size_t nsz, rsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_ptr_not_null(ps[i],
+ "mallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ assert_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_overflow,
+ test_oom,
+ test_basic,
+ test_alignment_and_size));
+}
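
test_basic above leans on the invariant that nallocx() predicts, without allocating, the size class mallocx() will use for a request, and that sallocx() reports it after the fact. A minimal sketch (same jemalloc assumptions as the earlier sketches):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	size_t req = 100;
	size_t predicted = nallocx(req, 0);	/* no allocation happens */
	void *p = mallocx(req, 0);

	if (p == NULL)
		return (1);
	/* sallocx() reports the usable size actually backing p. */
	printf("requested %zu, predicted %zu, sallocx %zu\n",
	    req, predicted, sallocx(p, 0));
	dallocx(p, 0);
	return (0);
}
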
diff --git a/memory/jemalloc/src/test/integration/overflow.c b/memory/jemalloc/src/test/integration/overflow.c
new file mode 100644
index 000000000..303d9b2d3
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/overflow.c
@@ -0,0 +1,49 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_overflow)
+{
+ unsigned nhchunks;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+ void *p;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nhchunks - 1;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
+
+ assert_ptr_null(malloc(max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(malloc(SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ assert_ptr_null(calloc(1, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(calloc(1, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() OOM");
+ assert_ptr_null(realloc(p, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(realloc(p, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+ free(p);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_overflow));
+}
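
The point of this test is that over-sized requests must fail cleanly rather than letting internal size arithmetic wrap. The same contract holds for the plain standard entry points; a minimal sketch:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *p;

	errno = 0;
	p = malloc(SIZE_MAX);	/* cannot possibly be satisfied */
	printf("malloc(SIZE_MAX) -> %p, errno=%d\n", p, errno);
	/* calloc() must detect nmemb * size overflow internally. */
	p = calloc(SIZE_MAX / 2, 3);
	printf("overflowing calloc() -> %p\n", p);
	return (0);
}
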
diff --git a/memory/jemalloc/src/test/integration/posix_memalign.c b/memory/jemalloc/src/test/integration/posix_memalign.c
new file mode 100644
index 000000000..e22e10200
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/posix_memalign.c
@@ -0,0 +1,133 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void)
+{
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors)
+{
+ size_t alignment;
+ void *p;
+
+ for (alignment = 0; alignment < sizeof(void *); alignment++) {
+ assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment);
+ }
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors)
+{
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ int err;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ err = posix_memalign(&ps[i],
+ alignment, size);
+ if (err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += malloc_usable_size(ps[i]);
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size));
+}
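
posix_memalign() differs from aligned_alloc() in that it reports errors through its return value rather than errno, and it additionally requires the alignment to be at least sizeof(void *), which is why the first loop above expects EINVAL for every smaller value. A minimal sketch, standard POSIX only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *p = NULL;
	int err = posix_memalign(&p, 4096, 1024);

	if (err != 0)
		return (1);
	printf("page aligned: %s\n",
	    ((uintptr_t)p & 4095) == 0 ? "yes" : "no");
	free(p);
	/* Invalid alignment: the error comes back as the return value. */
	printf("posix_memalign(&p, 7, 1) -> %d\n",
	    posix_memalign(&p, 7, 1));
	return (0);
}
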
diff --git a/memory/jemalloc/src/test/integration/rallocx.c b/memory/jemalloc/src/test/integration/rallocx.c
new file mode 100644
index 000000000..66ad8660a
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/rallocx.c
@@ -0,0 +1,259 @@
+#include "test/jemalloc_test.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_grow_and_shrink)
+{
+ void *p, *q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 2500
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ szs[0] = sallocx(p, 0);
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = rallocx(p, szs[j-1]+1, 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ szs[j] = sallocx(q, 0);
+ assert_zu_ne(szs[j], szs[j-1]+1,
+ "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = rallocx(p, szs[j-1], 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ tsz = sallocx(q, 0);
+ assert_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ dallocx(p, 0);
+#undef MAXSZ
+#undef NSZS
+#undef NCYCLES
+}
+TEST_END
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+ bool ret = false;
+ const uint8_t *buf = (const uint8_t *)p;
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ uint8_t b = buf[offset+i];
+ if (b != c) {
+ test_fail("Allocation at %p (len=%zu) contains %#x "
+ "rather than %#x at offset %zu", p, len, b, c,
+ offset+i);
+ ret = true;
+ }
+ }
+
+ return (ret);
+}
+
+TEST_BEGIN(test_zero)
+{
+ void *p, *q;
+ size_t psz, qsz, i, j;
+ size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
+#define FILL_BYTE 0xaaU
+#define RANGE 2048
+
+ for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
+ size_t start_size = start_sizes[i];
+ p = mallocx(start_size, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ psz = sallocx(p, 0);
+
+ assert_false(validate_fill(p, 0, 0, psz),
+ "Expected zeroed memory");
+ memset(p, FILL_BYTE, psz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+
+ for (j = 1; j < RANGE; j++) {
+ q = rallocx(p, start_size+j, MALLOCX_ZERO);
+ assert_ptr_not_null(q, "Unexpected rallocx() error");
+ qsz = sallocx(q, 0);
+ if (q != p || qsz != psz) {
+ assert_false(validate_fill(q, FILL_BYTE, 0,
+ psz), "Expected filled memory");
+ assert_false(validate_fill(q, 0, psz, qsz-psz),
+ "Expected zeroed memory");
+ }
+ if (psz != qsz) {
+ memset((void *)((uintptr_t)q+psz), FILL_BYTE,
+ qsz-psz);
+ psz = qsz;
+ }
+ p = q;
+ }
+ assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+ dallocx(p, 0);
+ }
+#undef FILL_BYTE
+}
+TEST_END
+
+TEST_BEGIN(test_align)
+{
+ void *p, *q;
+ size_t align;
+#define MAX_ALIGN (ZU(1) << 25)
+
+ align = ZU(1);
+ p = mallocx(1, MALLOCX_ALIGN(align));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
+ q = rallocx(p, 1, MALLOCX_ALIGN(align));
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for align=%zu", align);
+ assert_ptr_null(
+ (void *)((uintptr_t)q & (align-1)),
+ "%p inadequately aligned for align=%zu",
+ q, align);
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_lg_align_and_zero)
+{
+ void *p, *q;
+ unsigned lg_align;
+ size_t sz;
+#define MAX_LG_ALIGN 25
+#define MAX_VALIDATE (ZU(1) << 22)
+
+ lg_align = 0;
+ p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
+ q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for lg_align=%u", lg_align);
+ assert_ptr_null(
+ (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
+ "%p inadequately aligned for lg_align=%u", q, lg_align);
+ sz = sallocx(q, 0);
+ if ((sz << 1) <= MAX_VALIDATE) {
+ assert_false(validate_fill(q, 0, 0, sz),
+ "Expected zeroed memory");
+ } else {
+ assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
+ "Expected zeroed memory");
+ assert_false(validate_fill(
+ (void *)((uintptr_t)q+sz-MAX_VALIDATE),
+ 0, 0, MAX_VALIDATE), "Expected zeroed memory");
+ }
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_VALIDATE
+#undef MAX_LG_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_overflow)
+{
+ size_t hugemax;
+ void *p;
+
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_ptr_null(rallocx(p, hugemax+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1);
+
+ assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+ assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_grow_and_shrink,
+ test_zero,
+ test_align,
+ test_lg_align_and_zero,
+ test_overflow));
+}
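
rallocx() is jemalloc's realloc() analogue: it either succeeds fully or leaves the allocation untouched, and with MALLOCX_ZERO any bytes beyond the old usable size come back zeroed, which is what test_zero verifies above. A minimal sketch (same jemalloc assumptions):

#include <jemalloc/jemalloc.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	void *p, *q;

	p = mallocx(64, MALLOCX_ZERO);
	if (p == NULL)
		return (1);
	memset(p, 0xaa, 64);
	/* Grow; bytes beyond the old usable size must read as zero. */
	q = rallocx(p, 4096, MALLOCX_ZERO);
	if (q == NULL) {
		dallocx(p, 0);
		return (1);
	}
	printf("byte 4095 = %#x (expect 0)\n", ((unsigned char *)q)[4095]);
	dallocx(q, 0);
	return (0);
}
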
diff --git a/memory/jemalloc/src/test/integration/sdallocx.c b/memory/jemalloc/src/test/integration/sdallocx.c
new file mode 100644
index 000000000..b84817d76
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/sdallocx.c
@@ -0,0 +1,57 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
+
+TEST_BEGIN(test_basic)
+{
+ void *ptr = mallocx(64, 0);
+ sdallocx(ptr, 64, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t nsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ total += nsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ sdallocx(ps[i], sz,
+ MALLOCX_ALIGN(alignment));
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_basic,
+ test_alignment_and_size));
+}
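
sdallocx() is sized deallocation: the caller supplies the request size so the allocator can skip the size-class lookup that dallocx() would otherwise perform, which is why the test carefully passes the same sz and alignment flags it allocated with. A minimal sketch (same assumptions):

#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = mallocx(64, 0);

	if (p == NULL)
		return (1);
	/* The size must map to the same size class as the request. */
	sdallocx(p, 64, 0);
	return (0);
}
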
diff --git a/memory/jemalloc/src/test/integration/thread_arena.c b/memory/jemalloc/src/test/integration/thread_arena.c
new file mode 100644
index 000000000..67be53513
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/thread_arena.c
@@ -0,0 +1,79 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+void *
+thd_start(void *arg)
+{
+ unsigned main_arena_ind = *(unsigned *)arg;
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+ free(p);
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
+ sizeof(main_arena_ind)))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+ assert_u_eq(arena_ind, main_arena_ind,
+ "Arena index should be same as for main thread");
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_thread_arena)
+{
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)&arena_ind);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ intptr_t join_ret;
+ thd_join(thds[i], (void *)&join_ret);
+ assert_zd_eq(join_ret, 0, "Unexpected thread join error");
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_thread_arena));
+}
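
"thread.arena" is a read/write mallctl: reading returns the calling thread's current arena index, and writing re-binds the thread, which is how each spawned thread above adopts the main thread's arena before re-reading it. A minimal sketch (same assumptions):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	unsigned old_ind, new_ind = 0;
	size_t sz = sizeof(unsigned);

	/* Read the current binding and re-bind to arena 0 in one call. */
	if (mallctl("thread.arena", &old_ind, &sz, &new_ind,
	    sizeof(new_ind)) != 0)
		return (1);
	printf("was bound to arena %u, now arena 0\n", old_ind);
	return (0);
}
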
diff --git a/memory/jemalloc/src/test/integration/thread_tcache_enabled.c b/memory/jemalloc/src/test/integration/thread_tcache_enabled.c
new file mode 100644
index 000000000..f4e89c682
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/thread_tcache_enabled.c
@@ -0,0 +1,113 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ int err;
+ size_t sz;
+ bool e0, e1;
+
+ sz = sizeof(bool);
+ if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ assert_false(config_tcache,
+ "ENOENT should only be returned if tcache is "
+ "disabled");
+ }
+ goto label_ENOENT;
+ }
+
+ if (e0) {
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
+ 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+ }
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ return (NULL);
+label_ENOENT:
+ test_skip("\"thread.tcache.enabled\" mallctl not available");
+ return (NULL);
+}
+
+TEST_BEGIN(test_main_thread)
+{
+
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Run tests multiple times to check for bad interactions. */
+ return (test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread));
+}
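
The test toggles the per-thread cache through the boolean "thread.tcache.enabled" mallctl, reading the old value and installing the new one in a single call; it only exists when jemalloc is built with tcache support, hence the ENOENT skip. A minimal sketch (same assumptions):

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	bool was_enabled, enable = false;
	size_t sz = sizeof(bool);

	/* Disable this thread's tcache; the old setting is returned. */
	if (mallctl("thread.tcache.enabled", &was_enabled, &sz, &enable,
	    sz) != 0)
		return (1);	/* ENOENT if tcache is compiled out */
	printf("tcache was %s, now disabled\n",
	    was_enabled ? "enabled" : "disabled");
	return (0);
}
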
diff --git a/memory/jemalloc/src/test/integration/xallocx.c b/memory/jemalloc/src/test/integration/xallocx.c
new file mode 100644
index 000000000..ad292bb56
--- /dev/null
+++ b/memory/jemalloc/src/test/integration/xallocx.c
@@ -0,0 +1,497 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+/*
+ * Use a separate arena for xallocx() extension/contraction tests so that
+ * internal allocation e.g. by heap profiling can't interpose allocations where
+ * xallocx() would ordinarily be able to extend.
+ */
+static unsigned
+arena_ind(void)
+{
+ static unsigned ind = 0;
+
+ if (ind == 0) {
+ size_t sz = sizeof(ind);
+ assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure creating arena");
+ }
+
+ return (ind);
+}
+
+TEST_BEGIN(test_same_size)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz + 5, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+ return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+ return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_small_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.bin.0.size", ind));
+}
+
+static size_t
+get_large_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.lrun.0.size", ind));
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_size)
+{
+ size_t small0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test smallest supported size. */
+ assert_zu_eq(xallocx(p, 1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test largest supported size. */
+ assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test size overflow. */
+ assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow)
+{
+ size_t small0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test overflows that can be resolved by clamping extra. */
+ assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test overflow such that hugemax-size underflows. */
+ assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small)
+{
+ size_t small0, small1, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ small1 = get_small_size(1);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test size+extra overflow. */
+ assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large)
+{
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t smallmax, large0, large1, large2, huge0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ smallmax = get_small_size(get_nsmall()-1);
+ large0 = get_large_size(0);
+ large1 = get_large_size(1);
+ large2 = get_large_size(2);
+ huge0 = get_huge_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(large2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, smallmax, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge0, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size+extra overflow. */
+ assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_huge)
+{
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t largemax, huge1, huge2, huge3, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ largemax = get_large_size(get_nlarge()-1);
+ huge1 = get_huge_size(1);
+ huge2 = get_huge_size(2);
+ huge3 = get_huge_size(3);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(huge3, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, largemax, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_le(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ "Unexpected xallocx() behavior");
+ /* Test size+extra overflow. */
+ assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len)
+{
+ const uint8_t *pc = (const uint8_t *)p;
+ size_t i, range0;
+ uint8_t c0;
+
+ malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len);
+ range0 = 0;
+ c0 = pc[0];
+ for (i = 0; i < len; i++) {
+ if (pc[i] != c0) {
+ malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+ range0 = i;
+ c0 = pc[i];
+ }
+ }
+ malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+ const uint8_t *pc = (const uint8_t *)p;
+ bool err;
+ size_t i;
+
+ for (i = offset, err = false; i < offset+len; i++) {
+ if (pc[i] != c)
+ err = true;
+ }
+
+ if (err)
+ print_filled_extents(p, c, offset + len);
+
+ return (err);
+}
+
+static void
+test_zero(size_t szmin, size_t szmax)
+{
+ int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
+ size_t sz, nsz;
+ void *p;
+#define FILL_BYTE 0x7aU
+
+ sz = szmax;
+ p = mallocx(sz, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+ sz);
+
+ /*
+ * Fill with non-zero so that non-debug builds are more likely to detect
+ * errors.
+ */
+ memset(p, FILL_BYTE, sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ /* Shrink in place so that we can expect growing in place to succeed. */
+ sz = szmin;
+ assert_zu_eq(xallocx(p, sz, 0, flags), sz,
+ "Unexpected xallocx() error");
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ for (sz = szmin; sz < szmax; sz = nsz) {
+ nsz = nallocx(sz+1, flags);
+ assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz,
+ "Unexpected xallocx() failure");
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+ assert_false(validate_fill(p, 0x00, sz, nsz-sz),
+ "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+ memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
+ "Memory not filled: nsz=%zu", nsz);
+ }
+
+ dallocx(p, flags);
+}
+
+TEST_BEGIN(test_zero_large)
+{
+ size_t large0, largemax;
+
+ /* Get size classes. */
+ large0 = get_large_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ test_zero(large0, largemax);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+ size_t huge0, huge1;
+
+ /* Get size classes. */
+ huge0 = get_huge_size(0);
+ huge1 = get_huge_size(1);
+
+ test_zero(huge1, huge0 * 2);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail,
+ test_size,
+ test_size_extra_overflow,
+ test_extra_small,
+ test_extra_large,
+ test_extra_huge,
+ test_zero_large,
+ test_zero_huge));
+}
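
xallocx() resizes strictly in place: it returns the resulting usable size, and if it cannot provide at least `size` bytes without moving the allocation it leaves it unchanged, which is why the assertions above compare returned sizes rather than pointers. A minimal sketch (same assumptions):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	void *p = mallocx(4096, 0);
	size_t oldsz, newsz;

	if (p == NULL)
		return (1);
	oldsz = sallocx(p, 0);
	/* Try to double the allocation without moving it; extra=0 asks
	 * for no best-effort slack beyond size. */
	newsz = xallocx(p, oldsz * 2, 0, 0);
	printf("in-place resize %zu -> %zu (%s)\n", oldsz, newsz,
	    (newsz >= oldsz * 2) ? "grew" : "could not grow");
	dallocx(p, 0);
	return (0);
}
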