author     wolfbeast <mcwerewolf@wolfbeast.com>  2019-12-06 23:39:47 +0100
committer  wolfbeast <mcwerewolf@wolfbeast.com>  2019-12-06 23:39:47 +0100
commit     642032029f65e9dc0d38fbb6d35ef656c73a292c (patch)
tree       78a22bd12d679ab532db490d631ee69fa085aec1 /memory
parent     50ef259a2df60d020ccb02d76dc5aa4835ee319e (diff)
parent     2529b2edece0a0ed86553d1e73eef13c3848bf64 (diff)
Merge branch 'master' into release
Diffstat (limited to 'memory')
-rw-r--r--  memory/mozalloc/mozalloc_abort.cpp   |    2
-rw-r--r--  memory/mozjemalloc/jemalloc.c        |  405
-rw-r--r--  memory/mozjemalloc/jemalloc_types.h  |    3
-rw-r--r--  memory/mozjemalloc/rb.h              |   61
4 files changed, 75 insertions(+), 396 deletions(-)
diff --git a/memory/mozalloc/mozalloc_abort.cpp b/memory/mozalloc/mozalloc_abort.cpp
index a998d8164..85e566db0 100644
--- a/memory/mozalloc/mozalloc_abort.cpp
+++ b/memory/mozalloc/mozalloc_abort.cpp
@@ -68,7 +68,7 @@ void fillAbortMessage(char (&msg)[N], uintptr_t retAddress) {
//
// That segmentation fault will be interpreted as another bug by ASan and as a
// result, ASan will just exit(1) instead of aborting.
-void abort(void)
+extern "C" void abort(void)
{
#ifdef MOZ_WIDGET_ANDROID
char msg[64] = {};
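
Why the one-line mozalloc change matters: compiled as C++, the definition gets a mangled symbol name and never interposes libc's abort(). A minimal sketch of the linkage issue, separate from the UXP build (the message and _Exit fallback are illustrative, not the real handler):

    // Without extern "C", C++ emits this as _Z5abortv and libc's
    // abort() still wins at link/run time; with it, the symbol is
    // plain "abort" and interposition works.
    #include <cstdio>

    extern "C" void _Exit(int);  // declared by hand to sidestep
                                 // <cstdlib>'s abort() prototype

    extern "C" void abort(void)
    {
        std::fputs("custom abort handler reached\n", stderr);
        _Exit(1);                // terminate without returning
    }
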
diff --git a/memory/mozjemalloc/jemalloc.c b/memory/mozjemalloc/jemalloc.c
index 9a97bbb09..ecc9d2985 100644
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -2,7 +2,7 @@
/* vim:set softtabstop=8 shiftwidth=8 noet: */
/*-
* Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
- * Copyright (C) 2015-2018 Mark Straver <moonchild@palemoon.org>
+ * Copyright (C) 2015-2019 Mark Straver <moonchild@palemoon.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -167,11 +167,6 @@
*/
# define MALLOC_DEBUG
- /* Allocation tracing. */
-# ifndef MOZ_MEMORY_WINDOWS
-# define MALLOC_UTRACE
-# endif
-
/* Support optional abort() on OOM. */
# define MALLOC_XMALLOC
@@ -179,18 +174,8 @@
# define MALLOC_SYSV
#endif
-/*
- * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
- * validation. There are many possible errors that validation does not even
- * attempt to detect.
- */
-#define MALLOC_VALIDATE
-
#if defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
#define _GNU_SOURCE /* For mremap(2). */
-#if 0 /* Enable in order to test decommit code on Linux. */
-# define MALLOC_DECOMMIT
-#endif
#endif
#include <sys/types.h>
@@ -280,19 +265,12 @@ typedef long ssize_t;
#define JEMALLOC_RECYCLE
#ifndef MOZ_MEMORY_WINDOWS
+#ifndef MOZ_MEMORY_SOLARIS
#include <sys/cdefs.h>
+#endif
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
-#ifndef MOZ_MEMORY
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
-#include "libc_private.h"
-#ifdef MALLOC_DEBUG
-# define _LOCK_DEBUG
-#endif
-#include "spinlock.h"
-#include "namespace.h"
-#endif
#include <sys/mman.h>
#ifndef MADV_FREE
# define MADV_FREE MADV_DONTNEED
@@ -301,22 +279,9 @@ __FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z ja
# define MAP_NOSYNC 0
#endif
#include <sys/param.h>
-#ifndef MOZ_MEMORY
-#include <sys/stddef.h>
-#endif
#include <sys/time.h>
#include <sys/types.h>
-#if !defined(MOZ_MEMORY_ANDROID)
-#include <sys/sysctl.h>
-#endif
#include <sys/uio.h>
-#ifndef MOZ_MEMORY
-#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
-
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/vmparam.h>
-#endif
#include <errno.h>
#include <limits.h>
@@ -324,13 +289,6 @@ __FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z ja
# define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
-#ifdef MOZ_MEMORY_DARWIN
-#define _pthread_self pthread_self
-#define _pthread_mutex_init pthread_mutex_init
-#define _pthread_mutex_trylock pthread_mutex_trylock
-#define _pthread_mutex_lock pthread_mutex_lock
-#define _pthread_mutex_unlock pthread_mutex_unlock
-#endif
#include <sched.h>
#include <stdarg.h>
#include <stdio.h>
@@ -351,10 +309,6 @@ __FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z ja
#include <malloc/malloc.h>
#endif
-#ifndef MOZ_MEMORY
-#include "un-namespace.h"
-#endif
-
#endif
#include "jemalloc_types.h"
@@ -408,14 +362,14 @@ void *_mmap(void *addr, size_t length, int prot, int flags,
#endif
#endif
+#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
+#define JEMALLOC_USES_MAP_ALIGN /* Required on Solaris 10. Might improve performance elsewhere. */
+#endif
+
#ifndef __DECONST
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
-#ifdef MOZ_MEMORY_WINDOWS
- /* MSVC++ does not support C99 variable-length arrays. */
-# define RB_NO_C99_VARARRAYS
-#endif
#include "rb.h"
#ifdef MALLOC_DEBUG
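
On the new JEMALLOC_USES_MAP_ALIGN path: with MAP_ALIGN, Solaris reinterprets mmap()'s addr argument as the requested alignment, so a chunk-aligned mapping can be requested directly instead of over-mapping and trimming. A hedged sketch of that call shape (the helper name and fallback are hypothetical, not from the patch):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Ask the kernel for an aligned mapping in one call. */
    static void *
    chunk_map_aligned(size_t size, size_t alignment)
    {
    #ifdef MAP_ALIGN
            /* addr carries the alignment, not a placement hint. */
            void *ret = mmap((void *)alignment, size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1, 0);
            return (ret == MAP_FAILED) ? NULL : ret;
    #else
            (void)alignment;
            return NULL;    /* portable over-map-and-trim path instead */
    #endif
    }
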
@@ -441,46 +395,6 @@ void *_mmap(void *addr, size_t length, int prot, int flags,
#ifdef MOZ_MEMORY_DARWIN
# define NO_TLS
#endif
-#if 0
-#ifdef __i386__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 2
-# define CPU_SPINWAIT __asm__ volatile("pause")
-#endif
-#ifdef __ia64__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-#endif
-#ifdef __alpha__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-# define NO_TLS
-#endif
-#ifdef __sparc64__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-# define NO_TLS
-#endif
-#ifdef __amd64__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-# define CPU_SPINWAIT __asm__ volatile("pause")
-#endif
-#ifdef __arm__
-# define QUANTUM_2POW_MIN 3
-# define SIZEOF_PTR_2POW 2
-# define NO_TLS
-#endif
-#ifdef __mips__
-# define QUANTUM_2POW_MIN 3
-# define SIZEOF_PTR_2POW 2
-# define NO_TLS
-#endif
-#ifdef __powerpc__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 2
-#endif
-#endif
#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
@@ -550,29 +464,6 @@ void *_mmap(void *addr, size_t length, int prot, int flags,
#define RUN_MAX_OVRHD 0x0000003dU
#define RUN_MAX_OVRHD_RELAX 0x00001800U
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU. If no such instruction is defined
- * above, make CPU_SPINWAIT a no-op.
- */
-#ifndef CPU_SPINWAIT
-# define CPU_SPINWAIT
-#endif
-
-/*
- * Adaptive spinning must eventually switch to blocking, in order to avoid the
- * potential for priority inversion deadlock. Backing off past a certain point
- * can actually waste time.
- */
-#define SPIN_LIMIT_2POW 11
-
-/*
- * Conversion from spinning to blocking is expensive; we use (1U <<
- * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
- * worst-case spinning.
- */
-#define BLOCK_COST_2POW 4
-
/******************************************************************************/
/* MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive. */
@@ -595,15 +486,9 @@ typedef struct {
typedef struct {
OSSpinLock lock;
} malloc_spinlock_t;
-#elif defined(MOZ_MEMORY)
+#else
typedef pthread_mutex_t malloc_mutex_t;
typedef pthread_mutex_t malloc_spinlock_t;
-#else
-/* XXX these should #ifdef these for freebsd (and linux?) only */
-typedef struct {
- spinlock_t lock;
-} malloc_mutex_t;
-typedef malloc_spinlock_t malloc_mutex_t;
#endif
/* Set to true once the allocator has been initialized. */
@@ -615,10 +500,8 @@ static bool malloc_initialized = false;
static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
-#elif defined(MOZ_MEMORY)
-static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
#else
-static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
+static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
/******************************************************************************/
@@ -720,16 +603,15 @@ typedef rb_tree(extent_node_t) extent_tree_t;
* Radix tree data structures.
*/
-#ifdef MALLOC_VALIDATE
- /*
- * Size of each radix tree node (must be a power of 2). This impacts tree
- * depth.
- */
-# if (SIZEOF_PTR == 4)
-# define MALLOC_RTREE_NODESIZE (1U << 14)
-# else
-# define MALLOC_RTREE_NODESIZE CACHELINE
-# endif
+/*
+ * Size of each radix tree node (must be a power of 2). This impacts tree
+ * depth.
+ */
+#if (SIZEOF_PTR == 4)
+#define MALLOC_RTREE_NODESIZE (1U << 14)
+#else
+#define MALLOC_RTREE_NODESIZE CACHELINE
+#endif
typedef struct malloc_rtree_s malloc_rtree_t;
struct malloc_rtree_s {
@@ -738,7 +620,6 @@ struct malloc_rtree_s {
unsigned height;
unsigned level2bits[1]; /* Dynamically sized. */
};
-#endif
/******************************************************************************/
/*
@@ -925,11 +806,7 @@ struct arena_s {
#endif
/* All operations on this arena require that lock be locked. */
-#ifdef MOZ_MEMORY
malloc_spinlock_t lock;
-#else
- pthread_mutex_t lock;
-#endif
#ifdef MALLOC_STATS
arena_stats_t stats;
@@ -1034,7 +911,7 @@ static const bool config_recycle = false;
* will abort.
* Platform specific page size conditions copied from js/public/HeapAPI.h
*/
-#if (defined(__FreeBSD__)) && \
+#if (defined(SOLARIS) || defined(__FreeBSD__)) && \
(defined(__sparc) || defined(__sparcv9) || defined(__ia64))
#define pagesize_2pow ((size_t) 13)
#elif defined(__powerpc64__)
@@ -1143,9 +1020,7 @@ static size_t recycled_size;
* Chunks.
*/
-#ifdef MALLOC_VALIDATE
static malloc_rtree_t *chunk_rtree;
-#endif
/* Protects chunk-related data structures. */
static malloc_mutex_t chunks_mtx;
@@ -1210,11 +1085,7 @@ static unsigned narenas;
#ifndef NO_TLS
static unsigned next_arena;
#endif
-#ifdef MOZ_MEMORY
static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
-#else
-static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
-#endif
#ifndef NO_TLS
/*
@@ -1260,9 +1131,6 @@ static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
#endif
-#ifdef MALLOC_UTRACE
-static bool opt_utrace = false;
-#endif
#ifdef MALLOC_SYSV
static bool opt_sysv = false;
#endif
@@ -1271,25 +1139,6 @@ static bool opt_xmalloc = false;
#endif
static int opt_narenas_lshift = 0;
-#ifdef MALLOC_UTRACE
-typedef struct {
- void *p;
- size_t s;
- void *r;
-} malloc_utrace_t;
-
-#define UTRACE(a, b, c) \
- if (opt_utrace) { \
- malloc_utrace_t ut; \
- ut.p = (a); \
- ut.s = (b); \
- ut.r = (c); \
- utrace(&ut, sizeof(ut)); \
- }
-#else
-#define UTRACE(a, b, c)
-#endif
-
/******************************************************************************/
/*
* Begin function prototypes for non-inline static functions.
@@ -1494,7 +1343,7 @@ umax2s(uintmax_t x, unsigned base, char *s)
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
+#if !defined(MOZ_MEMORY_WINDOWS)
#define _write write
#endif
// Pretend to check _write() errors to suppress gcc warnings about
@@ -1565,13 +1414,9 @@ malloc_mutex_init(malloc_mutex_t *mutex)
return (true);
}
pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
+#else
if (pthread_mutex_init(mutex, NULL) != 0)
return (true);
-#else
- static const spinlock_t lock = _SPINLOCK_INITIALIZER;
-
- mutex->lock = lock;
#endif
return (false);
}
@@ -1584,10 +1429,8 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
AcquireSRWLockExclusive(mutex);
#elif defined(MOZ_MEMORY_DARWIN)
OSSpinLockLock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_lock(mutex);
#else
- _SPINLOCK(&mutex->lock);
+ pthread_mutex_lock(mutex);
#endif
}
@@ -1599,10 +1442,8 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
ReleaseSRWLockExclusive(mutex);
#elif defined(MOZ_MEMORY_DARWIN)
OSSpinLockUnlock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_unlock(mutex);
#else
- _SPINUNLOCK(&mutex->lock);
+ pthread_mutex_unlock(mutex);
#endif
}
@@ -1626,11 +1467,9 @@ malloc_spin_init(malloc_spinlock_t *lock)
return (true);
}
pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
+#else
if (pthread_mutex_init(lock, NULL) != 0)
return (true);
-#else
- lock->lock = _SPINLOCK_INITIALIZER;
#endif
return (false);
}
@@ -1643,10 +1482,8 @@ malloc_spin_lock(malloc_spinlock_t *lock)
AcquireSRWLockExclusive(lock);
#elif defined(MOZ_MEMORY_DARWIN)
OSSpinLockLock(&lock->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_lock(lock);
#else
- _SPINLOCK(&lock->lock);
+ pthread_mutex_lock(lock);
#endif
}
@@ -1657,10 +1494,8 @@ malloc_spin_unlock(malloc_spinlock_t *lock)
ReleaseSRWLockExclusive(lock);
#elif defined(MOZ_MEMORY_DARWIN)
OSSpinLockUnlock(&lock->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_unlock(lock);
#else
- _SPINUNLOCK(&lock->lock);
+ pthread_mutex_unlock(lock);
#endif
}
@@ -1674,80 +1509,12 @@ malloc_spin_unlock(malloc_spinlock_t *lock)
* priority inversion.
*/
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
+#if !defined(MOZ_MEMORY_DARWIN)
# define malloc_spin_init malloc_mutex_init
# define malloc_spin_lock malloc_mutex_lock
# define malloc_spin_unlock malloc_mutex_unlock
#endif
-#ifndef MOZ_MEMORY
-/*
- * We use an unpublished interface to initialize pthread mutexes with an
- * allocation callback, in order to avoid infinite recursion.
- */
-int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
- void *(calloc_cb)(size_t, size_t));
-
-__weak_reference(_pthread_mutex_init_calloc_cb_stub,
- _pthread_mutex_init_calloc_cb);
-
-int
-_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
- void *(calloc_cb)(size_t, size_t))
-{
-
- return (0);
-}
-
-static bool
-malloc_spin_init(pthread_mutex_t *lock)
-{
-
- if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
- return (true);
-
- return (false);
-}
-
-static inline unsigned
-malloc_spin_lock(pthread_mutex_t *lock)
-{
- unsigned ret = 0;
-
- if (_pthread_mutex_trylock(lock) != 0) {
- unsigned i;
- volatile unsigned j;
-
- /* Exponentially back off. */
- for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
- for (j = 0; j < (1U << i); j++)
- ret++;
-
- CPU_SPINWAIT;
- if (_pthread_mutex_trylock(lock) == 0)
- return (ret);
- }
-
- /*
- * Spinning failed. Block until the lock becomes
- * available, in order to avoid indefinite priority
- * inversion.
- */
- _pthread_mutex_lock(lock);
- assert((ret << BLOCK_COST_2POW) != 0);
- return (ret << BLOCK_COST_2POW);
- }
-
- return (ret);
-}
-
-static inline void
-malloc_spin_unlock(pthread_mutex_t *lock)
-{
- _pthread_mutex_unlock(lock);
-}
-#endif
-
/*
* End spin lock.
*/
@@ -1798,52 +1565,6 @@ pow2_ceil(size_t x)
return (x);
}
-#ifdef MALLOC_UTRACE
-static int
-utrace(const void *addr, size_t len)
-{
- malloc_utrace_t *ut = (malloc_utrace_t *)addr;
- char buf_a[UMAX2S_BUFSIZE];
- char buf_b[UMAX2S_BUFSIZE];
-
- assert(len == sizeof(malloc_utrace_t));
-
- if (ut->p == NULL && ut->s == 0 && ut->r == NULL) {
- _malloc_message(
- umax2s(getpid(), 10, buf_a),
- " x USER malloc_init()\n", "", "");
- } else if (ut->p == NULL && ut->r != NULL) {
- _malloc_message(
- umax2s(getpid(), 10, buf_a),
- " x USER 0x",
- umax2s((uintptr_t)ut->r, 16, buf_b),
- " = malloc(");
- _malloc_message(
- umax2s(ut->s, 10, buf_a),
- ")\n", "", "");
- } else if (ut->p != NULL && ut->r != NULL) {
- _malloc_message(
- umax2s(getpid(), 10, buf_a),
- " x USER 0x",
- umax2s((uintptr_t)ut->r, 16, buf_b),
- " = realloc(0x");
- _malloc_message(
- umax2s((uintptr_t)ut->p, 16, buf_a),
- ", ",
- umax2s(ut->s, 10, buf_b),
- ")\n");
- } else {
- _malloc_message(
- umax2s(getpid(), 10, buf_a),
- " x USER free(0x",
- umax2s((uintptr_t)ut->p, 16, buf_b),
- ")\n");
- }
-
- return (0);
-}
-#endif
-
static inline const char *
_getprogname(void)
{
@@ -2363,7 +2084,6 @@ pages_copy(void *dest, const void *src, size_t n)
}
#endif
-#ifdef MALLOC_VALIDATE
static inline malloc_rtree_t *
malloc_rtree_new(unsigned bits)
{
@@ -2504,7 +2224,6 @@ malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
return (false);
}
-#endif
/* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
* from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
@@ -2640,8 +2359,13 @@ pages_purge(void *addr, size_t length)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# endif
+#ifdef MOZ_MEMORY_SOLARIS
+ int err = posix_madvise(addr, length, JEMALLOC_MADV_PURGE);
+ unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+#else
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+#endif
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
# endif
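
The pages_purge() hunk swaps in posix_madvise() because Solaris does not expose the BSD madvise() prototype by default; the POSIX call takes the same (addr, length, advice) triple but returns an error number instead of -1/errno. A reduced sketch of the pattern (purge_pages() and the DONTNEED advice are illustrative; the committed code keeps the JEMALLOC_MADV_PURGE macro and the unzeroed bookkeeping):

    #include <stddef.h>
    #include <sys/mman.h>

    static int
    purge_pages(void *addr, size_t length)
    {
    #ifdef MOZ_MEMORY_SOLARIS
            /* posix_madvise() returns 0 or an error number directly... */
            return posix_madvise(addr, length, POSIX_MADV_DONTNEED);
    #else
            /* ...while madvise() returns -1 and sets errno. */
            return madvise(addr, length, MADV_DONTNEED);
    #endif
    }
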
@@ -2788,14 +2512,12 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool zero)
ret = NULL;
RETURN:
-#ifdef MALLOC_VALIDATE
if (ret != NULL && base == false) {
if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
chunk_dealloc(ret, size);
return (NULL);
}
}
-#endif
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -2913,9 +2635,7 @@ chunk_dealloc(void *chunk, size_t size)
assert(size != 0);
assert((size & chunksize_mask) == 0);
-#ifdef MALLOC_VALIDATE
malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
-#endif
if (chunk_dalloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
@@ -3597,9 +3317,14 @@ arena_purge(arena_t *arena, bool all)
#endif
#ifndef MALLOC_DECOMMIT
+#ifdef MOZ_MEMORY_SOLARIS
+ posix_madvise((void*)((uintptr_t)chunk + (i << pagesize_2pow)),
+ (npages << pagesize_2pow),MADV_FREE);
+#else
madvise((void *)((uintptr_t)chunk + (i <<
pagesize_2pow)), (npages << pagesize_2pow),
MADV_FREE);
+#endif
# ifdef MALLOC_DOUBLE_PURGE
madvised = true;
# endif
@@ -4268,7 +3993,6 @@ arena_salloc(const void *ptr)
return (ret);
}
-#if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
/*
* Validate ptr before assuming that it points to an allocation. Currently,
* the following validation is performed:
@@ -4309,7 +4033,6 @@ isalloc_validate(const void *ptr)
return (ret);
}
}
-#endif
static inline size_t
isalloc(const void *ptr)
@@ -5120,6 +4843,13 @@ malloc_ncpus(void)
else
return (n);
}
+#elif (defined(MOZ_MEMORY_SOLARIS))
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
#elif (defined(MOZ_MEMORY_WINDOWS))
static inline unsigned
malloc_ncpus(void)
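
The new Solaris branch of malloc_ncpus() leans on sysconf(3). A hedged standalone rendering, with one defensive touch the committed version omits (sysconf() can return -1 on failure, so clamp to a single CPU; the helper name is illustrative):

    #include <unistd.h>

    static inline unsigned
    ncpus_online(void)
    {
            long n = sysconf(_SC_NPROCESSORS_ONLN);

            /* sysconf() reports -1 on failure; assume one CPU then. */
            return (n < 1) ? 1U : (unsigned)n;
    }
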
@@ -5163,9 +4893,6 @@ malloc_print_stats(void)
_malloc_message(opt_junk ? "J" : "j", "", "", "");
#endif
_malloc_message("P", "", "", "");
-#ifdef MALLOC_UTRACE
- _malloc_message(opt_utrace ? "U" : "u", "", "", "");
-#endif
#ifdef MALLOC_SYSV
_malloc_message(opt_sysv ? "V" : "v", "", "", "");
#endif
@@ -5517,14 +5244,6 @@ MALLOC_OUT:
opt_small_max_2pow++;
break;
#endif
-#ifdef MALLOC_UTRACE
- case 'u':
- opt_utrace = false;
- break;
- case 'U':
- opt_utrace = true;
- break;
-#endif
#ifdef MALLOC_SYSV
case 'v':
opt_sysv = false;
@@ -5619,8 +5338,6 @@ MALLOC_OUT:
assert((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
#endif
- UTRACE(0, 0, 0);
-
/* Various sanity checks that regard configuration. */
assert(quantum >= sizeof(void *));
assert(quantum <= pagesize);
@@ -5752,11 +5469,9 @@ MALLOC_OUT:
malloc_spin_init(&arenas_lock);
-#ifdef MALLOC_VALIDATE
chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
if (chunk_rtree == NULL)
return (true);
-#endif
malloc_initialized = true;
@@ -5765,12 +5480,6 @@ MALLOC_OUT:
pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
#endif
-#if defined(NEEDS_PTHREAD_MMAP_UNALIGNED_TSD)
- if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
- malloc_printf("<jemalloc>: Error in pthread_key_create()\n");
- }
-#endif
-
#if defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_REPLACE_MALLOC)
/*
* Overwrite the default memory allocator to use jemalloc everywhere.
@@ -5896,7 +5605,6 @@ RETURN:
errno = ENOMEM;
}
- UTRACE(0, size, ret);
return (ret);
}
@@ -5916,9 +5624,15 @@ RETURN:
#define MOZ_MEMORY_ELF
#endif
+#ifdef MOZ_MEMORY_SOLARIS
+# if (defined(__GNUC__))
+__attribute__((noinline))
+# endif
+#else
#if (defined(MOZ_MEMORY_ELF))
__attribute__((visibility ("hidden")))
#endif
+#endif
#endif /* MOZ_REPLACE_MALLOC */
#ifdef MOZ_MEMORY_ELF
@@ -5968,7 +5682,6 @@ RETURN:
abort();
}
#endif
- UTRACE(0, size, ret);
return (ret);
}
@@ -6082,7 +5795,6 @@ RETURN:
errno = ENOMEM;
}
- UTRACE(0, num_size, ret);
return (ret);
}
@@ -6146,7 +5858,6 @@ realloc_impl(void *ptr, size_t size)
#ifdef MALLOC_SYSV
RETURN:
#endif
- UTRACE(ptr, size, ret);
return (ret);
}
@@ -6157,8 +5868,6 @@ free_impl(void *ptr)
DARWIN_ONLY((szone->free)(szone, ptr); return);
- UTRACE(ptr, 0, 0);
-
/*
* A version of idalloc that checks for NULL pointer but only for
* huge allocations assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
@@ -6229,15 +5938,10 @@ malloc_usable_size_impl(MALLOC_USABLE_SIZE_CONST_PTR void *ptr)
{
DARWIN_ONLY(return (szone->size)(szone, ptr));
-#ifdef MALLOC_VALIDATE
return (isalloc_validate(ptr));
-#else
- assert(ptr != NULL);
-
- return (isalloc(ptr));
-#endif
}
+#ifdef MALLOC_STATS
MOZ_JEMALLOC_API void
jemalloc_stats_impl(jemalloc_stats_t *stats)
{
@@ -6259,11 +5963,6 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
opt_poison ? true :
#endif
false;
- stats->opt_utrace =
-#ifdef MALLOC_UTRACE
- opt_utrace ? true :
-#endif
- false;
stats->opt_sysv =
#ifdef MALLOC_SYSV
opt_sysv ? true :
@@ -6385,7 +6084,7 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
assert(stats->mapped >= stats->allocated + stats->waste +
stats->page_cache + stats->bookkeeping);
}
-
+#endif // MALLOC_STATS
#ifdef MALLOC_DOUBLE_PURGE
/* Explicitly remove all of this chunk's MADV_FREE'd pages from memory. */
diff --git a/memory/mozjemalloc/jemalloc_types.h b/memory/mozjemalloc/jemalloc_types.h
index 96165ae4d..dc778ae0a 100644
--- a/memory/mozjemalloc/jemalloc_types.h
+++ b/memory/mozjemalloc/jemalloc_types.h
@@ -2,7 +2,7 @@
/* vim:set softtabstop=8 shiftwidth=8: */
/*-
* Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
- * Copyright (C) 2015-2018 Mark Straver <moonchild@palemoon.org>
+ * Copyright (C) 2015-2019 Mark Straver <moonchild@palemoon.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,7 +57,6 @@ typedef struct {
jemalloc_bool opt_abort; /* abort(3) on error? */
jemalloc_bool opt_junk; /* Fill allocated memory with 0xe4? */
jemalloc_bool opt_poison; /* Fill free memory with 0xe5? */
- jemalloc_bool opt_utrace; /* Trace all allocation events? */
jemalloc_bool opt_sysv; /* SysV semantics? */
jemalloc_bool opt_xmalloc; /* abort(3) on OOM? */
jemalloc_bool opt_zero; /* Fill allocated memory with 0x0? */
diff --git a/memory/mozjemalloc/rb.h b/memory/mozjemalloc/rb.h
index 53a926d46..a1b08973b 100644
--- a/memory/mozjemalloc/rb.h
+++ b/memory/mozjemalloc/rb.h
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright (C) 2008 Jason Evans <jasone@FreeBSD.org>.
- * Copyright (C) 2015-2018 Mark Straver <moonchild@palemoon.org>
+ * Copyright (C) 2015-2019 Mark Straver <moonchild@palemoon.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,7 +36,6 @@
* (Optional.)
* #define SIZEOF_PTR ...
* #define SIZEOF_PTR_2POW ...
- * #define RB_NO_C99_VARARRAYS
*
* (Optional, see assert(3).)
* #define NDEBUG
@@ -72,11 +71,6 @@
#ifndef RB_H_
#define RB_H_
-#if 0
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 178995 2008-05-14 18:33:13Z jasone $");
-#endif
-
/* Node structure. */
#define rb_node(a_type) \
struct { \
@@ -774,39 +768,26 @@ a_prefix##remove(a_tree_type *tree, a_type *node) { \
* effort.
*/
-#ifdef RB_NO_C99_VARARRAYS
- /*
- * Avoid using variable-length arrays, at the cost of using more stack space.
- * Size the path arrays such that they are always large enough, even if a
- * tree consumes all of memory. Since each node must contain a minimum of
- * two pointers, there can never be more nodes than:
- *
- * 1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
- *
- * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
- * is:
- *
- * (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
- *
- * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
- * systems, respectively (approximatly 348 and 1440 bytes, respectively).
- */
-# define rbp_compute_f_height(a_type, a_field, a_tree)
-# define rbp_f_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
-# define rbp_compute_fr_height(a_type, a_field, a_tree)
-# define rbp_fr_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
-#else
-# define rbp_compute_f_height(a_type, a_field, a_tree) \
- /* Compute the maximum possible tree depth (3X the black height). */\
- unsigned rbp_f_height; \
- rbp_black_height(a_type, a_field, a_tree, rbp_f_height); \
- rbp_f_height *= 3;
-# define rbp_compute_fr_height(a_type, a_field, a_tree) \
- /* Compute the maximum possible tree depth (3X the black height). */\
- unsigned rbp_fr_height; \
- rbp_black_height(a_type, a_field, a_tree, rbp_fr_height); \
- rbp_fr_height *= 3;
-#endif
+/*
+ * Avoid using variable-length arrays.
+ * Size the path arrays such that they are always large enough, even if a
+ * tree consumes all of memory. Since each node must contain a minimum of
+ * two pointers, there can never be more nodes than:
+ *
+ * 1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
+ *
+ * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
+ * is:
+ *
+ * (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+ *
+ * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
+ * systems, respectively (approximately 348 and 1440 bytes, respectively).
+ */
+#define rbp_compute_f_height(a_type, a_field, a_tree)
+#define rbp_f_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+#define rbp_compute_fr_height(a_type, a_field, a_tree)
+#define rbp_fr_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { \
rbp_compute_f_height(a_type, a_field, a_tree) \
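
To make the arithmetic in the retained comment concrete, a small self-check of the fixed path depth (standalone and illustrative; assumes 64-bit pointers, i.e. SIZEOF_PTR_2POW == 3):

    #include <assert.h>

    #define SIZEOF_PTR_2POW 3                       /* 64-bit pointers */
    #define SIZEOF_PTR      (1U << SIZEOF_PTR_2POW)

    int
    main(void)
    {
            /* 3 * (64 - 4) = 180 levels, i.e. 1440 bytes of path array;
             * the 32-bit case (2POW == 2) gives 3 * (32 - 3) = 87. */
            assert((3 * ((SIZEOF_PTR << 3) - (SIZEOF_PTR_2POW + 1))) == 180);
            return 0;
    }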