author     wolfbeast <mcwerewolf@gmail.com>  2018-04-27 22:56:03 +0200
committer  wolfbeast <mcwerewolf@gmail.com>  2018-04-27 22:56:03 +0200
commit     90920ec0bab734c7312042c9a94847150482e6ad (patch)
tree       b0fc1bd0baf47afcb648e456542eb43fe844e4a1 /memory/mozjemalloc/jemalloc.c
parent     a7ec27a9ccca3c17ca652b7c72f418ba8e3e4e00 (diff)
Remove single-threaded considerations.
We assume our applications are always going to have multithreaded access to the malloc.
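In practical terms, the patch replaces conditional locking, guarded by the `isthreaded` flag, with unconditional locking. A minimal illustrative sketch of the before/after pattern (not code from this patch; `lock_old`/`lock_new` are hypothetical names), with the same simplification repeating across mutex init, spin locks, and arena selection in the hunks below:

#include <pthread.h>
#include <stdbool.h>

static const bool isthreaded = true;	/* was the single-thread escape hatch */
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

/* Before: every acquisition branched on the isthreaded flag. */
static inline void
lock_old(void)
{
	if (isthreaded)
		pthread_mutex_lock(&mtx);
}

/* After: multithreaded access is assumed, so lock unconditionally. */
static inline void
lock_new(void)
{
	pthread_mutex_lock(&mtx);
}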
Diffstat (limited to 'memory/mozjemalloc/jemalloc.c')
-rw-r--r--	memory/mozjemalloc/jemalloc.c	82
1 file changed, 30 insertions(+), 52 deletions(-)
diff --git a/memory/mozjemalloc/jemalloc.c b/memory/mozjemalloc/jemalloc.c
index de39c831f..8516658f5 100644
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -433,10 +433,6 @@ void *_mmap(void *addr, size_t length, int prot, int flags,
 #endif
 #endif
 
-#ifdef MOZ_MEMORY_DARWIN
-static const bool isthreaded = true;
-#endif
-
 #if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
 #define JEMALLOC_USES_MAP_ALIGN /* Required on Solaris 10. Might improve performance elsewhere. */
 #endif
@@ -471,9 +467,7 @@ static const bool isthreaded = true;
 #  define SIZEOF_PTR_2POW	2
 #endif
 #define PIC
-#ifndef MOZ_MEMORY_DARWIN
-static const bool isthreaded = true;
-#else
+#ifdef MOZ_MEMORY_DARWIN
 #  define NO_TLS
 #endif
 #if 0
@@ -1635,9 +1629,8 @@ static bool
 malloc_mutex_init(malloc_mutex_t *mutex)
 {
 #if defined(MOZ_MEMORY_WINDOWS)
-	if (isthreaded)
-		if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
-			return (true);
+	if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
+		return (true);
 #elif defined(MOZ_MEMORY_DARWIN)
 	mutex->lock = OS_SPINLOCK_INIT;
 #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
@@ -1672,8 +1665,7 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
 #elif defined(MOZ_MEMORY)
 	pthread_mutex_lock(mutex);
 #else
-	if (isthreaded)
-		_SPINLOCK(&mutex->lock);
+	_SPINLOCK(&mutex->lock);
 #endif
 }
 
@@ -1688,8 +1680,7 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
 #elif defined(MOZ_MEMORY)
 	pthread_mutex_unlock(mutex);
 #else
-	if (isthreaded)
-		_SPINUNLOCK(&mutex->lock);
+	_SPINUNLOCK(&mutex->lock);
 #endif
 }
 
@@ -1700,9 +1691,8 @@ static bool
 malloc_spin_init(malloc_spinlock_t *lock)
 {
 #if defined(MOZ_MEMORY_WINDOWS)
-	if (isthreaded)
-		if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
-			return (true);
+	if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
+		return (true);
 #elif defined(MOZ_MEMORY_DARWIN)
 	lock->lock = OS_SPINLOCK_INIT;
 #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
@@ -1735,8 +1725,7 @@ malloc_spin_lock(malloc_spinlock_t *lock)
 #elif defined(MOZ_MEMORY)
 	pthread_mutex_lock(lock);
 #else
-	if (isthreaded)
-		_SPINLOCK(&lock->lock);
+	_SPINLOCK(&lock->lock);
 #endif
 }
 
@@ -1750,8 +1739,7 @@ malloc_spin_unlock(malloc_spinlock_t *lock)
 #elif defined(MOZ_MEMORY)
 	pthread_mutex_unlock(lock);
 #else
-	if (isthreaded)
-		_SPINUNLOCK(&lock->lock);
+	_SPINUNLOCK(&lock->lock);
 #endif
 }
 
@@ -1805,30 +1793,28 @@ malloc_spin_lock(pthread_mutex_t *lock)
 {
 	unsigned ret = 0;
 
-	if (isthreaded) {
-		if (_pthread_mutex_trylock(lock) != 0) {
-			unsigned i;
-			volatile unsigned j;
-
-			/* Exponentially back off. */
-			for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
-				for (j = 0; j < (1U << i); j++)
-					ret++;
+	if (_pthread_mutex_trylock(lock) != 0) {
+		unsigned i;
+		volatile unsigned j;
 
-				CPU_SPINWAIT;
-				if (_pthread_mutex_trylock(lock) == 0)
-					return (ret);
-			}
+		/* Exponentially back off. */
+		for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
+			for (j = 0; j < (1U << i); j++)
+				ret++;
 
-			/*
-			 * Spinning failed.  Block until the lock becomes
-			 * available, in order to avoid indefinite priority
-			 * inversion.
-			 */
-			_pthread_mutex_lock(lock);
-			assert((ret << BLOCK_COST_2POW) != 0);
-			return (ret << BLOCK_COST_2POW);
+			CPU_SPINWAIT;
+			if (_pthread_mutex_trylock(lock) == 0)
+				return (ret);
 		}
+
+		/*
+		 * Spinning failed.  Block until the lock becomes
+		 * available, in order to avoid indefinite priority
+		 * inversion.
+		 */
+		_pthread_mutex_lock(lock);
+		assert((ret << BLOCK_COST_2POW) != 0);
+		return (ret << BLOCK_COST_2POW);
 	}
 
 	return (ret);
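For readers unfamiliar with the adaptive spinning above: the function first spins with exponentially growing busy-wait rounds, and only blocks on the mutex once spinning fails, so a waiter can never starve behind a descheduled lock holder. A standalone sketch of the same idea against the public pthreads API (the SPIN_LIMIT_2POW and BLOCK_COST_2POW values are assumed; the internal _pthread_* wrappers, CPU_SPINWAIT pause hint, and assert are omitted):

#include <pthread.h>

#define SPIN_LIMIT_2POW		11	/* values assumed; named after the */
#define BLOCK_COST_2POW		4	/* constants used in the hunk above */

static unsigned
adaptive_spin_lock(pthread_mutex_t *lock)
{
	unsigned ret = 0;

	if (pthread_mutex_trylock(lock) != 0) {
		unsigned i;
		volatile unsigned j;

		/* Exponentially back off: 2, 4, 8, ... busy iterations. */
		for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
			for (j = 0; j < (1U << i); j++)
				ret++;
			if (pthread_mutex_trylock(lock) == 0)
				return (ret);	/* spin count, fed to balancing */
		}

		/*
		 * Spinning failed.  Block until the lock is available, and
		 * scale the returned cost to reflect that blocking is far
		 * more expensive than spinning.
		 */
		pthread_mutex_lock(lock);
		return (ret << BLOCK_COST_2POW);
	}
	return (ret);
}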
@@ -1837,9 +1823,7 @@ malloc_spin_lock(pthread_mutex_t *lock)
 static inline void
 malloc_spin_unlock(pthread_mutex_t *lock)
 {
-
-	if (isthreaded)
-		_pthread_mutex_unlock(lock);
+	_pthread_mutex_unlock(lock);
 }
 #endif
 
@@ -3089,10 +3073,6 @@ choose_arena(void)
 	 * introduces a bootstrapping issue.
 	 */
 #ifndef NO_TLS
-	if (isthreaded == false) {
-		/* Avoid the overhead of TLS for single-threaded operation. */
-		return (arenas[0]);
-	}
 
 #  ifdef MOZ_MEMORY_WINDOWS
 	ret = (arena_t*)TlsGetValue(tlsIndex);
@@ -3105,7 +3085,7 @@ choose_arena(void)
 		RELEASE_ASSERT(ret != NULL);
 	}
 #else
-	if (isthreaded && narenas > 1) {
+	if (narenas > 1) {
 		unsigned long ind;
 
 		/*
@@ -3162,8 +3142,6 @@ choose_arena_hard(void)
 {
 	arena_t *ret;
 
-	assert(isthreaded);
-
 #ifdef MALLOC_BALANCE
 	/* Seed the PRNG used for arena load balancing. */
 	SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
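The last three hunks touch arena selection: with isthreaded gone, choose_arena() always consults thread-local storage (or hashes the thread when narenas > 1), and choose_arena_hard() no longer asserts threaded operation. A simplified sketch of the per-thread arena idea on POSIX TLS (names, NARENAS, and the modulo assignment are illustrative; the real code assigns arenas via choose_arena_hard() and also has a Windows TlsGetValue path):

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define NARENAS 4

typedef struct arena_s arena_t;
static arena_t *arenas[NARENAS];	/* assume initialized at startup */
static pthread_key_t arena_key;		/* assume pthread_key_create() ran */

static arena_t *
choose_arena_sketch(void)
{
	arena_t *ret = pthread_getspecific(arena_key);

	if (ret == NULL) {
		/* First allocation on this thread: pick an arena, cache it in
		 * TLS so later lookups skip the assignment entirely. */
		unsigned ind = (unsigned)((uintptr_t)pthread_self() % NARENAS);

		ret = arenas[ind];
		pthread_setspecific(arena_key, ret);
	}
	return (ret);
}

Spreading threads across arenas trades memory for reduced lock contention: each arena has its own lock, so two threads in different arenas never contend on the allocator fast path.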