Diffstat (limited to 'nsprpub/pr/src/malloc/prmem.c')
-rw-r--r--   nsprpub/pr/src/malloc/prmem.c   210
1 file changed, 97 insertions, 113 deletions
diff --git a/nsprpub/pr/src/malloc/prmem.c b/nsprpub/pr/src/malloc/prmem.c
index e4ae42a91..08a700f7a 100644
--- a/nsprpub/pr/src/malloc/prmem.c
+++ b/nsprpub/pr/src/malloc/prmem.c
@@ -17,7 +17,7 @@
** in cyclic dependency of initialization.
*/
-#include <string.h>
+#include <string.h>
union memBlkHdrUn;
@@ -55,12 +55,13 @@ static void pr_ZoneFree(void *ptr);
void
_PR_DestroyZones(void)
-{
+{
int i, j;
- if (!use_zone_allocator)
+ if (!use_zone_allocator) {
return;
-
+ }
+
for (j = 0; j < THREAD_POOLS; j++) {
for (i = 0; i < MEM_ZONES; i++) {
MemoryZone *mz = &zones[i][j];
@@ -72,9 +73,9 @@ _PR_DestroyZones(void)
mz->elements--;
}
}
- }
+ }
use_zone_allocator = PR_FALSE;
-}
+}
/*
** pr_FindSymbolInProg
@@ -96,8 +97,9 @@ pr_FindSymbolInProg(const char *name)
void *sym;
h = dlopen(0, RTLD_LAZY);
- if (h == NULL)
+ if (h == NULL) {
return NULL;
+ }
sym = dlsym(h, name);
(void)dlclose(h);
return sym;
@@ -113,8 +115,9 @@ pr_FindSymbolInProg(const char *name)
shl_t h = NULL;
void *sym;
- if (shl_findsym(&h, name, TYPE_DATA, &sym) == -1)
+ if (shl_findsym(&h, name, TYPE_DATA, &sym) == -1) {
return NULL;
+ }
return sym;
}
@@ -157,17 +160,18 @@ _PR_InitZones(void)
use_zone_allocator = (atoi(envp) == 1);
}
- if (!use_zone_allocator)
+ if (!use_zone_allocator) {
return;
+ }
- for (j = 0; j < THREAD_POOLS; j++) {
+ for (j = 0; j < THREAD_POOLS; j++) {
for (i = 0; i < MEM_ZONES; i++) {
MemoryZone *mz = &zones[i][j];
int rv = pthread_mutex_init(&mz->lock, NULL);
PR_ASSERT(0 == rv);
if (rv != 0) {
goto loser;
- }
+ }
mz->blockSize = 16 << ( 2 * i);
}
}
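
Note on the hunk above: each zone's block size is derived as blockSize = 16 << (2 * i), so successive zones quadruple in size. A minimal, self-contained sketch of the resulting size classes (MEM_ZONES is assumed to be 7 here for illustration; the actual constant is defined earlier in prmem.c):

/* Sketch only: MEM_ZONES is an assumed value, not taken from this diff. */
#include <stdio.h>

#define MEM_ZONES 7

int main(void)
{
    int i;
    /* Zone i serves blocks of 16 << (2 * i) bytes:
     * 16, 64, 256, 1024, 4096, 16384, 65536. */
    for (i = 0; i < MEM_ZONES; i++) {
        printf("zone %d: blockSize %d\n", i, 16 << (2 * i));
    }
    return 0;
}
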
@@ -189,11 +193,11 @@ PR_FPrintZoneStats(PRFileDesc *debug_out)
MemoryZone zone = *mz;
if (zone.elements || zone.misses || zone.hits) {
PR_fprintf(debug_out,
-"pool: %d, zone: %d, size: %d, free: %d, hit: %d, miss: %d, contend: %d\n",
- j, i, zone.blockSize, zone.elements,
- zone.hits, zone.misses, zone.contention);
+ "pool: %d, zone: %d, size: %d, free: %d, hit: %d, miss: %d, contend: %d\n",
+ j, i, zone.blockSize, zone.elements,
+ zone.hits, zone.misses, zone.contention);
}
- }
+ }
}
}
@@ -223,8 +227,9 @@ pr_ZoneMalloc(PRUint32 size)
wasLocked = mz->locked;
pthread_mutex_lock(&mz->lock);
mz->locked = 1;
- if (wasLocked)
+ if (wasLocked) {
mz->contention++;
+ }
if (mz->head) {
mb = mz->head;
PR_ASSERT(mb->s.magic == ZONE_MAGIC);
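
The wasLocked/contention bookkeeping in the hunk above counts lock contention without resorting to pthread_mutex_trylock. A minimal sketch of the same idiom (the struct and function names here are illustrative, not NSPR's):

#include <pthread.h>

/* Illustrative zone-like structure using the contention-counting
 * pattern seen in pr_ZoneMalloc and pr_ZoneFree. */
struct zone {
    pthread_mutex_t lock;
    int locked;          /* set while the lock is held */
    unsigned contention; /* bumped when the lock was already held on arrival */
};

static void zone_enter(struct zone *z)
{
    int wasLocked = z->locked;   /* unsynchronized read, but only a statistic */
    pthread_mutex_lock(&z->lock);
    z->locked = 1;
    if (wasLocked) {
        z->contention++;
    }
}

static void zone_exit(struct zone *z)
{
    z->locked = 0;
    pthread_mutex_unlock(&z->lock);
}
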
@@ -312,15 +317,16 @@ pr_ZoneRealloc(void *oldptr, PRUint32 bytes)
int ours;
MemBlockHdr phony;
- if (!oldptr)
+ if (!oldptr) {
return pr_ZoneMalloc(bytes);
+ }
mb = (MemBlockHdr *)((char *)oldptr - (sizeof *mb));
if (mb->s.magic != ZONE_MAGIC) {
/* Maybe this just came from ordinary malloc */
#ifdef DEBUG
fprintf(stderr,
- "Warning: reallocing memory block %p from ordinary malloc\n",
- oldptr);
+ "Warning: reallocing memory block %p from ordinary malloc\n",
+ oldptr);
#endif
/*
* We are going to realloc oldptr. If realloc succeeds, the
@@ -358,7 +364,7 @@ pr_ZoneRealloc(void *oldptr, PRUint32 bytes)
PR_ASSERT(mt->s.magic == ZONE_MAGIC);
PR_ASSERT(mt->s.zone == mb->s.zone);
PR_ASSERT(mt->s.blockSize == blockSize);
-
+
if (bytes <= blockSize) {
/* The block is already big enough. */
mt->s.requestedSize = mb->s.requestedSize = bytes;
@@ -370,13 +376,16 @@ pr_ZoneRealloc(void *oldptr, PRUint32 bytes)
return rv;
}
}
-
- if (oldptr && mb->s.requestedSize)
+
+ if (oldptr && mb->s.requestedSize) {
memcpy(rv, oldptr, mb->s.requestedSize);
- if (ours)
+ }
+ if (ours) {
pr_ZoneFree(oldptr);
- else if (oldptr)
+ }
+ else if (oldptr) {
free(oldptr);
+ }
return rv;
}
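
pr_ZoneRealloc and pr_ZoneFree recognize their own blocks by a magic word stored in a header just below the user pointer; anything else is treated as coming from ordinary malloc. A reduced sketch of that header check (the layout and magic value are simplified assumptions, not NSPR's exact MemBlockHdr):

#include <stdio.h>
#include <stdlib.h>

#define ZONE_MAGIC 0x0BADC0DE   /* assumed value for this sketch */

/* Simplified header; the real MemBlockHdr is a union with more fields. */
typedef struct {
    unsigned magic;
    size_t   requestedSize;
} BlkHdr;

static void *toy_zone_malloc(size_t bytes)
{
    BlkHdr *h = malloc(sizeof *h + bytes);
    if (!h) {
        return NULL;
    }
    h->magic = ZONE_MAGIC;
    h->requestedSize = bytes;
    return h + 1;                /* user pointer sits just past the header */
}

static void toy_zone_free(void *ptr)
{
    BlkHdr *h;
    if (!ptr) {
        return;
    }
    h = (BlkHdr *)ptr - 1;       /* step back to the header */
    if (h->magic != ZONE_MAGIC) {
        /* Not ours: same fallback pr_ZoneFree uses for foreign blocks. */
        free(ptr);
        return;
    }
    free(h);                     /* a real zone allocator would recycle the block */
}
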
@@ -388,8 +397,9 @@ pr_ZoneFree(void *ptr)
size_t blockSize;
PRUint32 wasLocked;
- if (!ptr)
+ if (!ptr) {
return;
+ }
mb = (MemBlockHdr *)((char *)ptr - (sizeof *mb));
@@ -397,7 +407,7 @@ pr_ZoneFree(void *ptr)
/* maybe this came from ordinary malloc */
#ifdef DEBUG
fprintf(stderr,
- "Warning: freeing memory block %p from ordinary malloc\n", ptr);
+ "Warning: freeing memory block %p from ordinary malloc\n", ptr);
#endif
free(ptr);
return;
@@ -419,8 +429,9 @@ pr_ZoneFree(void *ptr)
wasLocked = mz->locked;
pthread_mutex_lock(&mz->lock);
mz->locked = 1;
- if (wasLocked)
+ if (wasLocked) {
mz->contention++;
+ }
mt->s.next = mb->s.next = mz->head; /* put on head of list */
mz->head = mb;
mz->elements++;
@@ -430,32 +441,40 @@ pr_ZoneFree(void *ptr)
PR_IMPLEMENT(void *) PR_Malloc(PRUint32 size)
{
- if (!_pr_initialized) _PR_ImplicitInitialization();
+ if (!_pr_initialized) {
+ _PR_ImplicitInitialization();
+ }
return use_zone_allocator ? pr_ZoneMalloc(size) : malloc(size);
}
PR_IMPLEMENT(void *) PR_Calloc(PRUint32 nelem, PRUint32 elsize)
{
- if (!_pr_initialized) _PR_ImplicitInitialization();
+ if (!_pr_initialized) {
+ _PR_ImplicitInitialization();
+ }
return use_zone_allocator ?
- pr_ZoneCalloc(nelem, elsize) : calloc(nelem, elsize);
+ pr_ZoneCalloc(nelem, elsize) : calloc(nelem, elsize);
}
PR_IMPLEMENT(void *) PR_Realloc(void *ptr, PRUint32 size)
{
- if (!_pr_initialized) _PR_ImplicitInitialization();
+ if (!_pr_initialized) {
+ _PR_ImplicitInitialization();
+ }
return use_zone_allocator ? pr_ZoneRealloc(ptr, size) : realloc(ptr, size);
}
PR_IMPLEMENT(void) PR_Free(void *ptr)
{
- if (use_zone_allocator)
+ if (use_zone_allocator) {
pr_ZoneFree(ptr);
- else
+ }
+ else {
free(ptr);
+ }
}
#else /* !defined(_PR_ZONE_ALLOCATOR) */
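
The PR_Malloc/PR_Calloc/PR_Realloc/PR_Free wrappers above all follow one pattern: trigger implicit initialization if needed, then dispatch on use_zone_allocator. A small usage sketch from the caller's side (assumes the NSPR headers and library are available; exact build flags vary by platform):

#include "prmem.h"

int main(void)
{
    /* Routed to the zone allocator when _PR_InitZones enables it,
     * otherwise to plain malloc/realloc/free. */
    char *buf = (char *)PR_Malloc(64);
    char *bigger;
    if (!buf) {
        return 1;
    }
    bigger = (char *)PR_Realloc(buf, 128);  /* PR_Realloc(NULL, n) acts like PR_Malloc(n) */
    if (!bigger) {
        PR_Free(buf);
        return 1;
    }
    PR_Free(bigger);                        /* freeing NULL is a no-op in both paths */
    return 0;
}
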
@@ -481,7 +500,7 @@ PR_IMPLEMENT(void *) PR_Calloc(PRUint32 nelem, PRUint32 elsize)
{
#if defined (WIN16)
return PR_MD_calloc( (size_t)nelem, (size_t)elsize );
-
+
#else
return calloc(nelem, elsize);
#endif
@@ -519,7 +538,7 @@ PR_IMPLEMENT(void) PR_Free(void *ptr)
** PR_AttachThread has been called (on a native thread that nspr has yet
** to be told about) we could get royally screwed if the lock was busy
** and we tried to context switch the thread away. In this scenario
-** PR_CURRENT_THREAD() == NULL
+** PR_CURRENT_THREAD() == NULL
**
** To avoid this unfortunate case, we use the low level locking
** facilities for malloc protection instead of the slightly higher level
@@ -540,80 +559,61 @@ static PRBool _PR_malloc_initialised = PR_FALSE;
#ifdef _PR_PTHREADS
static pthread_mutex_t _PR_MD_malloc_crustylock;
-#define _PR_Lock_Malloc() { \
- if(PR_TRUE == _PR_malloc_initialised) { \
- PRStatus rv; \
- rv = pthread_mutex_lock(&_PR_MD_malloc_crustylock); \
- PR_ASSERT(0 == rv); \
- }
-
-#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
- PRStatus rv; \
- rv = pthread_mutex_unlock(&_PR_MD_malloc_crustylock); \
- PR_ASSERT(0 == rv); \
- } \
- }
+#define _PR_Lock_Malloc() { \
+ if(PR_TRUE == _PR_malloc_initialised) { \
+ PRStatus rv; \
+ rv = pthread_mutex_lock(&_PR_MD_malloc_crustylock); \
+ PR_ASSERT(0 == rv); \
+ }
+
+#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
+ PRStatus rv; \
+ rv = pthread_mutex_unlock(&_PR_MD_malloc_crustylock); \
+ PR_ASSERT(0 == rv); \
+ } \
+ }
#else /* _PR_PTHREADS */
static _MDLock _PR_MD_malloc_crustylock;
-#ifdef IRIX
-#define _PR_Lock_Malloc() { \
- PRIntn _is; \
- if(PR_TRUE == _PR_malloc_initialised) { \
- if (_PR_MD_GET_ATTACHED_THREAD() && \
- !_PR_IS_NATIVE_THREAD( \
- _PR_MD_GET_ATTACHED_THREAD())) \
- _PR_INTSOFF(_is); \
- _PR_MD_LOCK(&_PR_MD_malloc_crustylock); \
- }
-
-#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
- _PR_MD_UNLOCK(&_PR_MD_malloc_crustylock); \
- if (_PR_MD_GET_ATTACHED_THREAD() && \
- !_PR_IS_NATIVE_THREAD( \
- _PR_MD_GET_ATTACHED_THREAD())) \
- _PR_INTSON(_is); \
- } \
- }
-#else /* IRIX */
-#define _PR_Lock_Malloc() { \
- PRIntn _is; \
- if(PR_TRUE == _PR_malloc_initialised) { \
- if (_PR_MD_CURRENT_THREAD() && \
- !_PR_IS_NATIVE_THREAD( \
- _PR_MD_CURRENT_THREAD())) \
- _PR_INTSOFF(_is); \
- _PR_MD_LOCK(&_PR_MD_malloc_crustylock); \
- }
-
-#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
- _PR_MD_UNLOCK(&_PR_MD_malloc_crustylock); \
- if (_PR_MD_CURRENT_THREAD() && \
- !_PR_IS_NATIVE_THREAD( \
- _PR_MD_CURRENT_THREAD())) \
- _PR_INTSON(_is); \
- } \
- }
-#endif /* IRIX */
+#define _PR_Lock_Malloc() { \
+ PRIntn _is; \
+ if(PR_TRUE == _PR_malloc_initialised) { \
+ if (_PR_MD_CURRENT_THREAD() && \
+ !_PR_IS_NATIVE_THREAD( \
+ _PR_MD_CURRENT_THREAD())) \
+ _PR_INTSOFF(_is); \
+ _PR_MD_LOCK(&_PR_MD_malloc_crustylock); \
+ }
+
+#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
+ _PR_MD_UNLOCK(&_PR_MD_malloc_crustylock); \
+ if (_PR_MD_CURRENT_THREAD() && \
+ !_PR_IS_NATIVE_THREAD( \
+ _PR_MD_CURRENT_THREAD())) \
+ _PR_INTSON(_is); \
+ } \
+ }
#endif /* _PR_PTHREADS */
PR_IMPLEMENT(PRStatus) _PR_MallocInit(void)
{
PRStatus rv = PR_SUCCESS;
- if( PR_TRUE == _PR_malloc_initialised ) return PR_SUCCESS;
+ if( PR_TRUE == _PR_malloc_initialised ) {
+ return PR_SUCCESS;
+ }
#ifdef _PR_PTHREADS
{
- int status;
- pthread_mutexattr_t mattr;
-
- status = _PT_PTHREAD_MUTEXATTR_INIT(&mattr);
- PR_ASSERT(0 == status);
- status = _PT_PTHREAD_MUTEX_INIT(_PR_MD_malloc_crustylock, mattr);
- PR_ASSERT(0 == status);
- status = _PT_PTHREAD_MUTEXATTR_DESTROY(&mattr);
- PR_ASSERT(0 == status);
+ int status;
+ pthread_mutexattr_t mattr;
+
+ status = _PT_PTHREAD_MUTEXATTR_INIT(&mattr);
+ PR_ASSERT(0 == status);
+ status = _PT_PTHREAD_MUTEX_INIT(_PR_MD_malloc_crustylock, mattr);
+ PR_ASSERT(0 == status);
+ status = _PT_PTHREAD_MUTEXATTR_DESTROY(&mattr);
+ PR_ASSERT(0 == status);
}
#else /* _PR_PTHREADS */
_MD_NEW_LOCK(&_PR_MD_malloc_crustylock);
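
Under _PR_PTHREADS, _PR_MallocInit builds the malloc lock through the _PT_PTHREAD_MUTEXATTR_* wrappers. In plain pthreads terms the same attr-init / mutex-init / attr-destroy sequence looks roughly like this (a sketch under that assumption, not the NSPR macros themselves):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t malloc_lock;

static int malloc_lock_init(void)
{
    pthread_mutexattr_t mattr;
    int status;

    status = pthread_mutexattr_init(&mattr);
    assert(status == 0);
    status = pthread_mutex_init(&malloc_lock, &mattr);
    assert(status == 0);
    status = pthread_mutexattr_destroy(&mattr);
    assert(status == 0);
    return status;
}
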
@@ -636,22 +636,6 @@ void *malloc(size_t size)
return p;
}
-#if defined(IRIX)
-void *memalign(size_t alignment, size_t size)
-{
- void *p;
- _PR_Lock_Malloc();
- p = _PR_UnlockedMemalign(alignment, size);
- _PR_Unlock_Malloc();
- return p;
-}
-
-void *valloc(size_t size)
-{
- return(memalign(sysconf(_SC_PAGESIZE),size));
-}
-#endif /* IRIX */
-
void free(void *ptr)
{
_PR_Lock_Malloc();