| author | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
|---|---|---|
| committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
| commit | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch) | |
| tree | 10027f336435511475e392454359edea8e25895d /nsprpub/pr/src/bthreads | |
| parent | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff) | |
Add m-esr52 at 52.6.0
Diffstat (limited to 'nsprpub/pr/src/bthreads')
| Mode | File | Lines added |
|---|---|---|
| -rw-r--r-- | nsprpub/pr/src/bthreads/Makefile.in | 31 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/bsrcs.mk | 17 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/btcvar.c | 244 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/btlocks.c | 91 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/btmisc.c | 72 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/btmon.c | 201 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/btsem.c | 98 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/btthread.c | 662 |
| -rw-r--r-- | nsprpub/pr/src/bthreads/objs.mk | 11 |
9 files changed, 1427 insertions, 0 deletions
diff --git a/nsprpub/pr/src/bthreads/Makefile.in b/nsprpub/pr/src/bthreads/Makefile.in new file mode 100644 index 000000000..a85e6db67 --- /dev/null +++ b/nsprpub/pr/src/bthreads/Makefile.in @@ -0,0 +1,31 @@ +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#! gmake + +MOD_DEPTH = ../../.. +topsrcdir = @top_srcdir@ +srcdir = @srcdir@ +VPATH = @srcdir@ + +include $(MOD_DEPTH)/config/autoconf.mk + +include $(topsrcdir)/config/config.mk + +include $(srcdir)/bsrcs.mk +CSRCS += $(BTCSRCS) + +TARGETS = $(OBJS) + +INCLUDES = -I$(dist_includedir) -I$(topsrcdir)/pr/include -I$(topsrcdir)/pr/include/private + +include $(topsrcdir)/config/rules.mk + +DEFINES += -D_NSPR_BUILD_ + +export:: $(TARGETS) + + diff --git a/nsprpub/pr/src/bthreads/bsrcs.mk b/nsprpub/pr/src/bthreads/bsrcs.mk new file mode 100644 index 000000000..70832990c --- /dev/null +++ b/nsprpub/pr/src/bthreads/bsrcs.mk @@ -0,0 +1,17 @@ +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# this file lists the source files to be compiled (used in Makefile) and +# then enumerated as object files (in objs.mk) for inclusion in the NSPR +# shared library + +BTCSRCS = \ + btthread.c \ + btlocks.c \ + btcvar.c \ + btmon.c \ + btsem.c \ + btmisc.c \ + $(NULL) diff --git a/nsprpub/pr/src/bthreads/btcvar.c b/nsprpub/pr/src/bthreads/btcvar.c new file mode 100644 index 000000000..c74837907 --- /dev/null +++ b/nsprpub/pr/src/bthreads/btcvar.c @@ -0,0 +1,244 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include <kernel/OS.h> + +#include "primpl.h" + +/* +** Create a new condition variable. +** +** "lock" is the lock used to protect the condition variable. +** +** Condition variables are synchronization objects that threads can use +** to wait for some condition to occur. +** +** This may fail if memory is tight or if some operating system resource +** is low. In such cases, a NULL will be returned. +*/ +PR_IMPLEMENT(PRCondVar*) + PR_NewCondVar (PRLock *lock) +{ + PRCondVar *cv = PR_NEW( PRCondVar ); + PR_ASSERT( NULL != lock ); + if( NULL != cv ) + { + cv->lock = lock; + cv->sem = create_sem(0, "CVSem"); + cv->handshakeSem = create_sem(0, "CVHandshake"); + cv->signalSem = create_sem( 0, "CVSignal"); + cv->signalBenCount = 0; + cv->ns = cv->nw = 0; + PR_ASSERT( cv->sem >= B_NO_ERROR ); + PR_ASSERT( cv->handshakeSem >= B_NO_ERROR ); + PR_ASSERT( cv->signalSem >= B_NO_ERROR ); + } + return cv; +} /* PR_NewCondVar */ + +/* +** Destroy a condition variable. There must be no thread +** waiting on the condvar. The caller is responsible for guaranteeing +** that the condvar is no longer in use. 
+** +*/ +PR_IMPLEMENT(void) + PR_DestroyCondVar (PRCondVar *cvar) +{ + status_t result = delete_sem( cvar->sem ); + PR_ASSERT( result == B_NO_ERROR ); + + result = delete_sem( cvar->handshakeSem ); + PR_ASSERT( result == B_NO_ERROR ); + + result = delete_sem( cvar->signalSem ); + PR_ASSERT( result == B_NO_ERROR ); + + PR_DELETE( cvar ); +} + +/* +** The thread that waits on a condition is blocked in a "waiting on +** condition" state until another thread notifies the condition or a +** caller specified amount of time expires. The lock associated with +** the condition variable will be released, which must have be held +** prior to the call to wait. +** +** Logically a notified thread is moved from the "waiting on condition" +** state and made "ready." When scheduled, it will attempt to reacquire +** the lock that it held when wait was called. +** +** The timeout has two well known values, PR_INTERVAL_NO_TIMEOUT and +** PR_INTERVAL_NO_WAIT. The former value requires that a condition be +** notified (or the thread interrupted) before it will resume from the +** wait. If the timeout has a value of PR_INTERVAL_NO_WAIT, the effect +** is to release the lock, possibly causing a rescheduling within the +** runtime, then immediately attempting to reacquire the lock and resume. +** +** Any other value for timeout will cause the thread to be rescheduled +** either due to explicit notification or an expired interval. The latter +** must be determined by treating time as one part of the monitored data +** being protected by the lock and tested explicitly for an expired +** interval. +** +** Returns PR_FAILURE if the caller has not locked the lock associated +** with the condition variable or the thread was interrupted (PR_Interrupt()). +** The particular reason can be extracted with PR_GetError(). +*/ +PR_IMPLEMENT(PRStatus) + PR_WaitCondVar (PRCondVar *cvar, PRIntervalTime timeout) +{ + status_t err; + if( timeout == PR_INTERVAL_NO_WAIT ) + { + PR_Unlock( cvar->lock ); + PR_Lock( cvar->lock ); + return PR_SUCCESS; + } + + if( atomic_add( &cvar->signalBenCount, 1 ) > 0 ) + { + if (acquire_sem(cvar->signalSem) == B_INTERRUPTED) + { + atomic_add( &cvar->signalBenCount, -1 ); + return PR_FAILURE; + } + } + cvar->nw += 1; + if( atomic_add( &cvar->signalBenCount, -1 ) > 1 ) + { + release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE); + } + + PR_Unlock( cvar->lock ); + if( timeout==PR_INTERVAL_NO_TIMEOUT ) + { + err = acquire_sem(cvar->sem); + } + else + { + err = acquire_sem_etc(cvar->sem, 1, B_RELATIVE_TIMEOUT, PR_IntervalToMicroseconds(timeout) ); + } + + if( atomic_add( &cvar->signalBenCount, 1 ) > 0 ) + { + while (acquire_sem(cvar->signalSem) == B_INTERRUPTED); + } + + if (cvar->ns > 0) + { + release_sem_etc(cvar->handshakeSem, 1, B_DO_NOT_RESCHEDULE); + cvar->ns -= 1; + } + cvar->nw -= 1; + if( atomic_add( &cvar->signalBenCount, -1 ) > 1 ) + { + release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE); + } + + PR_Lock( cvar->lock ); + if(err!=B_NO_ERROR) + { + return PR_FAILURE; + } + return PR_SUCCESS; +} + +/* +** Notify ONE thread that is currently waiting on 'cvar'. Which thread is +** dependent on the implementation of the runtime. Common sense would dictate +** that all threads waiting on a single condition have identical semantics, +** therefore which one gets notified is not significant. +** +** The calling thead must hold the lock that protects the condition, as +** well as the invariants that are tightly bound to the condition, when +** notify is called. 
+** +** Returns PR_FAILURE if the caller has not locked the lock associated +** with the condition variable. +*/ +PR_IMPLEMENT(PRStatus) + PR_NotifyCondVar (PRCondVar *cvar) +{ + status_t err ; + if( atomic_add( &cvar->signalBenCount, 1 ) > 0 ) + { + if (acquire_sem(cvar->signalSem) == B_INTERRUPTED) + { + atomic_add( &cvar->signalBenCount, -1 ); + return PR_FAILURE; + } + } + if (cvar->nw > cvar->ns) + { + cvar->ns += 1; + release_sem_etc(cvar->sem, 1, B_DO_NOT_RESCHEDULE); + if( atomic_add( &cvar->signalBenCount, -1 ) > 1 ) + { + release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE); + } + + while (acquire_sem(cvar->handshakeSem) == B_INTERRUPTED) + { + err = B_INTERRUPTED; + } + } + else + { + if( atomic_add( &cvar->signalBenCount, -1 ) > 1 ) + { + release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE); + } + } + return PR_SUCCESS; +} + +/* +** Notify all of the threads waiting on the condition variable. The order +** that the threads are notified is indeterminant. The lock that protects +** the condition must be held. +** +** Returns PR_FAILURE if the caller has not locked the lock associated +** with the condition variable. +*/ +PR_IMPLEMENT(PRStatus) + PR_NotifyAllCondVar (PRCondVar *cvar) +{ + int32 handshakes; + status_t err = B_OK; + + if( atomic_add( &cvar->signalBenCount, 1 ) > 0 ) + { + if (acquire_sem(cvar->signalSem) == B_INTERRUPTED) + { + atomic_add( &cvar->signalBenCount, -1 ); + return PR_FAILURE; + } + } + + if (cvar->nw > cvar->ns) + { + handshakes = cvar->nw - cvar->ns; + cvar->ns = cvar->nw; + release_sem_etc(cvar->sem, handshakes, B_DO_NOT_RESCHEDULE); + if( atomic_add( &cvar->signalBenCount, -1 ) > 1 ) + { + release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE); + } + + while (acquire_sem_etc(cvar->handshakeSem, handshakes, 0, 0) == B_INTERRUPTED) + { + err = B_INTERRUPTED; + } + } + else + { + if( atomic_add( &cvar->signalBenCount, -1 ) > 1 ) + { + release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE); + } + } + return PR_SUCCESS; +} diff --git a/nsprpub/pr/src/bthreads/btlocks.c b/nsprpub/pr/src/bthreads/btlocks.c new file mode 100644 index 000000000..994c09c40 --- /dev/null +++ b/nsprpub/pr/src/bthreads/btlocks.c @@ -0,0 +1,91 @@ +/* -*- Mode: C++; c-basic-offset: 4 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +/* +** File: btlocks.c +** Description: Implemenation for thread locks using bthreads +** Exports: prlock.h +*/ + +#include "primpl.h" + +#include <string.h> +#include <sys/time.h> + +void +_PR_InitLocks (void) +{ +} + +PR_IMPLEMENT(PRLock*) + PR_NewLock (void) +{ + PRLock *lock; + status_t semresult; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + lock = PR_NEWZAP(PRLock); + if (lock != NULL) { + + lock->benaphoreCount = 0; + lock->semaphoreID = create_sem( 0, "nsprLockSem" ); + if( lock->semaphoreID < B_NO_ERROR ) { + + PR_DELETE( lock ); + lock = NULL; + } + } + + return lock; +} + +PR_IMPLEMENT(void) + PR_DestroyLock (PRLock* lock) +{ + status_t result; + + PR_ASSERT(NULL != lock); + result = delete_sem(lock->semaphoreID); + PR_ASSERT(result == B_NO_ERROR); + PR_DELETE(lock); +} + +PR_IMPLEMENT(void) + PR_Lock (PRLock* lock) +{ + PR_ASSERT(lock != NULL); + + if( atomic_add( &lock->benaphoreCount, 1 ) > 0 ) { + + if( acquire_sem(lock->semaphoreID ) != B_NO_ERROR ) { + + atomic_add( &lock->benaphoreCount, -1 ); + return; + } + } + + lock->owner = find_thread( NULL ); +} + +PR_IMPLEMENT(PRStatus) + PR_Unlock (PRLock* lock) +{ + PR_ASSERT(lock != NULL); + lock->owner = NULL; + if( atomic_add( &lock->benaphoreCount, -1 ) > 1 ) { + + release_sem_etc( lock->semaphoreID, 1, B_DO_NOT_RESCHEDULE ); + } + + return PR_SUCCESS; +} + +PR_IMPLEMENT(void) + PR_AssertCurrentThreadOwnsLock(PRLock *lock) +{ + PR_ASSERT(lock != NULL); + PR_ASSERT(lock->owner == find_thread( NULL )); +} diff --git a/nsprpub/pr/src/bthreads/btmisc.c b/nsprpub/pr/src/bthreads/btmisc.c new file mode 100644 index 000000000..8d84a6069 --- /dev/null +++ b/nsprpub/pr/src/bthreads/btmisc.c @@ -0,0 +1,72 @@ +/* -*- Mode: C++; c-basic-offset: 4 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "primpl.h" +#include <stdio.h> + +// void _PR_InitCPUs(void) {PT_LOG("_PR_InitCPUs")} +// void _MD_StartInterrupts(void) {PT_LOG("_MD_StartInterrupts")} + +/* this is a total hack.. */ + +struct protoent* getprotobyname(const char* name) +{ + return 0; +} + +struct protoent* getprotobynumber(int number) +{ + return 0; +} + +/* this is needed by prinit for some reason */ +void +_PR_InitStacks (void) +{ +} + +/* this is needed by prinit for some reason */ +void +_PR_InitTPD (void) +{ +} + +/* +** Create extra virtual processor threads. Generally used with MP systems. +*/ +PR_IMPLEMENT(void) + PR_SetConcurrency (PRUintn numCPUs) +{ +} + +/* +** Set thread recycle mode to on (1) or off (0) +*/ +PR_IMPLEMENT(void) + PR_SetThreadRecycleMode (PRUint32 flag) +{ +} + +/* +** Get context registers, return with error for now. +*/ + +PR_IMPLEMENT(PRWord *) +_MD_HomeGCRegisters( PRThread *t, int isCurrent, int *np ) +{ + return 0; +} + +PR_IMPLEMENT(void *) +PR_GetSP( PRThread *t ) +{ + return 0; +} + +PR_IMPLEMENT(PRStatus) +PR_EnumerateThreads( PREnumerator func, void *arg ) +{ + return PR_FAILURE; +} diff --git a/nsprpub/pr/src/bthreads/btmon.c b/nsprpub/pr/src/bthreads/btmon.c new file mode 100644 index 000000000..bd05e73dc --- /dev/null +++ b/nsprpub/pr/src/bthreads/btmon.c @@ -0,0 +1,201 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include <kernel/OS.h> + +#include "primpl.h" + +/* +** Create a new monitor. Monitors are re-entrant locks with a single built-in +** condition variable. +** +** This may fail if memory is tight or if some operating system resource +** is low. +*/ +PR_IMPLEMENT(PRMonitor*) + PR_NewMonitor (void) +{ + PRMonitor *mon; + PRCondVar *cvar; + PRLock *lock; + + mon = PR_NEWZAP( PRMonitor ); + if( mon ) + { + lock = PR_NewLock(); + if( !lock ) + { + PR_DELETE( mon ); + return( 0 ); + } + + cvar = PR_NewCondVar( lock ); + if( !cvar ) + { + PR_DestroyLock( lock ); + PR_DELETE( mon ); + return( 0 ); + } + + mon->cvar = cvar; + mon->name = NULL; + } + + return( mon ); +} + +PR_IMPLEMENT(PRMonitor*) PR_NewNamedMonitor(const char* name) +{ + PRMonitor* mon = PR_NewMonitor(); + if( mon ) + { + mon->name = name; + } + return mon; +} + +/* +** Destroy a monitor. The caller is responsible for guaranteeing that the +** monitor is no longer in use. There must be no thread waiting on the +** monitor's condition variable and that the lock is not held. +** +*/ +PR_IMPLEMENT(void) + PR_DestroyMonitor (PRMonitor *mon) +{ + PR_DestroyLock( mon->cvar->lock ); + PR_DestroyCondVar( mon->cvar ); + PR_DELETE( mon ); +} + +/* +** Enter the lock associated with the monitor. If the calling thread currently +** is in the monitor, the call to enter will silently succeed. In either case, +** it will increment the entry count by one. +*/ +PR_IMPLEMENT(void) + PR_EnterMonitor (PRMonitor *mon) +{ + if( mon->cvar->lock->owner == find_thread( NULL ) ) + { + mon->entryCount++; + + } else + { + PR_Lock( mon->cvar->lock ); + mon->entryCount = 1; + } +} + +/* +** Decrement the entry count associated with the monitor. If the decremented +** entry count is zero, the monitor is exited. Returns PR_FAILURE if the +** calling thread has not entered the monitor. +*/ +PR_IMPLEMENT(PRStatus) + PR_ExitMonitor (PRMonitor *mon) +{ + if( mon->cvar->lock->owner != find_thread( NULL ) ) + { + return( PR_FAILURE ); + } + if( --mon->entryCount == 0 ) + { + return( PR_Unlock( mon->cvar->lock ) ); + } + return( PR_SUCCESS ); +} + +/* +** Wait for a notify on the monitor's condition variable. Sleep for "ticks" +** amount of time (if "ticks" is PR_INTERVAL_NO_TIMEOUT then the sleep is +** indefinite). +** +** While the thread is waiting it exits the monitor (as if it called +** PR_ExitMonitor as many times as it had called PR_EnterMonitor). When +** the wait has finished the thread regains control of the monitors lock +** with the same entry count as before the wait began. +** +** The thread waiting on the monitor will be resumed when the monitor is +** notified (assuming the thread is the next in line to receive the +** notify) or when the "ticks" timeout elapses. +** +** Returns PR_FAILURE if the caller has not entered the monitor. +*/ +PR_IMPLEMENT(PRStatus) + PR_Wait (PRMonitor *mon, PRIntervalTime ticks) +{ + PRUint32 entryCount; + PRUintn status; + PRThread *meThread; + thread_id me = find_thread( NULL ); + meThread = PR_GetCurrentThread(); + + if( mon->cvar->lock->owner != me ) return( PR_FAILURE ); + + entryCount = mon->entryCount; + mon->entryCount = 0; + + status = PR_WaitCondVar( mon->cvar, ticks ); + + mon->entryCount = entryCount; + + return( status ); +} + +/* +** Notify a thread waiting on the monitor's condition variable. 
If a thread +** is waiting on the condition variable (using PR_Wait) then it is awakened +** and attempts to reenter the monitor. +*/ +PR_IMPLEMENT(PRStatus) + PR_Notify (PRMonitor *mon) +{ + if( mon->cvar->lock->owner != find_thread( NULL ) ) + { + return( PR_FAILURE ); + } + + PR_NotifyCondVar( mon->cvar ); + return( PR_SUCCESS ); +} + +/* +** Notify all of the threads waiting on the monitor's condition variable. +** All of threads waiting on the condition are scheduled to reenter the +** monitor. +*/ +PR_IMPLEMENT(PRStatus) + PR_NotifyAll (PRMonitor *mon) +{ + if( mon->cvar->lock->owner != find_thread( NULL ) ) + { + return( PR_FAILURE ); + } + + PR_NotifyAllCondVar( mon->cvar ); + return( PR_SUCCESS ); +} + +/* +** Return the number of times that the current thread has entered the +** lock. Returns zero if the current thread has not entered the lock. +*/ +PR_IMPLEMENT(PRIntn) + PR_GetMonitorEntryCount(PRMonitor *mon) +{ + return( (mon->cvar->lock->owner == find_thread( NULL )) ? + mon->entryCount : 0 ); +} + +/* +** If the current thread is in |mon|, this assertion is guaranteed to +** succeed. Otherwise, the behavior of this function is undefined. +*/ +PR_IMPLEMENT(void) + PR_AssertCurrentThreadInMonitor(PRMonitor *mon) +{ + PR_ASSERT_CURRENT_THREAD_OWNS_LOCK(mon->cvar->lock); +} diff --git a/nsprpub/pr/src/bthreads/btsem.c b/nsprpub/pr/src/bthreads/btsem.c new file mode 100644 index 000000000..011ee6bd3 --- /dev/null +++ b/nsprpub/pr/src/bthreads/btsem.c @@ -0,0 +1,98 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include <kernel/OS.h> + +#include "primpl.h" + +/* +** Create a new semaphore object. +*/ +PR_IMPLEMENT(PRSemaphore*) + PR_NewSem (PRUintn value) +{ + PRSemaphore *semaphore; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + semaphore = PR_NEWZAP(PRSemaphore); + if (NULL != semaphore) { + if ((semaphore->sem = create_sem(value, "nspr_sem")) < B_NO_ERROR) + return NULL; + else + return semaphore; + } + return NULL; +} + +/* +** Destroy the given semaphore object. +** +*/ +PR_IMPLEMENT(void) + PR_DestroySem (PRSemaphore *sem) +{ + status_t result; + + PR_ASSERT(sem != NULL); + result = delete_sem(sem->sem); + PR_ASSERT(result == B_NO_ERROR); + PR_DELETE(sem); +} + +/* +** Wait on a Semaphore. +** +** This routine allows a calling thread to wait or proceed depending upon +** the state of the semahore sem. The thread can proceed only if the +** counter value of the semaphore sem is currently greater than 0. If the +** value of semaphore sem is positive, it is decremented by one and the +** routine returns immediately allowing the calling thread to continue. If +** the value of semaphore sem is 0, the calling thread blocks awaiting the +** semaphore to be released by another thread. +** +** This routine can return PR_PENDING_INTERRUPT if the waiting thread +** has been interrupted. +*/ +PR_IMPLEMENT(PRStatus) + PR_WaitSem (PRSemaphore *sem) +{ + PR_ASSERT(sem != NULL); + if (acquire_sem(sem->sem) == B_NO_ERROR) + return PR_SUCCESS; + else + return PR_FAILURE; +} + +/* +** This routine increments the counter value of the semaphore. If other +** threads are blocked for the semaphore, then the scheduler will +** determine which ONE thread will be unblocked. 
+*/ +PR_IMPLEMENT(void) + PR_PostSem (PRSemaphore *sem) +{ + status_t result; + + PR_ASSERT(sem != NULL); + result = release_sem_etc(sem->sem, 1, B_DO_NOT_RESCHEDULE); + PR_ASSERT(result == B_NO_ERROR); +} + +/* +** Returns the value of the semaphore referenced by sem without affecting +** the state of the semaphore. The value represents the semaphore value +** at the time of the call, but may not be the actual value when the +** caller inspects it. +*/ +PR_IMPLEMENT(PRUintn) + PR_GetValueSem (PRSemaphore *sem) +{ + sem_info info; + + PR_ASSERT(sem != NULL); + get_sem_info(sem->sem, &info); + return info.count; +} diff --git a/nsprpub/pr/src/bthreads/btthread.c b/nsprpub/pr/src/bthreads/btthread.c new file mode 100644 index 000000000..c2a1cd82b --- /dev/null +++ b/nsprpub/pr/src/bthreads/btthread.c @@ -0,0 +1,662 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include <kernel/OS.h> +#include <support/TLS.h> + +#include "prlog.h" +#include "primpl.h" +#include "prcvar.h" +#include "prpdce.h" + +#include <stdlib.h> +#include <string.h> +#include <signal.h> + +/* values for PRThread.state */ +#define BT_THREAD_PRIMORD 0x01 /* this is the primordial thread */ +#define BT_THREAD_SYSTEM 0x02 /* this is a system thread */ +#define BT_THREAD_JOINABLE 0x04 /* this is a joinable thread */ + +struct _BT_Bookeeping +{ + PRLock *ml; /* a lock to protect ourselves */ + sem_id cleanUpSem; /* the primoridal thread will block on this + sem while waiting for the user threads */ + PRInt32 threadCount; /* user thred count */ + +} bt_book = { NULL, B_ERROR, 0 }; + + +#define BT_TPD_LIMIT 128 /* number of TPD slots we'll provide (arbitrary) */ + +/* these will be used to map an index returned by PR_NewThreadPrivateIndex() + to the corresponding beos native TLS slot number, and to the destructor + for that slot - note that, because it is allocated globally, this data + will be automatically zeroed for us when the program begins */ +static int32 tpd_beosTLSSlots[BT_TPD_LIMIT]; +static PRThreadPrivateDTOR tpd_dtors[BT_TPD_LIMIT]; + +static vint32 tpd_slotsUsed=0; /* number of currently-allocated TPD slots */ +static int32 tls_prThreadSlot; /* TLS slot in which PRThread will be stored */ + +/* this mutex will be used to synchronize access to every + PRThread.md.joinSem and PRThread.md.is_joining (we could + actually allocate one per thread, but that seems a bit excessive, + especially considering that there will probably be little + contention, PR_JoinThread() is allowed to block anyway, and the code + protected by the mutex is short/fast) */ +static PRLock *joinSemLock; + +static PRUint32 _bt_MapNSPRToNativePriority( PRThreadPriority priority ); +static PRThreadPriority _bt_MapNativeToNSPRPriority( PRUint32 priority ); +static void _bt_CleanupThread(void *arg); +static PRThread *_bt_AttachThread(); + +void +_PR_InitThreads (PRThreadType type, PRThreadPriority priority, + PRUintn maxPTDs) +{ + PRThread *primordialThread; + PRUint32 beThreadPriority; + + /* allocate joinSem mutex */ + joinSemLock = PR_NewLock(); + if (joinSemLock == NULL) + { + PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); + return; + } + + /* + ** Create and initialize NSPR structure for our primordial thread. 
+ */ + + primordialThread = PR_NEWZAP(PRThread); + if( NULL == primordialThread ) + { + PR_SetError( PR_OUT_OF_MEMORY_ERROR, 0 ); + return; + } + + primordialThread->md.joinSem = B_ERROR; + + /* + ** Set the priority to the desired level. + */ + + beThreadPriority = _bt_MapNSPRToNativePriority( priority ); + + set_thread_priority( find_thread( NULL ), beThreadPriority ); + + primordialThread->priority = priority; + + + /* set the thread's state - note that the thread is not joinable */ + primordialThread->state |= BT_THREAD_PRIMORD; + if (type == PR_SYSTEM_THREAD) + primordialThread->state |= BT_THREAD_SYSTEM; + + /* + ** Allocate a TLS slot for the PRThread structure (just using + ** native TLS, as opposed to NSPR TPD, will make PR_GetCurrentThread() + ** somewhat faster, and will leave one more TPD slot for our client) + */ + + tls_prThreadSlot = tls_allocate(); + + /* + ** Stuff our new PRThread structure into our thread specific + ** slot. + */ + + tls_set(tls_prThreadSlot, primordialThread); + + /* allocate lock for bt_book */ + bt_book.ml = PR_NewLock(); + if( NULL == bt_book.ml ) + { + PR_SetError( PR_OUT_OF_MEMORY_ERROR, 0 ); + return; + } +} + +PRUint32 +_bt_MapNSPRToNativePriority( PRThreadPriority priority ) + { + switch( priority ) + { + case PR_PRIORITY_LOW: return( B_LOW_PRIORITY ); + case PR_PRIORITY_NORMAL: return( B_NORMAL_PRIORITY ); + case PR_PRIORITY_HIGH: return( B_DISPLAY_PRIORITY ); + case PR_PRIORITY_URGENT: return( B_URGENT_DISPLAY_PRIORITY ); + default: return( B_NORMAL_PRIORITY ); + } +} + +PRThreadPriority +_bt_MapNativeToNSPRPriority(PRUint32 priority) + { + if (priority < B_NORMAL_PRIORITY) + return PR_PRIORITY_LOW; + if (priority < B_DISPLAY_PRIORITY) + return PR_PRIORITY_NORMAL; + if (priority < B_URGENT_DISPLAY_PRIORITY) + return PR_PRIORITY_HIGH; + return PR_PRIORITY_URGENT; +} + +PRUint32 +_bt_mapNativeToNSPRPriority( int32 priority ) +{ + switch( priority ) + { + case PR_PRIORITY_LOW: return( B_LOW_PRIORITY ); + case PR_PRIORITY_NORMAL: return( B_NORMAL_PRIORITY ); + case PR_PRIORITY_HIGH: return( B_DISPLAY_PRIORITY ); + case PR_PRIORITY_URGENT: return( B_URGENT_DISPLAY_PRIORITY ); + default: return( B_NORMAL_PRIORITY ); + } +} + +/* This method is called by all NSPR threads as they exit */ +void _bt_CleanupThread(void *arg) +{ + PRThread *me = PR_GetCurrentThread(); + int32 i; + + /* first, clean up all thread-private data */ + for (i = 0; i < tpd_slotsUsed; i++) + { + void *oldValue = tls_get(tpd_beosTLSSlots[i]); + if ( oldValue != NULL && tpd_dtors[i] != NULL ) + (*tpd_dtors[i])(oldValue); + } + + /* if this thread is joinable, wait for someone to join it */ + if (me->state & BT_THREAD_JOINABLE) + { + /* protect access to our joinSem */ + PR_Lock(joinSemLock); + + if (me->md.is_joining) + { + /* someone is already waiting to join us (they've + allocated a joinSem for us) - let them know we're + ready */ + delete_sem(me->md.joinSem); + + PR_Unlock(joinSemLock); + + } + else + { + /* noone is currently waiting for our demise - it + is our responsibility to allocate the joinSem + and block on it */ + me->md.joinSem = create_sem(0, "join sem"); + + /* we're done accessing our joinSem */ + PR_Unlock(joinSemLock); + + /* wait for someone to join us */ + while (acquire_sem(me->md.joinSem) == B_INTERRUPTED); + } + } + + /* if this is a user thread, we must update our books */ + if ((me->state & BT_THREAD_SYSTEM) == 0) + { + /* synchronize access to bt_book */ + PR_Lock( bt_book.ml ); + + /* decrement the number of currently-alive user threads */ + 
bt_book.threadCount--; + + if (bt_book.threadCount == 0 && bt_book.cleanUpSem != B_ERROR) { + /* we are the last user thread, and the primordial thread is + blocked in PR_Cleanup() waiting for us to finish - notify + it */ + delete_sem(bt_book.cleanUpSem); + } + + PR_Unlock( bt_book.ml ); + } + + /* finally, delete this thread's PRThread */ + PR_DELETE(me); +} + +/** + * This is a wrapper that all threads invoke that allows us to set some + * things up prior to a thread's invocation and clean up after a thread has + * exited. + */ +static void* +_bt_root (void* arg) + { + PRThread *thred = (PRThread*)arg; + PRIntn rv; + void *privData; + status_t result; + int i; + + /* save our PRThread object into our TLS */ + tls_set(tls_prThreadSlot, thred); + + thred->startFunc(thred->arg); /* run the dang thing */ + + /* clean up */ + _bt_CleanupThread(NULL); + + return 0; +} + +PR_IMPLEMENT(PRThread*) + PR_CreateThread (PRThreadType type, void (*start)(void* arg), void* arg, + PRThreadPriority priority, PRThreadScope scope, + PRThreadState state, PRUint32 stackSize) +{ + PRUint32 bePriority; + + PRThread* thred; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + thred = PR_NEWZAP(PRThread); + if (thred == NULL) + { + PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); + return NULL; + } + + thred->md.joinSem = B_ERROR; + + thred->arg = arg; + thred->startFunc = start; + thred->priority = priority; + + if( state == PR_JOINABLE_THREAD ) + { + thred->state |= BT_THREAD_JOINABLE; + } + + /* keep some books */ + + PR_Lock( bt_book.ml ); + + if (type == PR_USER_THREAD) + { + bt_book.threadCount++; + } + + PR_Unlock( bt_book.ml ); + + bePriority = _bt_MapNSPRToNativePriority( priority ); + + thred->md.tid = spawn_thread((thread_func)_bt_root, "moz-thread", + bePriority, thred); + if (thred->md.tid < B_OK) { + PR_SetError(PR_UNKNOWN_ERROR, thred->md.tid); + PR_DELETE(thred); + return NULL; + } + + if (resume_thread(thred->md.tid) < B_OK) { + PR_SetError(PR_UNKNOWN_ERROR, 0); + PR_DELETE(thred); + return NULL; + } + + return thred; + } + +PR_IMPLEMENT(PRThread*) + PR_AttachThread(PRThreadType type, PRThreadPriority priority, + PRThreadStack *stack) +{ + /* PR_GetCurrentThread() will attach a thread if necessary */ + return PR_GetCurrentThread(); +} + +PR_IMPLEMENT(void) + PR_DetachThread() +{ + /* we don't support detaching */ +} + +PR_IMPLEMENT(PRStatus) + PR_JoinThread (PRThread* thred) +{ + status_t eval, status; + + PR_ASSERT(thred != NULL); + + if ((thred->state & BT_THREAD_JOINABLE) == 0) + { + PR_SetError( PR_INVALID_ARGUMENT_ERROR, 0 ); + return( PR_FAILURE ); + } + + /* synchronize access to the thread's joinSem */ + PR_Lock(joinSemLock); + + if (thred->md.is_joining) + { + /* another thread is already waiting to join the specified + thread - we must fail */ + PR_Unlock(joinSemLock); + return PR_FAILURE; + } + + /* let others know we are waiting to join */ + thred->md.is_joining = PR_TRUE; + + if (thred->md.joinSem == B_ERROR) + { + /* the thread hasn't finished yet - it is our responsibility to + allocate a joinSem and wait on it */ + thred->md.joinSem = create_sem(0, "join sem"); + + /* we're done changing the joinSem now */ + PR_Unlock(joinSemLock); + + /* wait for the thread to finish */ + while (acquire_sem(thred->md.joinSem) == B_INTERRUPTED); + + } + else + { + /* the thread has already finished, and has allocated the + joinSem itself - let it know it can finally die */ + delete_sem(thred->md.joinSem); + + PR_Unlock(joinSemLock); + } + + /* make sure the thread is dead */ + 
wait_for_thread(thred->md.tid, &eval); + + return PR_SUCCESS; +} + +PR_IMPLEMENT(PRThread*) + PR_GetCurrentThread () +{ + PRThread* thred; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + thred = (PRThread *)tls_get( tls_prThreadSlot); + if (thred == NULL) + { + /* this thread doesn't have a PRThread structure (it must be + a native thread not created by the NSPR) - assimilate it */ + thred = _bt_AttachThread(); + } + PR_ASSERT(NULL != thred); + + return thred; +} + +PR_IMPLEMENT(PRThreadScope) + PR_GetThreadScope (const PRThread* thred) +{ + PR_ASSERT(thred != NULL); + return PR_GLOBAL_THREAD; +} + +PR_IMPLEMENT(PRThreadType) + PR_GetThreadType (const PRThread* thred) +{ + PR_ASSERT(thred != NULL); + return (thred->state & BT_THREAD_SYSTEM) ? + PR_SYSTEM_THREAD : PR_USER_THREAD; +} + +PR_IMPLEMENT(PRThreadState) + PR_GetThreadState (const PRThread* thred) +{ + PR_ASSERT(thred != NULL); + return (thred->state & BT_THREAD_JOINABLE)? + PR_JOINABLE_THREAD: PR_UNJOINABLE_THREAD; +} + +PR_IMPLEMENT(PRThreadPriority) + PR_GetThreadPriority (const PRThread* thred) +{ + PR_ASSERT(thred != NULL); + return thred->priority; +} /* PR_GetThreadPriority */ + +PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thred, + PRThreadPriority newPri) +{ + PRUint32 bePriority; + + PR_ASSERT( thred != NULL ); + + thred->priority = newPri; + bePriority = _bt_MapNSPRToNativePriority( newPri ); + set_thread_priority( thred->md.tid, bePriority ); +} + +PR_IMPLEMENT(PRStatus) + PR_NewThreadPrivateIndex (PRUintn* newIndex, + PRThreadPrivateDTOR destructor) +{ + int32 index; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + /* reserve the next available tpd slot */ + index = atomic_add( &tpd_slotsUsed, 1 ); + if (index >= BT_TPD_LIMIT) + { + /* no slots left - decrement value, then fail */ + atomic_add( &tpd_slotsUsed, -1 ); + PR_SetError( PR_TPD_RANGE_ERROR, 0 ); + return( PR_FAILURE ); + } + + /* allocate a beos-native TLS slot for this index (the new slot + automatically contains NULL) */ + tpd_beosTLSSlots[index] = tls_allocate(); + + /* remember the destructor */ + tpd_dtors[index] = destructor; + + *newIndex = (PRUintn)index; + + return( PR_SUCCESS ); +} + +PR_IMPLEMENT(PRStatus) + PR_SetThreadPrivate (PRUintn index, void* priv) +{ + void *oldValue; + + /* + ** Sanity checking + */ + + if(index < 0 || index >= tpd_slotsUsed || index >= BT_TPD_LIMIT) + { + PR_SetError( PR_TPD_RANGE_ERROR, 0 ); + return( PR_FAILURE ); + } + + /* if the old value isn't NULL, and the dtor for this slot isn't + NULL, we must destroy the data */ + oldValue = tls_get(tpd_beosTLSSlots[index]); + if (oldValue != NULL && tpd_dtors[index] != NULL) + (*tpd_dtors[index])(oldValue); + + /* save new value */ + tls_set(tpd_beosTLSSlots[index], priv); + + return( PR_SUCCESS ); + } + +PR_IMPLEMENT(void*) + PR_GetThreadPrivate (PRUintn index) +{ + /* make sure the index is valid */ + if (index < 0 || index >= tpd_slotsUsed || index >= BT_TPD_LIMIT) + { + PR_SetError( PR_TPD_RANGE_ERROR, 0 ); + return NULL; + } + + /* return the value */ + return tls_get( tpd_beosTLSSlots[index] ); + } + + +PR_IMPLEMENT(PRStatus) + PR_Interrupt (PRThread* thred) +{ + PRIntn rv; + + PR_ASSERT(thred != NULL); + + /* + ** there seems to be a bug in beos R5 in which calling + ** resume_thread() on a blocked thread returns B_OK instead + ** of B_BAD_THREAD_STATE (beos bug #20000422-19095). 
as such, + ** to interrupt a thread, we will simply suspend then resume it + ** (no longer call resume_thread(), check for B_BAD_THREAD_STATE, + ** the suspend/resume to wake up a blocked thread). this wakes + ** up blocked threads properly, and doesn't hurt unblocked threads + ** (they simply get stopped then re-started immediately) + */ + + rv = suspend_thread( thred->md.tid ); + if( rv != B_NO_ERROR ) + { + /* this doesn't appear to be a valid thread_id */ + PR_SetError( PR_UNKNOWN_ERROR, rv ); + return PR_FAILURE; + } + + rv = resume_thread( thred->md.tid ); + if( rv != B_NO_ERROR ) + { + PR_SetError( PR_UNKNOWN_ERROR, rv ); + return PR_FAILURE; + } + + return PR_SUCCESS; +} + +PR_IMPLEMENT(void) + PR_ClearInterrupt () +{ +} + +PR_IMPLEMENT(PRStatus) + PR_Yield () +{ + /* we just sleep for long enough to cause a reschedule (100 + microseconds) */ + snooze(100); +} + +#define BT_MILLION 1000000UL + +PR_IMPLEMENT(PRStatus) + PR_Sleep (PRIntervalTime ticks) +{ + bigtime_t tps; + status_t status; + + if (!_pr_initialized) _PR_ImplicitInitialization(); + + tps = PR_IntervalToMicroseconds( ticks ); + + status = snooze(tps); + if (status == B_NO_ERROR) return PR_SUCCESS; + + PR_SetError(PR_NOT_IMPLEMENTED_ERROR, status); + return PR_FAILURE; +} + +PR_IMPLEMENT(PRStatus) + PR_Cleanup () +{ + PRThread *me = PR_GetCurrentThread(); + + PR_ASSERT(me->state & BT_THREAD_PRIMORD); + if ((me->state & BT_THREAD_PRIMORD) == 0) { + return PR_FAILURE; + } + + PR_Lock( bt_book.ml ); + + if (bt_book.threadCount != 0) + { + /* we'll have to wait for some threads to finish - create a + sem to block on */ + bt_book.cleanUpSem = create_sem(0, "cleanup sem"); + } + + PR_Unlock( bt_book.ml ); + + /* note that, if all the user threads were already dead, we + wouldn't have created a sem above, so this acquire_sem() + will fail immediately */ + while (acquire_sem(bt_book.cleanUpSem) == B_INTERRUPTED); + + return PR_SUCCESS; +} + +PR_IMPLEMENT(void) + PR_ProcessExit (PRIntn status) +{ + exit(status); +} + +PRThread *_bt_AttachThread() +{ + PRThread *thread; + thread_info tInfo; + + /* make sure this thread doesn't already have a PRThread structure */ + PR_ASSERT(tls_get(tls_prThreadSlot) == NULL); + + /* allocate a PRThread structure for this thread */ + thread = PR_NEWZAP(PRThread); + if (thread == NULL) + { + PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); + return NULL; + } + + /* get the native thread's current state */ + get_thread_info(find_thread(NULL), &tInfo); + + /* initialize new PRThread */ + thread->md.tid = tInfo.thread; + thread->md.joinSem = B_ERROR; + thread->priority = _bt_MapNativeToNSPRPriority(tInfo.priority); + + /* attached threads are always non-joinable user threads */ + thread->state = 0; + + /* increment user thread count */ + PR_Lock(bt_book.ml); + bt_book.threadCount++; + PR_Unlock(bt_book.ml); + + /* store this thread's PRThread */ + tls_set(tls_prThreadSlot, thread); + + /* the thread must call _bt_CleanupThread() before it dies, in order + to clean up its PRThread, synchronize with the primordial thread, + etc. */ + on_exit_thread(_bt_CleanupThread, NULL); + + return thread; +} diff --git a/nsprpub/pr/src/bthreads/objs.mk b/nsprpub/pr/src/bthreads/objs.mk new file mode 100644 index 000000000..b273ba46e --- /dev/null +++ b/nsprpub/pr/src/bthreads/objs.mk @@ -0,0 +1,11 @@ +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+# This makefile appends to the variable OBJS the bthread object modules
+# that will be part of the nspr20 library.
+
+include $(srcdir)/bthreads/bsrcs.mk
+
+OBJS += $(BTCSRCS:%.c=bthreads/$(OBJDIR)/%.$(OBJ_SUFFIX))
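
Note on the locking pattern: btlocks.c and btcvar.c guard every BeOS kernel semaphore with an atomic counter (the classic BeOS "benaphore"), so the semaphore is only touched when threads actually contend. The sketch below is a standalone, illustrative restatement of that pattern; the `ben_lock` type and function names are invented for illustration and are not part of NSPR or of this patch, while the kernel calls (`atomic_add`, `create_sem`, `acquire_sem`, `release_sem_etc`) are the same ones the patch uses.

```c
#include <kernel/OS.h>   /* sem_id, atomic_add(), create_sem(), ... */

/* Illustrative benaphore: an atomic counter in front of a kernel
   semaphore.  The semaphore starts at 0 and is only acquired or
   released when more than one thread competes for the lock. */
typedef struct ben_lock {
    int32  count;    /* contention counter (vint32 on classic BeOS) */
    sem_id sem;      /* created with a count of 0 */
} ben_lock;

static status_t ben_init(ben_lock *b)
{
    b->count = 0;
    b->sem   = create_sem(0, "benaphore");
    return (b->sem < B_NO_ERROR) ? (status_t)b->sem : B_NO_ERROR;
}

static void ben_acquire(ben_lock *b)
{
    /* atomic_add() returns the previous value: if it was > 0, another
       thread already holds the lock, so block on the semaphore. */
    if (atomic_add(&b->count, 1) > 0) {
        while (acquire_sem(b->sem) == B_INTERRUPTED)
            ;   /* retry if interrupted, as PR_WaitCondVar() does */
    }
}

static void ben_release(ben_lock *b)
{
    /* If the previous value was > 1, at least one waiter is blocked on
       the semaphore; wake exactly one without forcing a reschedule. */
    if (atomic_add(&b->count, -1) > 1)
        release_sem_etc(b->sem, 1, B_DO_NOT_RESCHEDULE);
}
```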
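At the API level, the comments in btcvar.c spell out the contract that the lock protecting the condition must be held around both the wait and the notify, and that a waiter should re-test its condition after waking. A minimal, hypothetical client-side sketch of that contract follows (ordinary NSPR usage, not code from the patch; the `flag_t` type and helper names are invented for illustration):

```c
#include "prtypes.h"
#include "prinrval.h"
#include "prlock.h"
#include "prcvar.h"

/* A one-shot flag protected by a PRLock and its PRCondVar. */
typedef struct flag_t {
    PRLock    *lock;
    PRCondVar *cv;
    PRBool     ready;
} flag_t;

static PRStatus flag_init(flag_t *f)
{
    f->ready = PR_FALSE;
    f->lock  = PR_NewLock();
    f->cv    = (f->lock != NULL) ? PR_NewCondVar(f->lock) : NULL;
    return (f->cv != NULL) ? PR_SUCCESS : PR_FAILURE;
}

static void flag_wait(flag_t *f)
{
    PR_Lock(f->lock);                  /* the lock must be held to wait */
    while (!f->ready)                  /* re-test the condition on wakeup */
        PR_WaitCondVar(f->cv, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(f->lock);
}

static void flag_set(flag_t *f)
{
    PR_Lock(f->lock);                  /* the lock must be held to notify */
    f->ready = PR_TRUE;
    PR_NotifyCondVar(f->cv);
    PR_Unlock(f->lock);
}
```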
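Similarly, the thread lifecycle that btthread.c implements (PR_CreateThread, the join handshake over `joinSem`, and PR_Cleanup blocking on the user-thread count) is exercised by ordinary NSPR code along these lines. This is a hypothetical usage example, not code from the patch:

```c
#include <stdio.h>
#include "prinit.h"
#include "prthread.h"

/* Thread entry point: receives the 'arg' handed to PR_CreateThread(). */
static void worker(void *arg)
{
    printf("worker: %s\n", (const char *)arg);
}

int main(void)
{
    PRThread *t;

    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);

    /* A joinable user thread at normal priority; a stack size of 0
       means "use the default". */
    t = PR_CreateThread(PR_USER_THREAD, worker, "hello",
                        PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                        PR_JOINABLE_THREAD, 0);
    if (t != NULL)
        PR_JoinThread(t);   /* blocks until worker() has returned */

    PR_Cleanup();           /* waits for any remaining user threads */
    return 0;
}
```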