path: root/nsprpub/pr/src/md/unix
author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /nsprpub/pr/src/md/unix
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'nsprpub/pr/src/md/unix')
-rw-r--r--    99  nsprpub/pr/src/md/unix/Makefile.in
-rw-r--r--   301  nsprpub/pr/src/md/unix/aix.c
-rw-r--r--    33  nsprpub/pr/src/md/unix/aixwrap.c
-rw-r--r--    87  nsprpub/pr/src/md/unix/bsdi.c
-rw-r--r--   113  nsprpub/pr/src/md/unix/darwin.c
-rw-r--r--    77  nsprpub/pr/src/md/unix/dgux.c
-rw-r--r--    87  nsprpub/pr/src/md/unix/freebsd.c
-rw-r--r--   229  nsprpub/pr/src/md/unix/hpux.c
-rw-r--r--  1648  nsprpub/pr/src/md/unix/irix.c
-rw-r--r--    91  nsprpub/pr/src/md/unix/linux.c
-rw-r--r--    89  nsprpub/pr/src/md/unix/netbsd.c
-rw-r--r--    34  nsprpub/pr/src/md/unix/nto.c
-rw-r--r--    31  nsprpub/pr/src/md/unix/objs.mk
-rw-r--r--    89  nsprpub/pr/src/md/unix/openbsd.c
-rw-r--r--    91  nsprpub/pr/src/md/unix/os_AIX.s
-rw-r--r--    42  nsprpub/pr/src/md/unix/os_BSD_386_2.s
-rw-r--r--    13  nsprpub/pr/src/md/unix/os_Darwin.s
-rw-r--r--    68  nsprpub/pr/src/md/unix/os_Darwin_ppc.s
-rw-r--r--    80  nsprpub/pr/src/md/unix/os_Darwin_x86.s
-rw-r--r--    67  nsprpub/pr/src/md/unix/os_Darwin_x86_64.s
-rw-r--r--    25  nsprpub/pr/src/md/unix/os_HPUX.s
-rw-r--r--    80  nsprpub/pr/src/md/unix/os_HPUX_ia64.s
-rw-r--r--   134  nsprpub/pr/src/md/unix/os_Irix.s
-rw-r--r--    71  nsprpub/pr/src/md/unix/os_Linux_ia64.s
-rw-r--r--    75  nsprpub/pr/src/md/unix/os_Linux_ppc.s
-rw-r--r--    85  nsprpub/pr/src/md/unix/os_Linux_x86.s
-rw-r--r--    74  nsprpub/pr/src/md/unix/os_Linux_x86_64.s
-rw-r--r--   173  nsprpub/pr/src/md/unix/os_SunOS_sparcv9.s
-rw-r--r--   173  nsprpub/pr/src/md/unix/os_SunOS_ultrasparc.s
-rw-r--r--   126  nsprpub/pr/src/md/unix/os_SunOS_x86.s
-rw-r--r--    63  nsprpub/pr/src/md/unix/os_SunOS_x86_64.s
-rw-r--r--    75  nsprpub/pr/src/md/unix/osf1.c
-rw-r--r--   448  nsprpub/pr/src/md/unix/pthreads_user.c
-rw-r--r--    70  nsprpub/pr/src/md/unix/qnx.c
-rw-r--r--    88  nsprpub/pr/src/md/unix/riscos.c
-rw-r--r--   149  nsprpub/pr/src/md/unix/scoos.c
-rw-r--r--   161  nsprpub/pr/src/md/unix/solaris.c
-rw-r--r--    16  nsprpub/pr/src/md/unix/symbian.c
-rw-r--r--  3787  nsprpub/pr/src/md/unix/unix.c
-rw-r--r--   829  nsprpub/pr/src/md/unix/unix_errors.c
-rw-r--r--   551  nsprpub/pr/src/md/unix/unixware.c
-rw-r--r--   676  nsprpub/pr/src/md/unix/uxpoll.c
-rw-r--r--   885  nsprpub/pr/src/md/unix/uxproces.c
-rw-r--r--   252  nsprpub/pr/src/md/unix/uxrng.c
-rw-r--r--   643  nsprpub/pr/src/md/unix/uxshm.c
-rw-r--r--   513  nsprpub/pr/src/md/unix/uxwrap.c
46 files changed, 13591 insertions, 0 deletions
diff --git a/nsprpub/pr/src/md/unix/Makefile.in b/nsprpub/pr/src/md/unix/Makefile.in
new file mode 100644
index 000000000..f241840f8
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/Makefile.in
@@ -0,0 +1,99 @@
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+MOD_DEPTH = ../../../..
+topsrcdir = @top_srcdir@
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+include $(MOD_DEPTH)/config/autoconf.mk
+
+include $(topsrcdir)/config/config.mk
+
+CSRCS = \
+ unix.c \
+ unix_errors.c \
+ uxproces.c \
+ uxrng.c \
+ uxshm.c \
+ uxwrap.c \
+ $(NULL)
+
+ifneq ($(USE_PTHREADS),1)
+CSRCS += uxpoll.c
+endif
+
+ifeq ($(PTHREADS_USER),1)
+CSRCS += pthreads_user.c
+endif
+
+CSRCS += $(PR_MD_CSRCS)
+ASFILES += $(PR_MD_ASFILES)
+
+TARGETS = $(OBJS)
+
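+# On 32-bit SPARC Solaris the UltraSPARC atomic routines are assembled into
+# a separate CPU-specific shared library that is installed under
+# cpu/sparcv8plus, where the runtime linker picks it up in place of the
+# generic code on UltraSPARC hardware (see the -f $(ORIGIN) note below).
+# 64-bit builds assemble os_SunOS_sparcv9.s instead.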
+ifeq ($(OS_ARCH),SunOS)
+ ifeq ($(CPU_ARCH),sparc)
+ ifdef USE_64
+ ULTRASPARC_ASFILES = os_SunOS_sparcv9.s
+ ULTRASPARC_ASOBJS = $(addprefix $(OBJDIR)/,$(ULTRASPARC_ASFILES:.s=.$(OBJ_SUFFIX)))
+ else
+ LIBRARY_NAME = $(ULTRASPARC_LIBRARY)
+ LIBRARY_VERSION = $(MOD_MAJOR_VERSION)
+ ULTRASPARC_ASFILES = os_SunOS_ultrasparc.s
+ ULTRASPARC_ASOBJS = $(addprefix $(OBJDIR)/,$(ULTRASPARC_ASFILES:.s=.$(OBJ_SUFFIX)))
+ TARGETS += $(ULTRASPARC_ASOBJS) $(SHARED_LIBRARY)
+ RELEASE_LIBS = $(SHARED_LIBRARY)
+ RELEASE_LIBS_DEST = $(RELEASE_LIB_DIR)/cpu/sparcv8plus
+ lib_subdir = cpu/sparcv8plus
+ endif
+ endif
+endif
+
+INCLUDES = -I$(dist_includedir) -I$(topsrcdir)/pr/include -I$(topsrcdir)/pr/include/private
+
+DEFINES += -D_NSPR_BUILD_
+
+include $(topsrcdir)/config/rules.mk
+
+export:: $(TARGETS)
+
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(CPU_ARCH),sparc)
+
+ifdef USE_64
+$(ULTRASPARC_ASOBJS): $(ULTRASPARC_ASFILES)
+ /usr/ccs/bin/as -o $@ -K PIC -P -D_ASM -D__STDC__=0 -xarch=v9 $<
+else
+$(SHARED_LIBRARY): $(ULTRASPARC_ASOBJS)
+ $(LD) -G -z text -z endfiltee -o $@ $(ULTRASPARC_ASOBJS)
+ $(INSTALL) -m 444 $@ $(dist_libdir)/cpu/sparcv8plus
+ $(INSTALL) -m 444 $@ $(dist_bindir)/cpu/sparcv8plus
+#
+# The -f $(ORIGIN)/... linker flag uses the real file, after symbolic links
+# are resolved, as the origin. If NSDISTMODE is not "copy", libnspr4.so
+# will be installed as a symbolic link in $(dist_libdir), pointing to the
+# real libnspr4.so file in pr/src. Therefore we need to install an
+# additional copy of libnspr_flt4.so in pr/src/cpu/sparcv8plus.
+#
+ifneq ($(NSDISTMODE),copy)
+ $(INSTALL) -m 444 $@ ../../cpu/sparcv8plus
+endif
+
+ifneq ($(NSDISTMODE),copy)
+clobber realclean clobber_all distclean::
+ rm -rf ../../cpu
+endif
+
+$(ULTRASPARC_ASOBJS): $(ULTRASPARC_ASFILES)
+ /usr/ccs/bin/as -o $@ -K PIC -P -D_ASM -D__STDC__=0 -xarch=v8plus $<
+
+clean::
+ rm -rf $(ULTRASPARC_ASOBJS)
+endif
+
+endif
+endif
diff --git a/nsprpub/pr/src/md/unix/aix.c b/nsprpub/pr/src/md/unix/aix.c
new file mode 100644
index 000000000..43f0d58de
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/aix.c
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#ifdef AIX_HAVE_ATOMIC_OP_H
+#include <sys/atomic_op.h>
+
+PRInt32 _AIX_AtomicSet(PRInt32 *val, PRInt32 newval)
+{
+ PRIntn oldval;
+ boolean_t stored;
+ oldval = fetch_and_add((atomic_p)val, 0);
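+    /*
+     * Spin until the swap succeeds.  On AIX, compare_and_swap() writes the
+     * current value of *val back into oldval when the comparison fails, so
+     * each retry uses a fresh snapshot and the value returned is the one
+     * that was actually replaced.
+     */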
+ do
+ {
+ stored = compare_and_swap((atomic_p)val, &oldval, newval);
+ } while (!stored);
+ return oldval;
+} /* _AIX_AtomicSet */
+#endif /* AIX_HAVE_ATOMIC_OP_H */
+
+#if defined(AIX_TIMERS)
+
+#include <sys/time.h>
+
+static PRUint32 _aix_baseline_epoch;
+
+static void _MD_AixIntervalInit(void)
+{
+ timebasestruct_t real_time;
+ read_real_time(&real_time, TIMEBASE_SZ);
+ (void)time_base_to_time(&real_time, TIMEBASE_SZ);
+ _aix_baseline_epoch = real_time.tb_high;
+} /* _MD_AixIntervalInit */
+
+PRIntervalTime _MD_AixGetInterval(void)
+{
+ PRIntn rv;
+ PRUint64 temp;
+ timebasestruct_t real_time;
+ read_real_time(&real_time, TIMEBASE_SZ);
+ (void)time_base_to_time(&real_time, TIMEBASE_SZ);
+    /* tb_high is in seconds, tb_low in nanoseconds (10^-9 seconds) */
+    temp = 1000000000ULL * (PRUint64)(real_time.tb_high - _aix_baseline_epoch);
+    temp += (PRUint64)real_time.tb_low; /* everything's in nanoseconds */
+    temp >>= 16; /* now in interval ticks of 2^16 ns (~65.5 us) each */
+ return (PRIntervalTime)temp;
+} /* _MD_AixGetInterval */
+
+PRIntervalTime _MD_AixIntervalPerSec(void)
+{
+    return 1000000000ULL >> 16; /* 10^9 / 2^16 = 15258 ticks per second */
+} /* _MD_AixIntervalPerSec */
+
+#endif /* defined(AIX_TIMERS) */
+
+#if !defined(PTHREADS_USER)
+
+#if defined(_PR_PTHREADS)
+
+/*
+ * AIX 4.3 has sched_yield(). AIX 4.2 has pthread_yield().
+ * So we look up the appropriate function pointer at run time.
+ */
+
+#include <dlfcn.h>
+
+int (*_PT_aix_yield_fcn)() = NULL;
+int _pr_aix_send_file_use_disabled = 0;
+
+void _MD_EarlyInit(void)
+{
+ void *main_app_handle;
+ char *evp;
+
+ main_app_handle = dlopen(NULL, RTLD_NOW);
+ PR_ASSERT(NULL != main_app_handle);
+
+ _PT_aix_yield_fcn = (int(*)())dlsym(main_app_handle, "sched_yield");
+ if (!_PT_aix_yield_fcn) {
+ _PT_aix_yield_fcn = (int(*)())dlsym(main_app_handle,"pthread_yield");
+ PR_ASSERT(NULL != _PT_aix_yield_fcn);
+ }
+ dlclose(main_app_handle);
+
+ if (evp = getenv("NSPR_AIX_SEND_FILE_USE_DISABLED")) {
+ if (1 == atoi(evp))
+ _pr_aix_send_file_use_disabled = 1;
+ }
+
+#if defined(AIX_TIMERS)
+ _MD_AixIntervalInit();
+#endif
+}
+
+#else /* _PR_PTHREADS */
+
+void _MD_EarlyInit(void)
+{
+#if defined(AIX_TIMERS)
+ _MD_AixIntervalInit();
+#endif
+}
+
+#endif /* _PR_PTHREADS */
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+PR_IMPLEMENT(void)
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for AIX */
+PR_IMPLEMENT(void)
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for AIX.");
+    return PR_FAILURE;
+}
+#endif /* _PR_PTHREADS */
+#endif /* PTHREADS_USER */
+
+/*
+ * NSPR 2.0 overrides the system select() and poll() functions.
+ * On AIX 4.2, we use dlopen("/unix", RTLD_NOW) and dlsym() to get
+ * at the original system select() and poll() functions.
+ */
+
+#if !defined(AIX_RENAME_SELECT)
+
+#include <sys/select.h>
+#include <sys/poll.h>
+#include <dlfcn.h>
+
+static int (*aix_select_fcn)() = NULL;
+static int (*aix_poll_fcn)() = NULL;
+
+int _MD_SELECT(int width, fd_set *r, fd_set *w, fd_set *e, struct timeval *t)
+{
+ int rv;
+
+ if (!aix_select_fcn) {
+ void *aix_handle;
+
+ aix_handle = dlopen("/unix", RTLD_NOW);
+ if (!aix_handle) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ aix_select_fcn = (int(*)())dlsym(aix_handle,"select");
+ dlclose(aix_handle);
+ if (!aix_select_fcn) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ }
+ rv = (*aix_select_fcn)(width, r, w, e, t);
+ return rv;
+}
+
+int _MD_POLL(void *listptr, unsigned long nfds, long timeout)
+{
+ int rv;
+
+ if (!aix_poll_fcn) {
+ void *aix_handle;
+
+ aix_handle = dlopen("/unix", RTLD_NOW);
+ if (!aix_handle) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ aix_poll_fcn = (int(*)())dlsym(aix_handle,"poll");
+ dlclose(aix_handle);
+ if (!aix_poll_fcn) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ }
+ rv = (*aix_poll_fcn)(listptr, nfds, timeout);
+ return rv;
+}
+
+#else
+
+/*
+ * In AIX versions prior to 4.2, we use the two-step rename/link trick.
+ * The binary must contain at least one "poll" symbol for the linker's
+ * rename to work. So we must have this dummy function that references poll().
+ */
+#include <sys/poll.h>
+void _pr_aix_dummy()
+{
+ poll(0,0,0);
+}
+
+#endif /* !defined(AIX_RENAME_SELECT) */
+
+#ifdef _PR_HAVE_ATOMIC_CAS
+
+#include "pratom.h"
+
+#define _PR_AIX_ATOMIC_LOCK -1
+
+PR_IMPLEMENT(void)
+PR_StackPush(PRStack *stack, PRStackElem *stack_elem)
+{
+PRStackElem *addr;
+boolean_t locked = TRUE;
+
+ /* Is it safe to cast a pointer to an int? */
+ PR_ASSERT(sizeof(int) == sizeof(PRStackElem *));
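+    /*
+     * The list head doubles as a spin lock: a head value of
+     * _PR_AIX_ATOMIC_LOCK (-1) means an update is in progress.
+     * _check_lock() swaps the lock value in only if the head still equals
+     * the snapshot in addr; _clear_lock() later publishes the new head.
+     */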
+ do {
+ while ((addr = stack->prstk_head.prstk_elem_next) ==
+ (PRStackElem *)_PR_AIX_ATOMIC_LOCK)
+ ;
+ locked = _check_lock((atomic_p) &stack->prstk_head.prstk_elem_next,
+ (int) addr, _PR_AIX_ATOMIC_LOCK);
+ } while (locked == TRUE);
+ stack_elem->prstk_elem_next = addr;
+ _clear_lock((atomic_p)&stack->prstk_head.prstk_elem_next, (int)stack_elem);
+ return;
+}
+
+PR_IMPLEMENT(PRStackElem *)
+PR_StackPop(PRStack *stack)
+{
+PRStackElem *element;
+boolean_t locked = TRUE;
+
+ /* Is it safe to cast a pointer to an int? */
+ PR_ASSERT(sizeof(int) == sizeof(PRStackElem *));
+ do {
+ while ((element = stack->prstk_head.prstk_elem_next) ==
+ (PRStackElem *) _PR_AIX_ATOMIC_LOCK)
+ ;
+ locked = _check_lock((atomic_p) &stack->prstk_head.prstk_elem_next,
+ (int)element, _PR_AIX_ATOMIC_LOCK);
+ } while (locked == TRUE);
+
+ if (element == NULL) {
+ _clear_lock((atomic_p) &stack->prstk_head.prstk_elem_next, NULL);
+ } else {
+ _clear_lock((atomic_p) &stack->prstk_head.prstk_elem_next,
+ (int) element->prstk_elem_next);
+ }
+ return element;
+}
+
+#endif /* _PR_HAVE_ATOMIC_CAS */
diff --git a/nsprpub/pr/src/md/unix/aixwrap.c b/nsprpub/pr/src/md/unix/aixwrap.c
new file mode 100644
index 000000000..cb5b413f1
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/aixwrap.c
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * File: aixwrap.c
+ * Description:
+ * This file contains a single function, _MD_SELECT(), which simply
+ * invokes the select() function. This file is used in an ugly
+ * hack to override the system select() function on AIX releases
+ * prior to 4.2. (On AIX 4.2, we use a different mechanism to
+ * override select().)
+ */
+
+#ifndef AIX_RENAME_SELECT
+#error aixwrap.c should only be used on AIX 3.2 or 4.1
+#else
+
+#include <sys/select.h>
+#include <sys/poll.h>
+
+int _MD_SELECT(int width, fd_set *r, fd_set *w, fd_set *e, struct timeval *t)
+{
+ return select(width, r, w, e, t);
+}
+
+int _MD_POLL(void *listptr, unsigned long nfds, long timeout)
+{
+ return poll(listptr, nfds, timeout);
+}
+
+#endif /* AIX_RENAME_SELECT */
diff --git a/nsprpub/pr/src/md/unix/bsdi.c b/nsprpub/pr/src/md/unix/bsdi.c
new file mode 100644
index 000000000..095547ed5
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/bsdi.c
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+void _MD_EarlyInit(void)
+{
+ /*
+ * Ignore FPE because coercion of a NaN to an int causes SIGFPE
+ * to be raised.
+ */
+ struct sigaction act;
+
+ act.sa_handler = SIG_IGN;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGFPE, &act, 0);
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for BSDI */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for BSDI.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for BSDI.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/darwin.c b/nsprpub/pr/src/md/unix/darwin.c
new file mode 100644
index 000000000..719fc30f2
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/darwin.c
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <mach/mach_time.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+/*
+ * The multiplier (as a fraction) for converting the Mach absolute time
+ * unit to nanoseconds.
+ */
+static mach_timebase_info_data_t machTimebaseInfo;
+
+void _PR_Mach_IntervalInit(void)
+{
+ kern_return_t rv;
+
+ rv = mach_timebase_info(&machTimebaseInfo);
+ PR_ASSERT(rv == KERN_SUCCESS);
+}
+
+PRIntervalTime _PR_Mach_GetInterval(void)
+{
+ uint64_t time;
+
+ /*
+ * mach_absolute_time returns the time in the Mach absolute time unit.
+ * Convert it to milliseconds. See Mac Technical Q&A QA1398.
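+     * The numer/denom fraction converts Mach time units to nanoseconds;
+     * dividing by PR_NSEC_PER_MSEC (10^6) then yields milliseconds, which
+     * matches _PR_Mach_TicksPerSecond() returning 1000.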
+ */
+ time = mach_absolute_time();
+ time = time * machTimebaseInfo.numer / machTimebaseInfo.denom /
+ PR_NSEC_PER_MSEC;
+ return (PRIntervalTime)time;
+} /* _PR_Mach_GetInterval */
+
+PRIntervalTime _PR_Mach_TicksPerSecond(void)
+{
+ return 1000;
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#if !defined(_PR_PTHREADS)
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#if !defined(_PR_PTHREADS)
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Darwin */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Darwin.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Darwin.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
+
+/* darwin.c */
+
diff --git a/nsprpub/pr/src/md/unix/dgux.c b/nsprpub/pr/src/md/unix/dgux.c
new file mode 100644
index 000000000..a1b3602c5
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/dgux.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+/*
+ * using only NSPR threads here
+ *
+ * Copied from the UnixWare implementation. Should be kept in sync
+ * with ../../../include/md/_dgux.h.
+ */
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for DG/UX */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for DG/UX.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for DG/UX.");
+    return PR_FAILURE;
+}
+
diff --git a/nsprpub/pr/src/md/unix/freebsd.c b/nsprpub/pr/src/md/unix/freebsd.c
new file mode 100644
index 000000000..a66a81284
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/freebsd.c
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+void _MD_EarlyInit(void)
+{
+ /*
+ * Ignore FPE because coercion of a NaN to an int causes SIGFPE
+ * to be raised.
+ */
+ struct sigaction act;
+
+ act.sa_handler = SIG_IGN;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGFPE, &act, 0);
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) sigsetjmp(CONTEXT(t), 1);
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for FreeBSD */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for FreeBSD.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for FreeBSD.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/hpux.c b/nsprpub/pr/src/md/unix/hpux.c
new file mode 100644
index 000000000..56fddba29
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/hpux.c
@@ -0,0 +1,229 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+#include <setjmp.h>
+
+#if defined(HPUX_LW_TIMER)
+
+#include <machine/inline.h>
+#include <machine/clock.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/pstat.h>
+
+int __lw_get_thread_times(int which, int64_t *sample, int64_t *time);
+
+static double msecond_per_itick;
+
+void _PR_HPUX_LW_IntervalInit(void)
+{
+ struct pst_processor psp;
+ int iticksperclktick, clk_tck;
+ int rv;
+
+ rv = pstat_getprocessor(&psp, sizeof(psp), 1, 0);
+ PR_ASSERT(rv != -1);
+
+ iticksperclktick = psp.psp_iticksperclktick;
+ clk_tck = sysconf(_SC_CLK_TCK);
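+    /* one itick lasts 1/(iticksperclktick * CLK_TCK) of a second */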
+ msecond_per_itick = (1000.0)/(double)(iticksperclktick * clk_tck);
+}
+
+PRIntervalTime _PR_HPUX_LW_GetInterval(void)
+{
+ int64_t time, sample;
+
+ __lw_get_thread_times(1, &sample, &time);
+ /*
+ * Division is slower than float multiplication.
+ * return (time / iticks_per_msecond);
+ */
+ return (time * msecond_per_itick);
+}
+#endif /* HPUX_LW_TIMER */
+
+#if !defined(PTHREADS_USER)
+
+void _MD_EarlyInit(void)
+{
+#ifndef _PR_PTHREADS
+ /*
+ * The following piece of code is taken from ns/nspr/src/md_HP-UX.c.
+ * In the comment for revision 1.6, dated 1995/09/11 23:33:34,
+ * robm says:
+ * This version has some problems which need to be addressed.
+ * First, intercept all system calls and prevent them from
+ * executing the library code which performs stack switches
+ * before normal system call invocation. In order for library
+ * calls which make system calls to work (like stdio), however,
+ * we must also allocate our own stack and switch the primordial
+ * stack to use it. This isn't so bad, except that I fudged the
+ * backtrace length when copying the old stack to the new one.
+ *
+ * This is the original comment of robm in the code:
+ * XXXrobm Horrific. To avoid a problem with HP's system call
+ * code, we allocate a new stack for the primordial thread and
+ * use it. However, we don't know how far back the original stack
+ * goes. We should create a routine that performs a backtrace and
+ * finds out just how much we need to copy. As a temporary measure,
+ * I just copy an arbitrary guess.
+ *
+ * In an email to servereng dated 2 Jan 1997, Mike Patnode (mikep)
+ * suggests that this only needs to be done for HP-UX 9.
+ */
+#ifdef HPUX9
+#define PIDOOMA_STACK_SIZE 524288
+#define BACKTRACE_SIZE 8192
+ {
+ jmp_buf jb;
+ char *newstack;
+ char *oldstack;
+
+ if(!setjmp(jb)) {
+ newstack = (char *) PR_MALLOC(PIDOOMA_STACK_SIZE);
+ oldstack = (char *) (*(((int *) jb) + 1) - BACKTRACE_SIZE);
+ memcpy(newstack, oldstack, BACKTRACE_SIZE);
+ *(((int *) jb) + 1) = (int) (newstack + BACKTRACE_SIZE);
+ longjmp(jb, 1);
+ }
+ }
+#endif /* HPUX9 */
+#endif /* !_PR_PTHREADS */
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for HP-UX */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for HP-UX.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for HP-UX.");
+    return PR_FAILURE;
+}
+#endif /* _PR_PTHREADS */
+
+void
+_MD_suspend_thread(PRThread *thread)
+{
+#ifdef _PR_PTHREADS
+#endif
+}
+
+void
+_MD_resume_thread(PRThread *thread)
+{
+#ifdef _PR_PTHREADS
+#endif
+}
+#endif /* PTHREADS_USER */
+
+/*
+ * The HP version of strchr is buggy. It looks past the end of the
+ * string and causes a segmentation fault when our (NSPR) version
+ * of malloc is used.
+ *
+ * A better solution might be to put a cushion in our malloc just in
+ * case HP's version of strchr somehow gets used instead of this one.
+ */
+char *
+strchr(const char *s, int c)
+{
+ char ch;
+
+ if (!s) {
+ return NULL;
+ }
+
+ ch = (char) c;
+
+ while ((*s) && ((*s) != ch)) {
+ s++;
+ }
+
+ if ((*s) == ch) {
+ return (char *) s;
+ }
+
+ return NULL;
+}
+
+/*
+ * Implementation of memcmp in HP-UX (verified on releases A.09.03,
+ * A.09.07, and B.10.10) dumps core if called with:
+ * 1. First operand with address = 1(mod 4).
+ * 2. Size = 1(mod 4)
+ * 3. Last byte of the second operand is the last byte of the page and
+ * next page is not accessible(not mapped or protected)
+ * Thus, using the following naive version (tons of optimizations are
+ * possible;^)
+ */
+
+int memcmp(const void *s1, const void *s2, size_t n)
+{
+ register unsigned char *p1 = (unsigned char *) s1,
+ *p2 = (unsigned char *) s2;
+
+ while (n-- > 0) {
+ register int r = ((int) ((unsigned int) *p1))
+ - ((int) ((unsigned int) *p2));
+ if (r) return r;
+ p1++; p2++;
+ }
+ return 0;
+}
diff --git a/nsprpub/pr/src/md/unix/irix.c b/nsprpub/pr/src/md/unix/irix.c
new file mode 100644
index 000000000..c57a07b33
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/irix.c
@@ -0,0 +1,1648 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syssgi.h>
+#include <sys/time.h>
+#include <sys/immu.h>
+#include <sys/utsname.h>
+#include <sys/sysmp.h>
+#include <sys/pda.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <sys/resource.h>
+#include <sys/procfs.h>
+#include <task.h>
+#include <dlfcn.h>
+
+static void _MD_IrixIntervalInit(void);
+
+#if defined(_PR_PTHREADS)
+/*
+ * for compatibility with classic nspr
+ */
+void _PR_IRIX_CHILD_PROCESS()
+{
+}
+#else /* defined(_PR_PTHREADS) */
+
+static void irix_detach_sproc(void);
+char *_nspr_sproc_private; /* ptr. to private region in every sproc */
+
+extern PRUintn _pr_numCPU;
+
+typedef struct nspr_arena {
+ PRCList links;
+ usptr_t *usarena;
+} nspr_arena;
+
+#define ARENA_PTR(qp) \
+ ((nspr_arena *) ((char*) (qp) - offsetof(nspr_arena , links)))
+
+static usptr_t *alloc_new_arena(void);
+
+PRCList arena_list = PR_INIT_STATIC_CLIST(&arena_list);
+ulock_t arena_list_lock;
+nspr_arena first_arena;
+int _nspr_irix_arena_cnt = 1;
+
+PRCList sproc_list = PR_INIT_STATIC_CLIST(&sproc_list);
+ulock_t sproc_list_lock;
+
+typedef struct sproc_data {
+ void (*entry) (void *, size_t);
+ unsigned inh;
+ void *arg;
+ caddr_t sp;
+ size_t len;
+ int *pid;
+ int creator_pid;
+} sproc_data;
+
+typedef struct sproc_params {
+ PRCList links;
+ sproc_data sd;
+} sproc_params;
+
+#define SPROC_PARAMS_PTR(qp) \
+ ((sproc_params *) ((char*) (qp) - offsetof(sproc_params , links)))
+
+long _nspr_irix_lock_cnt = 0;
+long _nspr_irix_sem_cnt = 0;
+long _nspr_irix_pollsem_cnt = 0;
+
+usptr_t *_pr_usArena;
+ulock_t _pr_heapLock;
+
+usema_t *_pr_irix_exit_sem;
+PRInt32 _pr_irix_exit_now = 0;
+PRInt32 _pr_irix_process_exit_code = 0; /* exit code for PR_ProcessExit */
+PRInt32 _pr_irix_process_exit = 0; /* process exiting due to call to
+ PR_ProcessExit */
+
+int _pr_irix_primoridal_cpu_fd[2] = { -1, -1 };
+static void (*libc_exit)(int) = NULL;
+static void *libc_handle = NULL;
+
+#define _NSPR_DEF_INITUSERS 100 /* default value of CONF_INITUSERS */
+#define _NSPR_DEF_INITSIZE (4 * 1024 * 1024) /* 4 MB */
+
+int _irix_initusers = _NSPR_DEF_INITUSERS;
+int _irix_initsize = _NSPR_DEF_INITSIZE;
+
+PRIntn _pr_io_in_progress, _pr_clock_in_progress;
+
+PRInt32 _pr_md_irix_sprocs_created, _pr_md_irix_sprocs_failed;
+PRInt32 _pr_md_irix_sprocs = 1;
+PRCList _pr_md_irix_sproc_list =
+PR_INIT_STATIC_CLIST(&_pr_md_irix_sproc_list);
+
+sigset_t ints_off;
+extern sigset_t timer_set;
+
+#if !defined(PR_SETABORTSIG)
+#define PR_SETABORTSIG 18
+#endif
+/*
+ * terminate the entire application if any sproc exits abnormally
+ */
+PRBool _nspr_terminate_on_error = PR_TRUE;
+
+/*
+ * exported interface to set the shared arena parameters
+ */
+void _PR_Irix_Set_Arena_Params(PRInt32 initusers, PRInt32 initsize)
+{
+ _irix_initusers = initusers;
+ _irix_initsize = initsize;
+}
+
+static usptr_t *alloc_new_arena()
+{
+ return(usinit("/dev/zero"));
+}
+
+static PRStatus new_poll_sem(struct _MDThread *mdthr, int val)
+{
+PRIntn _is;
+PRStatus rv = PR_SUCCESS;
+usema_t *sem = NULL;
+PRCList *qp;
+nspr_arena *arena;
+usptr_t *irix_arena;
+PRThread *me = _MD_GET_ATTACHED_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ _PR_LOCK(arena_list_lock);
+ for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
+ arena = ARENA_PTR(qp);
+ sem = usnewpollsema(arena->usarena, val);
+ if (sem != NULL) {
+ mdthr->cvar_pollsem = sem;
+ mdthr->pollsem_arena = arena->usarena;
+ break;
+ }
+ }
+ if (sem == NULL) {
+ /*
+ * If no space left in the arena allocate a new one.
+ */
+ if (errno == ENOMEM) {
+ arena = PR_NEWZAP(nspr_arena);
+ if (arena != NULL) {
+ irix_arena = alloc_new_arena();
+ if (irix_arena) {
+ PR_APPEND_LINK(&arena->links, &arena_list);
+ _nspr_irix_arena_cnt++;
+ arena->usarena = irix_arena;
+ sem = usnewpollsema(arena->usarena, val);
+ if (sem != NULL) {
+ mdthr->cvar_pollsem = sem;
+ mdthr->pollsem_arena = arena->usarena;
+ } else
+ rv = PR_FAILURE;
+ } else {
+ PR_DELETE(arena);
+ rv = PR_FAILURE;
+ }
+
+ } else
+ rv = PR_FAILURE;
+ } else
+ rv = PR_FAILURE;
+ }
+ _PR_UNLOCK(arena_list_lock);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ if (rv == PR_SUCCESS)
+ _MD_ATOMIC_INCREMENT(&_nspr_irix_pollsem_cnt);
+ return rv;
+}
+
+static void free_poll_sem(struct _MDThread *mdthr)
+{
+PRIntn _is;
+PRThread *me = _MD_GET_ATTACHED_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ usfreepollsema(mdthr->cvar_pollsem, mdthr->pollsem_arena);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ _MD_ATOMIC_DECREMENT(&_nspr_irix_pollsem_cnt);
+}
+
+static PRStatus new_lock(struct _MDLock *lockp)
+{
+PRIntn _is;
+PRStatus rv = PR_SUCCESS;
+ulock_t lock = NULL;
+PRCList *qp;
+nspr_arena *arena;
+usptr_t *irix_arena;
+PRThread *me = _MD_GET_ATTACHED_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ _PR_LOCK(arena_list_lock);
+ for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
+ arena = ARENA_PTR(qp);
+ lock = usnewlock(arena->usarena);
+ if (lock != NULL) {
+ lockp->lock = lock;
+ lockp->arena = arena->usarena;
+ break;
+ }
+ }
+ if (lock == NULL) {
+ /*
+ * If no space left in the arena allocate a new one.
+ */
+ if (errno == ENOMEM) {
+ arena = PR_NEWZAP(nspr_arena);
+ if (arena != NULL) {
+ irix_arena = alloc_new_arena();
+ if (irix_arena) {
+ PR_APPEND_LINK(&arena->links, &arena_list);
+ _nspr_irix_arena_cnt++;
+ arena->usarena = irix_arena;
+ lock = usnewlock(irix_arena);
+ if (lock != NULL) {
+ lockp->lock = lock;
+ lockp->arena = arena->usarena;
+ } else
+ rv = PR_FAILURE;
+ } else {
+ PR_DELETE(arena);
+ rv = PR_FAILURE;
+ }
+
+ } else
+ rv = PR_FAILURE;
+ } else
+ rv = PR_FAILURE;
+ }
+ _PR_UNLOCK(arena_list_lock);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ if (rv == PR_SUCCESS)
+ _MD_ATOMIC_INCREMENT(&_nspr_irix_lock_cnt);
+ return rv;
+}
+
+static void free_lock(struct _MDLock *lockp)
+{
+PRIntn _is;
+PRThread *me = _MD_GET_ATTACHED_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ usfreelock(lockp->lock, lockp->arena);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ _MD_ATOMIC_DECREMENT(&_nspr_irix_lock_cnt);
+}
+
+void _MD_FREE_LOCK(struct _MDLock *lockp)
+{
+ PRIntn _is;
+ PRThread *me = _MD_GET_ATTACHED_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ free_lock(lockp);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+}
+
+/*
+ * _MD_get_attached_thread
+ * Return the thread pointer of the current thread if it is attached.
+ *
+ * This function is needed for Irix because the thread-local-storage is
+ * implemented by mmap'ing a page with the MAP_LOCAL flag. This causes the
+ * sproc-private page to inherit contents of the page of the caller of sproc().
+ */
+PRThread *_MD_get_attached_thread(void)
+{
+
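+    /*
+     * The MAP_LOCAL page starts out as a copy of the creator's page, so a
+     * freshly created sproc initially sees its creator's thread pointer.
+     * Comparing the pid recorded in the private page with the real pid
+     * tells whether the slot belongs to this sproc.
+     */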
+ if (_MD_GET_SPROC_PID() == get_pid())
+ return _MD_THIS_THREAD();
+ else
+ return 0;
+}
+
+/*
+ * _MD_get_current_thread
+ * Return the thread pointer of the current thread (attaching it if
+ * necessary)
+ */
+PRThread *_MD_get_current_thread(void)
+{
+PRThread *me;
+
+ me = _MD_GET_ATTACHED_THREAD();
+ if (NULL == me) {
+ me = _PRI_AttachThread(
+ PR_USER_THREAD, PR_PRIORITY_NORMAL, NULL, 0);
+ }
+ PR_ASSERT(me != NULL);
+ return(me);
+}
+
+/*
+ * irix_detach_sproc
+ * auto-detach a sproc when it exits
+ */
+void irix_detach_sproc(void)
+{
+PRThread *me;
+
+ me = _MD_GET_ATTACHED_THREAD();
+ if ((me != NULL) && (me->flags & _PR_ATTACHED)) {
+ _PRI_DetachThread();
+ }
+}
+
+
+PRStatus _MD_NEW_LOCK(struct _MDLock *lockp)
+{
+ PRStatus rv;
+ PRIntn is;
+ PRThread *me = _MD_GET_ATTACHED_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ rv = new_lock(lockp);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return rv;
+}
+
+static void
+sigchld_handler(int sig)
+{
+ pid_t pid;
+ int status;
+
+ /*
+ * If an sproc exited abnormally send a SIGKILL signal to all the
+ * sprocs in the process to terminate the application
+ */
+ while ((pid = waitpid(0, &status, WNOHANG)) > 0) {
+ if (WIFSIGNALED(status) && ((WTERMSIG(status) == SIGSEGV) ||
+ (WTERMSIG(status) == SIGBUS) ||
+ (WTERMSIG(status) == SIGABRT) ||
+ (WTERMSIG(status) == SIGILL))) {
+
+ prctl(PR_SETEXITSIG, SIGKILL);
+ _exit(status);
+ }
+ }
+}
+
+static void save_context_and_block(int sig)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+_PRCPU *cpu = _PR_MD_CURRENT_CPU();
+
+ /*
+ * save context
+ */
+ (void) setjmp(me->md.jb);
+ /*
+ * unblock the suspending thread
+ */
+ if (me->cpu) {
+ /*
+ * I am a cpu thread, not a user-created GLOBAL thread
+ */
+ unblockproc(cpu->md.suspending_id);
+ } else {
+ unblockproc(me->md.suspending_id);
+ }
+ /*
+ * now, block current thread
+ */
+ blockproc(getpid());
+}
+
+/*
+** The irix kernel has a bug in it which causes async connect's which are
+** interrupted by a signal to fail terribly (EADDRINUSE is returned).
+** We work around the bug by blocking signals during the async connect
+** attempt.
+*/
+PRInt32 _MD_irix_connect(
+ PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout)
+{
+ PRInt32 rv;
+ sigset_t oldset;
+
+ sigprocmask(SIG_BLOCK, &ints_off, &oldset);
+ rv = connect(osfd, addr, addrlen);
+ sigprocmask(SIG_SETMASK, &oldset, 0);
+
+ return(rv);
+}
+
+#include "prprf.h"
+
+/********************************************************************/
+/********************************************************************/
+/*************** Various thread like things for IRIX ****************/
+/********************************************************************/
+/********************************************************************/
+
+void *_MD_GetSP(PRThread *t)
+{
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ void *sp;
+
+ if (me == t)
+ (void) setjmp(t->md.jb);
+
+ sp = (void *)(t->md.jb[JB_SP]);
+ PR_ASSERT((sp >= (void *) t->stack->stackBottom) &&
+ (sp <= (void *) (t->stack->stackBottom + t->stack->stackSize)));
+ return(sp);
+}
+
+void _MD_InitLocks()
+{
+ char buf[200];
+ char *init_users, *init_size;
+
+ PR_snprintf(buf, sizeof(buf), "/dev/zero");
+
+ if (init_users = getenv("_NSPR_IRIX_INITUSERS"))
+ _irix_initusers = atoi(init_users);
+
+ if (init_size = getenv("_NSPR_IRIX_INITSIZE"))
+ _irix_initsize = atoi(init_size);
+
+ usconfig(CONF_INITUSERS, _irix_initusers);
+ usconfig(CONF_INITSIZE, _irix_initsize);
+ usconfig(CONF_AUTOGROW, 1);
+ usconfig(CONF_AUTORESV, 1);
+ if (usconfig(CONF_ARENATYPE, US_SHAREDONLY) < 0) {
+ perror("PR_Init: unable to config mutex arena");
+ exit(-1);
+ }
+
+ _pr_usArena = usinit(buf);
+ if (!_pr_usArena) {
+ fprintf(stderr,
+ "PR_Init: Error - unable to create lock/monitor arena\n");
+ exit(-1);
+ }
+ _pr_heapLock = usnewlock(_pr_usArena);
+ _nspr_irix_lock_cnt++;
+
+ arena_list_lock = usnewlock(_pr_usArena);
+ _nspr_irix_lock_cnt++;
+
+ sproc_list_lock = usnewlock(_pr_usArena);
+ _nspr_irix_lock_cnt++;
+
+ _pr_irix_exit_sem = usnewsema(_pr_usArena, 0);
+ _nspr_irix_sem_cnt = 1;
+
+ first_arena.usarena = _pr_usArena;
+ PR_INIT_CLIST(&first_arena.links);
+ PR_APPEND_LINK(&first_arena.links, &arena_list);
+}
+
+/* _PR_IRIX_CHILD_PROCESS is a private API for Server group */
+void _PR_IRIX_CHILD_PROCESS()
+{
+extern PRUint32 _pr_global_threads;
+
+ PR_ASSERT(_PR_MD_CURRENT_CPU() == _pr_primordialCPU);
+ PR_ASSERT(_pr_numCPU == 1);
+ PR_ASSERT(_pr_global_threads == 0);
+ /*
+ * save the new pid
+ */
+ _pr_primordialCPU->md.id = getpid();
+ _MD_SET_SPROC_PID(getpid());
+}
+
+static PRStatus pr_cvar_wait_sem(PRThread *thread, PRIntervalTime timeout)
+{
+ int rv;
+
+#ifdef _PR_USE_POLL
+ struct pollfd pfd;
+ int msecs;
+
+ if (timeout == PR_INTERVAL_NO_TIMEOUT)
+ msecs = -1;
+ else
+ msecs = PR_IntervalToMilliseconds(timeout);
+#else
+ struct timeval tv, *tvp;
+ fd_set rd;
+
+ if(timeout == PR_INTERVAL_NO_TIMEOUT)
+ tvp = NULL;
+ else {
+ tv.tv_sec = PR_IntervalToSeconds(timeout);
+ tv.tv_usec = PR_IntervalToMicroseconds(
+ timeout - PR_SecondsToInterval(tv.tv_sec));
+ tvp = &tv;
+ }
+ FD_ZERO(&rd);
+ FD_SET(thread->md.cvar_pollsemfd, &rd);
+#endif
+
+ /*
+ * call uspsema only if a previous select call on this semaphore
+ * did not timeout
+ */
+ if (!thread->md.cvar_pollsem_select) {
+ rv = _PR_WAIT_SEM(thread->md.cvar_pollsem);
+ PR_ASSERT(rv >= 0);
+ } else
+ rv = 0;
+again:
+ if(!rv) {
+#ifdef _PR_USE_POLL
+ pfd.events = POLLIN;
+ pfd.fd = thread->md.cvar_pollsemfd;
+ rv = _MD_POLL(&pfd, 1, msecs);
+#else
+ rv = _MD_SELECT(thread->md.cvar_pollsemfd + 1, &rd, NULL,NULL,tvp);
+#endif
+ if ((rv == -1) && (errno == EINTR)) {
+ rv = 0;
+ goto again;
+ }
+ PR_ASSERT(rv >= 0);
+ }
+
+ if (rv > 0) {
+ /*
+ * acquired the semaphore, call uspsema next time
+ */
+ thread->md.cvar_pollsem_select = 0;
+ return PR_SUCCESS;
+ } else {
+ /*
+ * select timed out; must call select, not uspsema, when trying
+ * to acquire the semaphore the next time
+ */
+ thread->md.cvar_pollsem_select = 1;
+ return PR_FAILURE;
+ }
+}
+
+PRStatus _MD_wait(PRThread *thread, PRIntervalTime ticks)
+{
+ if ( thread->flags & _PR_GLOBAL_SCOPE ) {
+ _MD_CHECK_FOR_EXIT();
+ if (pr_cvar_wait_sem(thread, ticks) == PR_FAILURE) {
+ _MD_CHECK_FOR_EXIT();
+ /*
+ * wait timed out
+ */
+ _PR_THREAD_LOCK(thread);
+ if (thread->wait.cvar) {
+ /*
+ * The thread will remove itself from the waitQ
+ * of the cvar in _PR_WaitCondVar
+ */
+ thread->wait.cvar = NULL;
+ thread->state = _PR_RUNNING;
+ _PR_THREAD_UNLOCK(thread);
+ } else {
+ _PR_THREAD_UNLOCK(thread);
+ /*
+ * This thread was woken up by a notifying thread
+ * at the same time as a timeout; so, consume the
+ * extra post operation on the semaphore
+ */
+ _MD_CHECK_FOR_EXIT();
+ pr_cvar_wait_sem(thread, PR_INTERVAL_NO_TIMEOUT);
+ }
+ _MD_CHECK_FOR_EXIT();
+ }
+ } else {
+ _PR_MD_SWITCH_CONTEXT(thread);
+ }
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_WakeupWaiter(PRThread *thread)
+{
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRIntn is;
+
+ PR_ASSERT(_pr_md_idle_cpus >= 0);
+ if (thread == NULL) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ } else if (!_PR_IS_NATIVE_THREAD(thread)) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ } else {
+ PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ _MD_CVAR_POST_SEM(thread);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ }
+ return PR_SUCCESS;
+}
+
+void create_sproc (void (*entry) (void *, size_t), unsigned inh,
+ void *arg, caddr_t sp, size_t len, int *pid)
+{
+sproc_params sparams;
+char data;
+int rv;
+PRThread *me = _PR_MD_CURRENT_THREAD();
+
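+    /*
+     * sprocsp() is called directly only when running on the primordial
+     * sproc (cpu 0).  Other callers queue a request on sproc_list, wake
+     * cpu 0 through the primordial-cpu pipe, and block until cpu 0 has
+     * created the sproc on their behalf (see _PR_MD_primordial_cpu).
+     */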
+ if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
+ *pid = sprocsp(entry, /* startup func */
+ inh, /* attribute flags */
+ arg, /* thread param */
+ sp, /* stack address */
+ len); /* stack size */
+ } else {
+ sparams.sd.entry = entry;
+ sparams.sd.inh = inh;
+ sparams.sd.arg = arg;
+ sparams.sd.sp = sp;
+ sparams.sd.len = len;
+ sparams.sd.pid = pid;
+ sparams.sd.creator_pid = getpid();
+ _PR_LOCK(sproc_list_lock);
+ PR_APPEND_LINK(&sparams.links, &sproc_list);
+ rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
+ PR_ASSERT(rv == 1);
+ _PR_UNLOCK(sproc_list_lock);
+ blockproc(getpid());
+ }
+}
+
+/*
+ * _PR_MD_WAKEUP_PRIMORDIAL_CPU
+ *
+ * wakeup cpu 0
+ */
+
+void _PR_MD_WAKEUP_PRIMORDIAL_CPU()
+{
+char data = '0';
+int rv;
+
+ rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
+ PR_ASSERT(rv == 1);
+}
+
+/*
+ * _PR_MD_primordial_cpu
+ *
+ * process events that need to be executed by the primordial cpu on each
+ * iteration through the idle loop
+ */
+
+void _PR_MD_primordial_cpu()
+{
+PRCList *qp;
+sproc_params *sp;
+int pid;
+
+ _PR_LOCK(sproc_list_lock);
+ while ((qp = sproc_list.next) != &sproc_list) {
+ sp = SPROC_PARAMS_PTR(qp);
+ PR_REMOVE_LINK(&sp->links);
+ pid = sp->sd.creator_pid;
+ (*(sp->sd.pid)) = sprocsp(sp->sd.entry, /* startup func */
+ sp->sd.inh, /* attribute flags */
+ sp->sd.arg, /* thread param */
+ sp->sd.sp, /* stack address */
+ sp->sd.len); /* stack size */
+ unblockproc(pid);
+ }
+ _PR_UNLOCK(sproc_list_lock);
+}
+
+PRStatus _MD_CreateThread(PRThread *thread,
+void (*start)(void *),
+PRThreadPriority priority,
+PRThreadScope scope,
+PRThreadState state,
+PRUint32 stackSize)
+{
+ typedef void (*SprocEntry) (void *, size_t);
+ SprocEntry spentry = (SprocEntry)start;
+ PRIntn is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 pid;
+ PRStatus rv;
+
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ thread->md.cvar_pollsem_select = 0;
+ thread->flags |= _PR_GLOBAL_SCOPE;
+
+ thread->md.cvar_pollsemfd = -1;
+ if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+ thread->md.cvar_pollsemfd =
+ _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
+ if ((thread->md.cvar_pollsemfd < 0)) {
+ free_poll_sem(&thread->md);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+
+ create_sproc(spentry, /* startup func */
+ PR_SALL, /* attribute flags */
+ (void *)thread, /* thread param */
+ NULL, /* stack address */
+ stackSize, &pid); /* stack size */
+ if (pid > 0) {
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_created);
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs);
+ rv = PR_SUCCESS;
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return rv;
+ } else {
+ close(thread->md.cvar_pollsemfd);
+ thread->md.cvar_pollsemfd = -1;
+ free_poll_sem(&thread->md);
+ thread->md.cvar_pollsem = NULL;
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_failed);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+}
+
+void _MD_CleanThread(PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ close(thread->md.cvar_pollsemfd);
+ thread->md.cvar_pollsemfd = -1;
+ free_poll_sem(&thread->md);
+ thread->md.cvar_pollsem = NULL;
+ }
+}
+
+void _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
+{
+ return;
+}
+
+extern void _MD_unix_terminate_waitpid_daemon(void);
+
+void
+_MD_CleanupBeforeExit(void)
+{
+ extern PRInt32 _pr_cpus_exit;
+
+ _MD_unix_terminate_waitpid_daemon();
+
+ _pr_irix_exit_now = 1;
+ if (_pr_numCPU > 1) {
+ /*
+ * Set a global flag, and wakeup all cpus which will notice the flag
+ * and exit.
+ */
+ _pr_cpus_exit = getpid();
+ _MD_Wakeup_CPUs();
+ while(_pr_numCPU > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _pr_numCPU--;
+ }
+ }
+ /*
+ * cause global threads on the recycle list to exit
+ */
+ _PR_DEADQ_LOCK;
+ if (_PR_NUM_DEADNATIVE != 0) {
+ PRThread *thread;
+ PRCList *ptr;
+
+ ptr = _PR_DEADNATIVEQ.next;
+ while( ptr != &_PR_DEADNATIVEQ ) {
+ thread = _PR_THREAD_PTR(ptr);
+ _MD_CVAR_POST_SEM(thread);
+ ptr = ptr->next;
+ }
+ }
+ _PR_DEADQ_UNLOCK;
+ while(_PR_NUM_DEADNATIVE > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _PR_DEC_DEADNATIVE;
+ }
+}
+
+#ifdef _PR_HAVE_SGI_PRDA_PROCMASK
+extern void __sgi_prda_procmask(int);
+#endif
+
+PRStatus
+_MD_InitAttachedThread(PRThread *thread, PRBool wakeup_parent)
+{
+ PRStatus rv = PR_SUCCESS;
+
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
+ return PR_FAILURE;
+ }
+ thread->md.cvar_pollsemfd =
+ _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
+ if ((thread->md.cvar_pollsemfd < 0)) {
+ free_poll_sem(&thread->md);
+ return PR_FAILURE;
+ }
+ if (_MD_InitThread(thread, PR_FALSE) == PR_FAILURE) {
+ close(thread->md.cvar_pollsemfd);
+ thread->md.cvar_pollsemfd = -1;
+ free_poll_sem(&thread->md);
+ thread->md.cvar_pollsem = NULL;
+ return PR_FAILURE;
+ }
+ }
+ return rv;
+}
+
+PRStatus
+_MD_InitThread(PRThread *thread, PRBool wakeup_parent)
+{
+ struct sigaction sigact;
+ PRStatus rv = PR_SUCCESS;
+
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ thread->md.id = getpid();
+ setblockproccnt(thread->md.id, 0);
+ _MD_SET_SPROC_PID(getpid());
+#ifdef _PR_HAVE_SGI_PRDA_PROCMASK
+ /*
+ * enable user-level processing of sigprocmask(); this is an
+ * undocumented feature available in Irix 6.2, 6.3, 6.4 and 6.5
+ */
+ __sgi_prda_procmask(USER_LEVEL);
+#endif
+ /*
+ * set up SIGUSR1 handler; this is used to save state
+ */
+ sigact.sa_handler = save_context_and_block;
+ sigact.sa_flags = SA_RESTART;
+ /*
+ * Must mask clock interrupts
+ */
+ sigact.sa_mask = timer_set;
+ sigaction(SIGUSR1, &sigact, 0);
+
+
+ /*
+ * PR_SETABORTSIG is a new command implemented in a patch to
+ * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
+ * sprocs in the process when one of them terminates abnormally
+ *
+ */
+ if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
+ /*
+ * if (errno == EINVAL)
+ *
+ * PR_SETABORTSIG not supported under this OS.
+ * You may want to get a recent kernel rollup patch that
+ * supports this feature.
+ */
+ }
+ /*
+     * SIGCLD handler for detecting abnormally-terminating
+ * sprocs and for reaping sprocs
+ */
+ sigact.sa_handler = sigchld_handler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = ints_off;
+ sigaction(SIGCLD, &sigact, NULL);
+ }
+ return rv;
+}
+
+/*
+ * PR_Cleanup should be executed on the primordial sproc; migrate the thread
+ * to the primordial cpu
+ */
+
+void _PR_MD_PRE_CLEANUP(PRThread *me)
+{
+PRIntn is;
+_PRCPU *cpu = _pr_primordialCPU;
+
+ PR_ASSERT(cpu);
+
+ me->flags |= _PR_BOUND_THREAD;
+
+ if (me->cpu->id != 0) {
+ _PR_INTSOFF(is);
+ _PR_RUNQ_LOCK(cpu);
+ me->cpu = cpu;
+ me->state = _PR_RUNNABLE;
+ _PR_ADD_RUNQ(me, cpu, me->priority);
+ _PR_RUNQ_UNLOCK(cpu);
+ _MD_Wakeup_CPUs();
+
+ _PR_MD_SWITCH_CONTEXT(me);
+
+ _PR_FAST_INTSON(is);
+ PR_ASSERT(me->cpu->id == 0);
+ }
+}
+
+/*
+ * process exiting
+ */
+PR_EXTERN(void ) _MD_exit(PRIntn status)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ /*
+ * the exit code of the process is the exit code of the primordial
+ * sproc
+ */
+ if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
+ /*
+ * primordial sproc case: call _exit directly
+ * Cause SIGKILL to be sent to other sprocs
+ */
+ prctl(PR_SETEXITSIG, SIGKILL);
+ _exit(status);
+ } else {
+ int rv;
+ char data;
+ sigset_t set;
+
+ /*
+ * non-primordial sproc case: cause the primordial sproc, cpu 0,
+ * to wakeup and call _exit
+ */
+ _pr_irix_process_exit = 1;
+ _pr_irix_process_exit_code = status;
+ rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
+ PR_ASSERT(rv == 1);
+ /*
+ * block all signals and wait for SIGKILL to terminate this sproc
+ */
+ sigfillset(&set);
+ sigsuspend(&set);
+ /*
+ * this code doesn't (shouldn't) execute
+ */
+ prctl(PR_SETEXITSIG, SIGKILL);
+ _exit(status);
+ }
+}
+
+/*
+ * Override the exit() function in libc to cause the process to exit
+ * when the primordial/main nspr thread calls exit. Calls to exit by any
+ * other thread simply result in a call to the exit function in libc.
+ * The exit code of the process is the exit code of the primordial
+ * sproc.
+ */
+
+void exit(int status)
+{
+PRThread *me, *thr;
+PRCList *qp;
+
+ if (!_pr_initialized) {
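+        /*
+         * This definition shadows the libc symbol, so the real exit()
+         * has to be looked up in libc.so at run time.
+         */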
+ if (!libc_exit) {
+
+ if (!libc_handle)
+ libc_handle = dlopen("libc.so",RTLD_NOW);
+ if (libc_handle)
+ libc_exit = (void (*)(int)) dlsym(libc_handle, "exit");
+ }
+ if (libc_exit)
+ (*libc_exit)(status);
+ else
+ _exit(status);
+ }
+
+ me = _PR_MD_CURRENT_THREAD();
+
+ if (me == NULL) /* detached thread */
+ (*libc_exit)(status);
+
+ PR_ASSERT(_PR_IS_NATIVE_THREAD(me) ||
+ (_PR_MD_CURRENT_CPU())->id == me->cpu->id);
+
+ if (me->flags & _PR_PRIMORDIAL) {
+
+ me->flags |= _PR_BOUND_THREAD;
+
+ PR_ASSERT((_PR_MD_CURRENT_CPU())->id == me->cpu->id);
+ if (me->cpu->id != 0) {
+ _PRCPU *cpu = _pr_primordialCPU;
+ PRIntn is;
+
+ _PR_INTSOFF(is);
+ _PR_RUNQ_LOCK(cpu);
+ me->cpu = cpu;
+ me->state = _PR_RUNNABLE;
+ _PR_ADD_RUNQ(me, cpu, me->priority);
+ _PR_RUNQ_UNLOCK(cpu);
+ _MD_Wakeup_CPUs();
+
+ _PR_MD_SWITCH_CONTEXT(me);
+
+ _PR_FAST_INTSON(is);
+ }
+
+ PR_ASSERT((_PR_MD_CURRENT_CPU())->id == 0);
+
+ if (prctl(PR_GETNSHARE) > 1) {
+#define SPROC_EXIT_WAIT_TIME 5
+ int sleep_cnt = SPROC_EXIT_WAIT_TIME;
+
+ /*
+             * sprocs still running; cause cpus and recycled global threads
+ * to exit
+ */
+ _pr_irix_exit_now = 1;
+ if (_pr_numCPU > 1) {
+ _MD_Wakeup_CPUs();
+ }
+ _PR_DEADQ_LOCK;
+ if (_PR_NUM_DEADNATIVE != 0) {
+ PRThread *thread;
+ PRCList *ptr;
+
+ ptr = _PR_DEADNATIVEQ.next;
+ while( ptr != &_PR_DEADNATIVEQ ) {
+ thread = _PR_THREAD_PTR(ptr);
+ _MD_CVAR_POST_SEM(thread);
+ ptr = ptr->next;
+ }
+ }
+
+ while (sleep_cnt-- > 0) {
+ if (waitpid(0, NULL, WNOHANG) >= 0)
+ sleep(1);
+ else
+ break;
+ }
+ prctl(PR_SETEXITSIG, SIGKILL);
+ }
+ (*libc_exit)(status);
+ } else {
+ /*
+ * non-primordial thread; simply call exit in libc.
+ */
+ (*libc_exit)(status);
+ }
+}
+
+
+void
+_MD_InitRunningCPU(_PRCPU *cpu)
+{
+ extern int _pr_md_pipefd[2];
+
+ _MD_unix_init_running_cpu(cpu);
+ cpu->md.id = getpid();
+ _MD_SET_SPROC_PID(getpid());
+ if (_pr_md_pipefd[0] >= 0) {
+ _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
+#ifndef _PR_USE_POLL
+ FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
+#endif
+ }
+}
+
+void
+_MD_ExitThread(PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ _MD_ATOMIC_DECREMENT(&_pr_md_irix_sprocs);
+ _MD_CLEAN_THREAD(thread);
+ _MD_SET_CURRENT_THREAD(NULL);
+ }
+}
+
+void
+_MD_SuspendCPU(_PRCPU *cpu)
+{
+ PRInt32 rv;
+
+ cpu->md.suspending_id = getpid();
+ rv = kill(cpu->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+     * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+
+}
+
+void
+_MD_ResumeCPU(_PRCPU *cpu)
+{
+ unblockproc(cpu->md.id);
+}
+
+#if 0
+/*
+ * save the register context of a suspended sproc
+ */
+void get_context(PRThread *thr)
+{
+ int len, fd;
+ char pidstr[24];
+ char path[24];
+
+ /*
+ * open the file corresponding to this process in procfs
+ */
+ sprintf(path,"/proc/%s","00000");
+ len = strlen(path);
+ sprintf(pidstr,"%d",thr->md.id);
+ len -= strlen(pidstr);
+ sprintf(path + len,"%s",pidstr);
+ fd = open(path,O_RDONLY);
+ if (fd >= 0) {
+ (void) ioctl(fd, PIOCGREG, thr->md.gregs);
+ close(fd);
+ }
+ return;
+}
+#endif /* 0 */
+
+void
+_MD_SuspendThread(PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ _PR_IS_GCABLE_THREAD(thread));
+
+ thread->md.suspending_id = getpid();
+ rv = kill(thread->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+     * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+}
+
+void
+_MD_ResumeThread(PRThread *thread)
+{
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ _PR_IS_GCABLE_THREAD(thread));
+ (void)unblockproc(thread->md.id);
+}
+
+/*
+ * return the set of processors available for scheduling procs in the
+ * "mask" argument
+ */
+PRInt32 _MD_GetThreadAffinityMask(PRThread *unused, PRUint32 *mask)
+{
+ PRInt32 nprocs, rv;
+ struct pda_stat *pstat;
+#define MAX_PROCESSORS 32
+
+ nprocs = sysmp(MP_NPROCS);
+ if (nprocs < 0)
+ return(-1);
+ pstat = (struct pda_stat*)PR_MALLOC(sizeof(struct pda_stat) * nprocs);
+ if (pstat == NULL)
+ return(-1);
+ rv = sysmp(MP_STAT, pstat);
+ if (rv < 0) {
+ PR_DELETE(pstat);
+ return(-1);
+ }
+ /*
+ * look at the first 32 cpus
+ */
+ nprocs = (nprocs > MAX_PROCESSORS) ? MAX_PROCESSORS : nprocs;
+ *mask = 0;
+ while (nprocs) {
+ if ((pstat->p_flags & PDAF_ENABLED) &&
+ !(pstat->p_flags & PDAF_ISOLATED)) {
+ *mask |= (1 << pstat->p_cpuid);
+ }
+ nprocs--;
+ pstat++;
+ }
+ return 0;
+}
+
+static char *_thr_state[] = {
+ "UNBORN",
+ "RUNNABLE",
+ "RUNNING",
+ "LOCK_WAIT",
+ "COND_WAIT",
+ "JOIN_WAIT",
+ "IO_WAIT",
+ "SUSPENDED",
+ "DEAD"
+};
+
+void _PR_List_Threads()
+{
+ PRThread *thr;
+ void *handle;
+ struct _PRCPU *cpu;
+ PRCList *qp;
+ int len, fd;
+ char pidstr[24];
+ char path[24];
+ prpsinfo_t pinfo;
+
+
+ printf("\n%s %-s\n"," ","LOCAL Threads");
+ printf("%s %-s\n"," ","----- -------");
+ printf("%s %-14s %-10s %-12s %-3s %-10s %-10s %-12s\n\n"," ",
+ "Thread", "State", "Wait-Handle",
+ "Cpu","Stk-Base","Stk-Sz","SP");
+ for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
+ qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
+ thr = _PR_ACTIVE_THREAD_PTR(qp);
+ printf("%s 0x%-12x %-10s "," ",thr,_thr_state[thr->state]);
+ if (thr->state == _PR_LOCK_WAIT)
+ handle = thr->wait.lock;
+ else if (thr->state == _PR_COND_WAIT)
+ handle = thr->wait.cvar;
+ else
+ handle = NULL;
+ if (handle)
+ printf("0x%-10x ",handle);
+ else
+ printf("%-12s "," ");
+ printf("%-3d ",thr->cpu->id);
+ printf("0x%-8x ",thr->stack->stackBottom);
+ printf("0x%-8x ",thr->stack->stackSize);
+ printf("0x%-10x\n",thr->md.jb[JB_SP]);
+ }
+
+ printf("\n%s %-s\n"," ","GLOBAL Threads");
+ printf("%s %-s\n"," ","------ -------");
+ printf("%s %-14s %-6s %-12s %-12s %-12s %-12s\n\n"," ","Thread",
+ "Pid","State","Wait-Handle",
+ "Stk-Base","Stk-Sz");
+
+ for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
+ qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
+ thr = _PR_ACTIVE_THREAD_PTR(qp);
+ if (thr->cpu != NULL)
+ continue; /* it is a cpu thread */
+ printf("%s 0x%-12x %-6d "," ",thr,thr->md.id);
+ /*
+ * check if the sproc is still running
+ * first call prctl(PR_GETSHMASK,pid) to check if
+ * the process is part of the share group (the pid
+ * could have been recycled by the OS)
+ */
+ if (prctl(PR_GETSHMASK,thr->md.id) < 0) {
+ printf("%-12s\n","TERMINATED");
+ continue;
+ }
+ /*
+ * Now, check if the sproc terminated and is in zombie
+ * state
+ */
+ sprintf(path,"/proc/pinfo/%s","00000");
+ len = strlen(path);
+ sprintf(pidstr,"%d",thr->md.id);
+ len -= strlen(pidstr);
+ sprintf(path + len,"%s",pidstr);
+ fd = open(path,O_RDONLY);
+ if (fd >= 0) {
+ if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
+ printf("%-12s ","TERMINATED");
+ else if (pinfo.pr_zomb)
+ printf("%-12s ","TERMINATED");
+ else
+ printf("%-12s ",_thr_state[thr->state]);
+ close(fd);
+ } else {
+ printf("%-12s ","TERMINATED");
+ }
+
+ if (thr->state == _PR_LOCK_WAIT)
+ handle = thr->wait.lock;
+ else if (thr->state == _PR_COND_WAIT)
+ handle = thr->wait.cvar;
+ else
+ handle = NULL;
+ if (handle)
+ printf("%-12x ",handle);
+ else
+ printf("%-12s "," ");
+ printf("0x%-10x ",thr->stack->stackBottom);
+ printf("0x%-10x\n",thr->stack->stackSize);
+ }
+
+ printf("\n%s %-s\n"," ","CPUs");
+ printf("%s %-s\n"," ","----");
+ printf("%s %-14s %-6s %-12s \n\n"," ","Id","Pid","State");
+
+
+ for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
+ cpu = _PR_CPU_PTR(qp);
+ printf("%s %-14d %-6d "," ",cpu->id,cpu->md.id);
+ /*
+ * check if the sproc is still running
+ * first call prctl(PR_GETSHMASK,pid) to check if
+ * the process is part of the share group (the pid
+ * could have been recycled by the OS)
+ */
+ if (prctl(PR_GETSHMASK,cpu->md.id) < 0) {
+ printf("%-12s\n","TERMINATED");
+ continue;
+ }
+ /*
+ * Now, check if the sproc terminated and is in zombie
+ * state
+ */
+ sprintf(path,"/proc/pinfo/%s","00000");
+ len = strlen(path);
+ sprintf(pidstr,"%d",cpu->md.id);
+ len -= strlen(pidstr);
+ sprintf(path + len,"%s",pidstr);
+ fd = open(path,O_RDONLY);
+ if (fd >= 0) {
+ if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
+ printf("%-12s\n","TERMINATED");
+ else if (pinfo.pr_zomb)
+ printf("%-12s\n","TERMINATED");
+ else
+ printf("%-12s\n","RUNNING");
+ close(fd);
+ } else {
+ printf("%-12s\n","TERMINATED");
+ }
+
+ }
+ fflush(stdout);
+}
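+
+/*
+ * Illustrative sketch (#if 0, not compiled): the liveness test used twice
+ * above, folded into one helper.  A pid that is still part of the share
+ * group answers prctl(PR_GETSHMASK); a zombie is reported via pr_zomb in its
+ * PIOCPSINFO record under /proc/pinfo.  The "%05d" format assumes pids of at
+ * most five digits, matching the zero-padded path built above.
+ */
+#if 0
+static int sproc_is_running(pid_t pid)
+{
+    char path[64];
+    int fd, alive = 0;
+    prpsinfo_t pinfo;
+
+    if (prctl(PR_GETSHMASK, pid) < 0)
+        return 0;                  /* pid is no longer in the share group */
+    sprintf(path, "/proc/pinfo/%05d", (int)pid);
+    fd = open(path, O_RDONLY);
+    if (fd < 0)
+        return 0;
+    if (ioctl(fd, PIOCPSINFO, &pinfo) >= 0 && !pinfo.pr_zomb)
+        alive = 1;
+    close(fd);
+    return alive;
+}
+#endif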
+#endif /* defined(_PR_PTHREADS) */
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#if !defined(_PR_PTHREADS)
+ if (isCurrent) {
+ (void) setjmp(t->md.jb);
+ }
+ *np = sizeof(t->md.jb) / sizeof(PRWord);
+ return (PRWord *) (t->md.jb);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+void _MD_EarlyInit(void)
+{
+#if !defined(_PR_PTHREADS)
+ char *eval;
+ int fd;
+ extern int __ateachexit(void (*func)(void));
+
+ sigemptyset(&ints_off);
+ sigaddset(&ints_off, SIGALRM);
+ sigaddset(&ints_off, SIGIO);
+ sigaddset(&ints_off, SIGCLD);
+
+    if ((eval = getenv("_NSPR_TERMINATE_ON_ERROR")) != NULL)
+        _nspr_terminate_on_error = (0 == atoi(eval)) ? PR_FALSE : PR_TRUE;
+
+ fd = open("/dev/zero",O_RDWR , 0);
+ if (fd < 0) {
+ perror("open /dev/zero failed");
+ exit(1);
+ }
+ /*
+ * Set up the sproc private data area.
+ * This region exists at the same address, _nspr_sproc_private, for
+ * every sproc, but each sproc gets a private copy of the region.
+ */
+ _nspr_sproc_private = (char*)mmap(0, _pr_pageSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE| MAP_LOCAL, fd, 0);
+ if (_nspr_sproc_private == (void*)-1) {
+ perror("mmap /dev/zero failed");
+ exit(1);
+ }
+ _MD_SET_SPROC_PID(getpid());
+ close(fd);
+ __ateachexit(irix_detach_sproc);
+#endif
+ _MD_IrixIntervalInit();
+} /* _MD_EarlyInit */
+
+void _MD_IrixInit(void)
+{
+#if !defined(_PR_PTHREADS)
+ struct sigaction sigact;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ int rv;
+
+#ifdef _PR_HAVE_SGI_PRDA_PROCMASK
+ /*
+ * enable user-level processing of sigprocmask(); this is an undocumented
+ * feature available in Irix 6.2, 6.3, 6.4 and 6.5
+ */
+ __sgi_prda_procmask(USER_LEVEL);
+#endif
+
+ /*
+ * set up SIGUSR1 handler; this is used to save state
+ * during PR_SuspendAll
+ */
+ sigact.sa_handler = save_context_and_block;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = ints_off;
+ sigaction(SIGUSR1, &sigact, 0);
+
+ /*
+     * Change the name of the core file from core to core.pid.
+     * This setting is inherited by the sprocs created by this process.
+ */
+#ifdef PR_COREPID
+ prctl(PR_COREPID, 0, 1);
+#endif
+ /*
+ * Irix-specific terminate on error processing
+ */
+ /*
+ * PR_SETABORTSIG is a new command implemented in a patch to
+ * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
+     * sprocs in the process when one of them terminates abnormally.
+ */
+ if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
+ /*
+ * if (errno == EINVAL)
+ *
+ * PR_SETABORTSIG not supported under this OS.
+ * You may want to get a recent kernel rollup patch that
+ * supports this feature.
+ *
+ */
+ }
+ /*
+ * PR_SETEXITSIG - send the SIGCLD signal to the parent
+ * sproc when any sproc terminates
+ *
+ * This is used to cause the entire application to
+ * terminate when any sproc terminates abnormally by
+ * receipt of a SIGSEGV, SIGBUS or SIGABRT signal.
+ * If this is not done, the application may seem
+ * "hung" to the user because the other sprocs may be
+ * waiting for resources held by the
+ * abnormally-terminating sproc.
+ */
+ prctl(PR_SETEXITSIG, 0);
+
+ sigact.sa_handler = sigchld_handler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = ints_off;
+ sigaction(SIGCLD, &sigact, NULL);
+
+ /*
+ * setup stack fields for the primordial thread
+ */
+ me->stack->stackSize = prctl(PR_GETSTACKSIZE);
+ me->stack->stackBottom = me->stack->stackTop - me->stack->stackSize;
+
+ rv = pipe(_pr_irix_primoridal_cpu_fd);
+ PR_ASSERT(rv == 0);
+#ifndef _PR_USE_POLL
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0];
+ FD_SET(_pr_irix_primoridal_cpu_fd[0], &_PR_FD_READ_SET(me->cpu));
+#endif
+
+ libc_handle = dlopen("libc.so",RTLD_NOW);
+ PR_ASSERT(libc_handle != NULL);
+ libc_exit = (void (*)(int)) dlsym(libc_handle, "exit");
+ PR_ASSERT(libc_exit != NULL);
+ /* dlclose(libc_handle); */
+
+#endif /* _PR_PTHREADS */
+
+ _PR_UnixInit();
+}
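+
+/*
+ * Illustrative sketch (#if 0, not compiled): the EINVAL case that the empty
+ * branch in _MD_IrixInit alludes to.  On kernels without the rollup patch,
+ * prctl(PR_SETABORTSIG, ...) fails with EINVAL and abnormal sproc exits are
+ * covered only by the PR_SETEXITSIG/SIGCLD path set up afterwards.
+ */
+#if 0
+    if (prctl(PR_SETABORTSIG, SIGKILL) < 0 && errno == EINVAL) {
+        fprintf(stderr, "NSPR: PR_SETABORTSIG not supported by this kernel; "
+                        "relying on PR_SETEXITSIG only\n");
+    }
+#endif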
+
+/**************************************************************************/
+/************** code and such for NSPR 2.0's interval times ***************/
+/**************************************************************************/
+
+#define PR_PSEC_PER_SEC 1000000000000ULL /* 10^12 */
+
+#ifndef SGI_CYCLECNTR_SIZE
+#define SGI_CYCLECNTR_SIZE 165 /* Size user needs to use to read CC */
+#endif
+
+static PRIntn mmem_fd = -1;
+static PRIntn clock_width = 0;
+static void *iotimer_addr = NULL;
+static PRUint32 pr_clock_mask = 0;
+static PRUint32 pr_clock_shift = 0;
+static PRIntervalTime pr_ticks = 0;
+static PRUint32 pr_clock_granularity = 1;
+static PRUint32 pr_previous = 0, pr_residual = 0;
+static PRUint32 pr_ticks_per_second = 0;
+
+extern PRIntervalTime _PR_UNIX_GetInterval(void);
+extern PRIntervalTime _PR_UNIX_TicksPerSecond(void);
+
+static void _MD_IrixIntervalInit(void)
+{
+ /*
+     * As much as I would like to use it, the cycle counter available
+     * through this interface on R3000s (aka IP12) just isn't usable:
+     * the register is only 24 bits wide and rolls over at a ferocious
+     * rate.
+ */
+ PRUint32 one_tick = 0;
+ struct utsname utsinfo;
+ uname(&utsinfo);
+ if ((strncmp("IP12", utsinfo.machine, 4) != 0)
+ && ((mmem_fd = open("/dev/mmem", O_RDONLY)) != -1))
+ {
+ int poffmask = getpagesize() - 1;
+ __psunsigned_t phys_addr, raddr, cycleval;
+
+ phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
+ raddr = phys_addr & ~poffmask;
+ iotimer_addr = mmap(
+ 0, poffmask, PROT_READ, MAP_PRIVATE, mmem_fd, (__psint_t)raddr);
+
+ clock_width = syssgi(SGI_CYCLECNTR_SIZE);
+ if (clock_width < 0)
+ {
+ /*
+ * We must be executing on a 6.0 or earlier system, since the
+ * SGI_CYCLECNTR_SIZE call is not supported.
+ *
+ * The only pre-6.1 platforms with 64-bit counters are
+ * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
+ */
+ if (!strncmp(utsinfo.machine, "IP19", 4) ||
+ !strncmp(utsinfo.machine, "IP21", 4))
+ clock_width = 64;
+ else
+ clock_width = 32;
+ }
+
+    /*
+     * 'cycleval' is the number of picoseconds per counter increment.
+     * A tick is targeted at 100 microseconds, i.e. 10^(-4) seconds or
+     * 10^8 picoseconds, so one tick corresponds to 10^8 / cycleval
+     * counter increments.
+     */
+
+    one_tick = 100000000UL / cycleval;  /* counter increments per 100 microseconds */
+
+ while (0 != one_tick)
+ {
+ pr_clock_shift += 1;
+ one_tick = one_tick >> 1;
+ pr_clock_granularity = pr_clock_granularity << 1;
+ }
+ pr_clock_mask = pr_clock_granularity - 1; /* to make a mask out of it */
+ pr_ticks_per_second = PR_PSEC_PER_SEC
+ / ((PRUint64)pr_clock_granularity * (PRUint64)cycleval);
+
+ iotimer_addr = (void*)
+ ((__psunsigned_t)iotimer_addr + (phys_addr & poffmask));
+ }
+ else
+ {
+ pr_ticks_per_second = _PR_UNIX_TicksPerSecond();
+ }
+} /* _MD_IrixIntervalInit */
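+
+/*
+ * Worked example (hypothetical numbers): if SGI_QUERY_CYCLECNTR reports
+ * cycleval = 800 picoseconds per counter increment, then
+ * one_tick = 10^8 / 800 = 125000 increments per 100 microseconds.  The
+ * shift loop runs 17 times (125000 is a 17-bit number), leaving
+ * pr_clock_shift = 17 and pr_clock_granularity = 2^17 = 131072, so one
+ * NSPR tick spans 131072 * 800 ps, roughly 104.9 microseconds, and
+ * pr_ticks_per_second = 10^12 / (131072 * 800) = 9536.
+ */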
+
+PRIntervalTime _MD_IrixIntervalPerSec(void)
+{
+ return pr_ticks_per_second;
+}
+
+PRIntervalTime _MD_IrixGetInterval(void)
+{
+ if (mmem_fd != -1)
+ {
+ if (64 == clock_width)
+ {
+ PRUint64 temp = *(PRUint64*)iotimer_addr;
+ pr_ticks = (PRIntervalTime)(temp >> pr_clock_shift);
+ }
+ else
+ {
+ PRIntervalTime ticks = pr_ticks;
+ PRUint32 now = *(PRUint32*)iotimer_addr, temp;
+ PRUint32 residual = pr_residual, previous = pr_previous;
+
+ temp = now - previous + residual;
+ residual = temp & pr_clock_mask;
+ ticks += temp >> pr_clock_shift;
+
+ pr_previous = now;
+ pr_residual = residual;
+ pr_ticks = ticks;
+ }
+ }
+ else
+ {
+ /*
+ * No fast access. Use the time of day clock. This isn't the
+ * right answer since this clock can get set back, tick at odd
+     * rates, and it's expensive to acquire.
+ */
+ pr_ticks = _PR_UNIX_GetInterval();
+ }
+ return pr_ticks;
+} /* _MD_IrixGetInterval */
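+
+/*
+ * Example of the residual bookkeeping above (using pr_clock_shift = 17 as
+ * in the hypothetical numbers of _MD_IrixIntervalInit): if the 32-bit
+ * counter advanced by 300000 increments since the previous read and 20000
+ * increments were carried over, temp = 320000, so pr_ticks advances by
+ * 320000 >> 17 = 2 and the remaining 320000 & 0x1ffff = 57856 increments
+ * are saved in pr_residual for the next call.
+ */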
+
diff --git a/nsprpub/pr/src/md/unix/linux.c b/nsprpub/pr/src/md/unix/linux.c
new file mode 100644
index 000000000..1b485a024
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/linux.c
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifdef _PR_PTHREADS
+
+extern void _MD_unix_terminate_waitpid_daemon(void);
+
+void _MD_CleanupBeforeExit(void)
+{
+ _MD_unix_terminate_waitpid_daemon();
+}
+
+#else /* ! _PR_PTHREADS */
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ /*
+ * set the pointers to the stack-pointer and frame-pointer words in the
+ * context structure; this is for debugging use.
+ */
+ thread->md.sp = _MD_GET_SP_PTR(thread);
+ thread->md.fp = _MD_GET_FP_PTR(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Linux */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Linux.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Linux.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/netbsd.c b/nsprpub/pr/src/md/unix/netbsd.c
new file mode 100644
index 000000000..aa3618a31
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/netbsd.c
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <signal.h>
+#include <poll.h>
+#include <sys/syscall.h>
+
+void _MD_EarlyInit(void)
+{
+ /*
+ * Ignore FPE because coercion of a NaN to an int causes SIGFPE
+ * to be raised.
+ */
+ struct sigaction act;
+
+ act.sa_handler = SIG_IGN;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGFPE, &act, 0);
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) sigsetjmp(CONTEXT(t), 1);
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for NetBSD */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for NetBSD.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for NetBSD.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/nto.c b/nsprpub/pr/src/md/unix/nto.c
new file mode 100644
index 000000000..e41d665ca
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/nto.c
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <setjmp.h>
+
+/* Fake this out */
+int socketpair (int foo, int foo2, int foo3, int sv[2])
+{
+ printf("error in socketpair\n");
+ exit (-1);
+}
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
diff --git a/nsprpub/pr/src/md/unix/objs.mk b/nsprpub/pr/src/md/unix/objs.mk
new file mode 100644
index 000000000..77eaa6d13
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/objs.mk
@@ -0,0 +1,31 @@
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This makefile appends to the variable OBJS the platform-dependent
+# object modules that will be part of the nspr20 library.
+
+CSRCS = \
+ unix.c \
+ unix_errors.c \
+ uxproces.c \
+ uxrng.c \
+ uxshm.c \
+ uxwrap.c \
+ $(NULL)
+
+ifneq ($(USE_PTHREADS),1)
+CSRCS += uxpoll.c
+endif
+
+ifeq ($(PTHREADS_USER),1)
+CSRCS += pthreads_user.c
+endif
+
+CSRCS += $(PR_MD_CSRCS)
+ASFILES += $(PR_MD_ASFILES)
+
+OBJS += $(addprefix md/unix/$(OBJDIR)/,$(CSRCS:.c=.$(OBJ_SUFFIX))) \
+ $(addprefix md/unix/$(OBJDIR)/,$(ASFILES:.s=.$(OBJ_SUFFIX)))
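+
+# Example (hypothetical values): with OBJDIR = Linux_x86_glibc_PTH_DBG.OBJ and
+# OBJ_SUFFIX = o, unix.c above contributes
+# md/unix/Linux_x86_glibc_PTH_DBG.OBJ/unix.o to OBJS; files listed in
+# PR_MD_ASFILES are transformed the same way via the .s suffix substitution.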
+
diff --git a/nsprpub/pr/src/md/unix/openbsd.c b/nsprpub/pr/src/md/unix/openbsd.c
new file mode 100644
index 000000000..b7f0a2995
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/openbsd.c
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <signal.h>
+#include <poll.h>
+#include <sys/syscall.h>
+
+void _MD_EarlyInit(void)
+{
+ /*
+ * Ignore FPE because coercion of a NaN to an int causes SIGFPE
+ * to be raised.
+ */
+ struct sigaction act;
+
+ act.sa_handler = SIG_IGN;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGFPE, &act, 0);
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) sigsetjmp(CONTEXT(t), 1);
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for OpenBSD */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for OpenBSD.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for OpenBSD.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/os_AIX.s b/nsprpub/pr/src/md/unix/os_AIX.s
new file mode 100644
index 000000000..0227cc47f
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_AIX.s
@@ -0,0 +1,91 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+.set r0,0; .set SP,1; .set RTOC,2; .set r3,3; .set r4,4
+.set r5,5; .set r6,6; .set r7,7; .set r8,8; .set r9,9
+.set r10,10; .set r11,11; .set r12,12; .set r13,13; .set r14,14
+.set r15,15; .set r16,16; .set r17,17; .set r18,18; .set r19,19
+.set r20,20; .set r21,21; .set r22,22; .set r23,23; .set r24,24
+.set r25,25; .set r26,26; .set r27,27; .set r28,28; .set r29,29
+.set r30,30; .set r31,31
+
+
+ .rename H.10.NO_SYMBOL{PR},""
+ .rename H.18.longjmp{TC},"longjmp"
+
+ .lglobl H.10.NO_SYMBOL{PR}
+ .globl .longjmp
+ .globl longjmp{DS}
+ .extern .sigcleanup
+ .extern .jmprestfpr
+
+# .text section
+
+ .csect H.10.NO_SYMBOL{PR}
+.longjmp:
+ mr r13,r3
+ mr r14,r4
+ stu SP,-56(SP)
+ bl .sigcleanup
+ l RTOC,0x14(SP)
+ cal SP,0x38(SP)
+ mr r3,r13
+ mr r4,r14
+ l r5,0x8(r3)
+ l SP,0xc(r3)
+ l r7,0xf8(r3)
+ st r7,0x0(SP)
+ l RTOC,0x10(r3)
+ bl .jmprestfpr
+# 1 == cr0 in disassembly
+ cmpi 1,r4,0x0
+ mtlr r5
+ lm r13,0x14(r3)
+ l r5,0x60(r3)
+ mtcrf 0x38,r5
+ mr r3,r4
+ bne __L1
+ lil r3,0x1
+__L1:
+ br
+
+# traceback table
+ .long 0x00000000
+ .byte 0x00 # VERSION=0
+ .byte 0x00 # LANG=TB_C
+ .byte 0x20 # IS_GL=0,IS_EPROL=0,HAS_TBOFF=1
+ # INT_PROC=0,HAS_CTL=0,TOCLESS=0
+ # FP_PRESENT=0,LOG_ABORT=0
+ .byte 0x40 # INT_HNDL=0,NAME_PRESENT=1
+ # USES_ALLOCA=0,CL_DIS_INV=WALK_ONCOND
+ # SAVES_CR=0,SAVES_LR=0
+ .byte 0x80 # STORES_BC=1,FPR_SAVED=0
+ .byte 0x00 # GPR_SAVED=0
+ .byte 0x02 # FIXEDPARMS=2
+ .byte 0x01 # FLOATPARMS=0,PARMSONSTK=1
+ .long 0x00000000 #
+ .long 0x00000014 # TB_OFFSET
+ .short 7 # NAME_LEN
+ .byte "longjmp"
+ .byte 0 # padding
+ .byte 0 # padding
+ .byte 0 # padding
+# End of traceback table
+ .long 0x00000000 # "\0\0\0\0"
+
+# .data section
+
+ .toc # 0x00000038
+T.18.longjmp:
+ .tc H.18.longjmp{TC},longjmp{DS}
+
+ .csect longjmp{DS}
+ .long .longjmp # "\0\0\0\0"
+ .long TOC{TC0} # "\0\0\0008"
+ .long 0x00000000 # "\0\0\0\0"
+# End csect longjmp{DS}
+
+# .bss section
diff --git a/nsprpub/pr/src/md/unix/os_BSD_386_2.s b/nsprpub/pr/src/md/unix/os_BSD_386_2.s
new file mode 100644
index 000000000..fa36599e7
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_BSD_386_2.s
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * os_BSD_386_2.s
+ * We need to define our own setjmp/longjmp on BSDI 2.x because libc's
+ * implementation does some sanity checking that defeats user level threads.
+ * This should no longer be necessary in BSDI 3.0.
+ */
+
+.globl _setjmp
+.align 2
+_setjmp:
+ movl 4(%esp),%eax
+ movl 0(%esp),%edx
+ movl %edx, 0(%eax) /* rta */
+ movl %ebx, 4(%eax)
+ movl %esp, 8(%eax)
+ movl %ebp,12(%eax)
+ movl %esi,16(%eax)
+ movl %edi,20(%eax)
+ movl $0,%eax
+ ret
+
+.globl _longjmp
+.align 2
+_longjmp:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ movl 0(%edx),%ecx
+ movl 4(%edx),%ebx
+ movl 8(%edx),%esp
+ movl 12(%edx),%ebp
+ movl 16(%edx),%esi
+ movl 20(%edx),%edi
+ cmpl $0,%eax
+ jne 1f
+ movl $1,%eax
+1: movl %ecx,0(%esp)
+ ret
diff --git a/nsprpub/pr/src/md/unix/os_Darwin.s b/nsprpub/pr/src/md/unix/os_Darwin.s
new file mode 100644
index 000000000..26a0b9226
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Darwin.s
@@ -0,0 +1,13 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifdef __i386__
+#include "os_Darwin_x86.s"
+#elif defined(__x86_64__)
+#include "os_Darwin_x86_64.s"
+#elif defined(__ppc__)
+#include "os_Darwin_ppc.s"
+#endif
diff --git a/nsprpub/pr/src/md/unix/os_Darwin_ppc.s b/nsprpub/pr/src/md/unix/os_Darwin_ppc.s
new file mode 100644
index 000000000..8479dec01
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Darwin_ppc.s
@@ -0,0 +1,68 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#
+# Based on the programming examples in The PowerPC Architecture:
+# A Specification for A New Family of RISC Processors, 2nd Ed.,
+# Book I, Section E.1, "Synchronization," pp. 249-256, May 1994.
+#
+
+.text
+
+#
+# PRInt32 __PR_DarwinPPC_AtomicIncrement(PRInt32 *val);
+#
+ .align 2
+ .globl __PR_DarwinPPC_AtomicIncrement
+ .private_extern __PR_DarwinPPC_AtomicIncrement
+__PR_DarwinPPC_AtomicIncrement:
+ lwarx r4,0,r3
+ addi r0,r4,1
+ stwcx. r0,0,r3
+ bne- __PR_DarwinPPC_AtomicIncrement
+ mr r3,r0
+ blr
+
+#
+# PRInt32 __PR_DarwinPPC_AtomicDecrement(PRInt32 *val);
+#
+ .align 2
+ .globl __PR_DarwinPPC_AtomicDecrement
+ .private_extern __PR_DarwinPPC_AtomicDecrement
+__PR_DarwinPPC_AtomicDecrement:
+ lwarx r4,0,r3
+ addi r0,r4,-1
+ stwcx. r0,0,r3
+ bne- __PR_DarwinPPC_AtomicDecrement
+ mr r3,r0
+ blr
+
+#
+# PRInt32 __PR_DarwinPPC_AtomicSet(PRInt32 *val, PRInt32 newval);
+#
+ .align 2
+ .globl __PR_DarwinPPC_AtomicSet
+ .private_extern __PR_DarwinPPC_AtomicSet
+__PR_DarwinPPC_AtomicSet:
+ lwarx r5,0,r3
+ stwcx. r4,0,r3
+ bne- __PR_DarwinPPC_AtomicSet
+ mr r3,r5
+ blr
+
+#
+# PRInt32 __PR_DarwinPPC_AtomicAdd(PRInt32 *ptr, PRInt32 val);
+#
+ .align 2
+ .globl __PR_DarwinPPC_AtomicAdd
+ .private_extern __PR_DarwinPPC_AtomicAdd
+__PR_DarwinPPC_AtomicAdd:
+ lwarx r5,0,r3
+ add r0,r4,r5
+ stwcx. r0,0,r3
+ bne- __PR_DarwinPPC_AtomicAdd
+ mr r3,r0
+ blr
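+
+#
+# Informal C-level sketch of the lwarx/stwcx. pattern used above (comments
+# only, nothing here is assembled).  lwarx loads the word and sets a
+# reservation; stwcx. stores only if the reservation still holds, otherwise
+# the routine branches back and retries.  'load_and_reserve' and
+# 'store_conditional' below are made-up stand-ins for the two instructions:
+#
+#   PRInt32 atomic_add_sketch(PRInt32 *ptr, PRInt32 val)
+#   {
+#       PRInt32 oldval, newval;
+#       do {
+#           oldval = load_and_reserve(ptr);            /* lwarx  */
+#           newval = oldval + val;
+#       } while (!store_conditional(ptr, newval));     /* stwcx. */
+#       return newval;
+#   }
+#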
diff --git a/nsprpub/pr/src/md/unix/os_Darwin_x86.s b/nsprpub/pr/src/md/unix/os_Darwin_x86.s
new file mode 100644
index 000000000..fc6379f95
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Darwin_x86.s
@@ -0,0 +1,80 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#
+# Based on os_Linux_x86.s
+#
+
+#
+# PRInt32 __PR_Darwin_x86_AtomicIncrement(PRInt32 *val);
+#
+# Atomically increment the integer pointed to by 'val' and return
+# the result of the increment.
+#
+ .text
+ .globl __PR_Darwin_x86_AtomicIncrement
+ .private_extern __PR_Darwin_x86_AtomicIncrement
+ .align 4
+__PR_Darwin_x86_AtomicIncrement:
+ movl 4(%esp), %ecx
+ movl $1, %eax
+ lock
+ xaddl %eax, (%ecx)
+ incl %eax
+ ret
+
+#
+# PRInt32 __PR_Darwin_x86_AtomicDecrement(PRInt32 *val);
+#
+# Atomically decrement the integer pointed to by 'val' and return
+# the result of the decrement.
+#
+ .text
+ .globl __PR_Darwin_x86_AtomicDecrement
+ .private_extern __PR_Darwin_x86_AtomicDecrement
+ .align 4
+__PR_Darwin_x86_AtomicDecrement:
+ movl 4(%esp), %ecx
+ movl $-1, %eax
+ lock
+ xaddl %eax, (%ecx)
+ decl %eax
+ ret
+
+#
+# PRInt32 __PR_Darwin_x86_AtomicSet(PRInt32 *val, PRInt32 newval);
+#
+# Atomically set the integer pointed to by 'val' to the new
+# value 'newval' and return the old value.
+#
+ .text
+ .globl __PR_Darwin_x86_AtomicSet
+ .private_extern __PR_Darwin_x86_AtomicSet
+ .align 4
+__PR_Darwin_x86_AtomicSet:
+ movl 4(%esp), %ecx
+ movl 8(%esp), %eax
+ xchgl %eax, (%ecx)
+ ret
+
+#
+# PRInt32 __PR_Darwin_x86_AtomicAdd(PRInt32 *ptr, PRInt32 val);
+#
+# Atomically add 'val' to the integer pointed to by 'ptr'
+# and return the result of the addition.
+#
+ .text
+ .globl __PR_Darwin_x86_AtomicAdd
+ .private_extern __PR_Darwin_x86_AtomicAdd
+ .align 4
+__PR_Darwin_x86_AtomicAdd:
+ movl 4(%esp), %ecx
+ movl 8(%esp), %eax
+ movl %eax, %edx
+ lock
+ xaddl %eax, (%ecx)
+ addl %edx, %eax
+ ret
diff --git a/nsprpub/pr/src/md/unix/os_Darwin_x86_64.s b/nsprpub/pr/src/md/unix/os_Darwin_x86_64.s
new file mode 100644
index 000000000..449aaa5db
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Darwin_x86_64.s
@@ -0,0 +1,67 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# PRInt32 __PR_Darwin_x86_64_AtomicIncrement(PRInt32 *val)
+#
+# Atomically increment the integer pointed to by 'val' and return
+# the result of the increment.
+#
+ .text
+ .globl __PR_Darwin_x86_64_AtomicIncrement
+ .private_extern __PR_Darwin_x86_64_AtomicIncrement
+ .align 4
+__PR_Darwin_x86_64_AtomicIncrement:
+ movl $1, %eax
+ lock
+ xaddl %eax, (%rdi)
+ incl %eax
+ ret
+
+# PRInt32 __PR_Darwin_x86_64_AtomicDecrement(PRInt32 *val)
+#
+# Atomically decrement the integer pointed to by 'val' and return
+# the result of the decrement.
+#
+ .text
+ .globl __PR_Darwin_x86_64_AtomicDecrement
+ .private_extern __PR_Darwin_x86_64_AtomicDecrement
+ .align 4
+__PR_Darwin_x86_64_AtomicDecrement:
+ movl $-1, %eax
+ lock
+ xaddl %eax, (%rdi)
+ decl %eax
+ ret
+
+# PRInt32 __PR_Darwin_x86_64_AtomicSet(PRInt32 *val, PRInt32 newval)
+#
+# Atomically set the integer pointed to by 'val' to the new
+# value 'newval' and return the old value.
+#
+ .text
+ .globl __PR_Darwin_x86_64_AtomicSet
+ .private_extern __PR_Darwin_x86_64_AtomicSet
+ .align 4
+__PR_Darwin_x86_64_AtomicSet:
+ movl %esi, %eax
+ xchgl %eax, (%rdi)
+ ret
+
+# PRInt32 __PR_Darwin_x86_64_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+#
+# Atomically add 'val' to the integer pointed to by 'ptr'
+# and return the result of the addition.
+#
+ .text
+ .globl __PR_Darwin_x86_64_AtomicAdd
+ .private_extern __PR_Darwin_x86_64_AtomicAdd
+ .align 4
+__PR_Darwin_x86_64_AtomicAdd:
+ movl %esi, %eax
+ lock
+ xaddl %eax, (%rdi)
+ addl %esi, %eax
+ ret
diff --git a/nsprpub/pr/src/md/unix/os_HPUX.s b/nsprpub/pr/src/md/unix/os_HPUX.s
new file mode 100644
index 000000000..f8ecabf9b
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_HPUX.s
@@ -0,0 +1,25 @@
+;
+; This Source Code Form is subject to the terms of the Mozilla Public
+; License, v. 2.0. If a copy of the MPL was not distributed with this
+; file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifdef __LP64__
+ .LEVEL 2.0W
+#else
+ .LEVEL 1.1
+#endif
+
+ .CODE ; equivalent to the following two lines
+; .SPACE $TEXT$,SORT=8
+; .SUBSPA $CODE$,QUAD=0,ALIGN=4,ACCESS=0x2c,CODE_ONLY,SORT=24
+
+ret_cr16
+ .PROC
+ .CALLINFO FRAME=0, NO_CALLS
+ .EXPORT ret_cr16,ENTRY
+ .ENTRY
+ BV %r0(%rp)
+ .EXIT
+ MFCTL %cr16,%ret0
+ .PROCEND
+ .END
diff --git a/nsprpub/pr/src/md/unix/os_HPUX_ia64.s b/nsprpub/pr/src/md/unix/os_HPUX_ia64.s
new file mode 100644
index 000000000..302ae7687
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_HPUX_ia64.s
@@ -0,0 +1,80 @@
+// -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+.text
+
+// PRInt32 _PR_ia64_AtomicIncrement(PRInt32 *val)
+//
+// Atomically increment the integer pointed to by 'val' and return
+// the result of the increment.
+//
+ .align 16
+ .global _PR_ia64_AtomicIncrement#
+ .proc _PR_ia64_AtomicIncrement#
+_PR_ia64_AtomicIncrement:
+#ifndef _LP64
+ addp4 r32 = 0, r32 ;;
+#endif
+ fetchadd4.acq r8 = [r32], 1 ;;
+ adds r8 = 1, r8
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicIncrement#
+
+// PRInt32 _PR_ia64_AtomicDecrement(PRInt32 *val)
+//
+// Atomically decrement the integer pointed to by 'val' and return
+// the result of the decrement.
+//
+ .align 16
+ .global _PR_ia64_AtomicDecrement#
+ .proc _PR_ia64_AtomicDecrement#
+_PR_ia64_AtomicDecrement:
+#ifndef _LP64
+ addp4 r32 = 0, r32 ;;
+#endif
+ fetchadd4.rel r8 = [r32], -1 ;;
+ adds r8 = -1, r8
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicDecrement#
+
+// PRInt32 _PR_ia64_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+//
+// Atomically add 'val' to the integer pointed to by 'ptr'
+// and return the result of the addition.
+//
+ .align 16
+ .global _PR_ia64_AtomicAdd#
+ .proc _PR_ia64_AtomicAdd#
+_PR_ia64_AtomicAdd:
+#ifndef _LP64
+ addp4 r32 = 0, r32 ;;
+#endif
+ ld4 r15 = [r32] ;;
+.L3:
+ mov r14 = r15
+ mov ar.ccv = r15
+ add r8 = r15, r33 ;;
+ cmpxchg4.acq r15 = [r32], r8, ar.ccv ;;
+ cmp4.ne p6, p7 = r15, r14
+ (p6) br.cond.dptk .L3
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicAdd#
+
+// PRInt32 _PR_ia64_AtomicSet(PRInt32 *val, PRInt32 newval)
+//
+// Atomically set the integer pointed to by 'val' to the new
+// value 'newval' and return the old value.
+//
+ .align 16
+ .global _PR_ia64_AtomicSet#
+ .proc _PR_ia64_AtomicSet#
+_PR_ia64_AtomicSet:
+#ifndef _LP64
+ addp4 r32 = 0, r32 ;;
+#endif
+ xchg4 r8 = [r32], r33
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicSet#
diff --git a/nsprpub/pr/src/md/unix/os_Irix.s b/nsprpub/pr/src/md/unix/os_Irix.s
new file mode 100644
index 000000000..ab1cb0b30
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Irix.s
@@ -0,0 +1,134 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Atomically add a new element to the top of the stack
+ *
+ * usage : PR_StackPush(listp, elementp);
+ * -----------------------
+ */
+
+#include "md/_irix.h"
+#ifdef _PR_HAVE_ATOMIC_CAS
+
+#include <sys/asm.h>
+#include <sys/regdef.h>
+
+LEAF(PR_StackPush)
+
+retry_push:
+.set noreorder
+ lw v0,0(a0)
+ li t1,1
+ beq v0,t1,retry_push
+ move t0,a1
+
+ ll v0,0(a0)
+ beq v0,t1,retry_push
+ nop
+ sc t1,0(a0)
+ beq t1,0,retry_push
+ nop
+ sw v0,0(a1)
+ sync
+ sw t0,0(a0)
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ jr ra
+ nop
+
+END(PR_StackPush)
+
+/*
+ *
+ * Atomically remove the element at the top of the stack
+ *
+ * usage : elemep = PR_StackPop(listp);
+ *
+ */
+
+LEAF(PR_StackPop)
+retry_pop:
+.set noreorder
+
+
+ lw v0,0(a0)
+ li t1,1
+ beq v0,0,done
+ nop
+ beq v0,t1,retry_pop
+ nop
+
+ ll v0,0(a0)
+ beq v0,0,done
+ nop
+ beq v0,t1,retry_pop
+ nop
+ sc t1,0(a0)
+ beq t1,0,retry_pop
+ nop
+ lw t0,0(v0)
+ sw t0,0(a0)
+done:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ jr ra
+ nop
+
+END(PR_StackPop)
+
+#endif /* _PR_HAVE_ATOMIC_CAS */
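+
+/*
+ * Usage sketch (not assembled here): these routines implement NSPR's
+ * lock-free stack primitives.  Assuming the PRStack/PRStackElem
+ * declarations that NSPR ships in pratom.h, a caller looks roughly like:
+ *
+ *     PR_StackPush(listp, elementp);       -- push one element
+ *     elementp = PR_StackPop(listp);       -- NULL when the stack is empty
+ *
+ * where 'listp' points to the stack header word these routines spin on,
+ * and 'elementp' points to an element whose first word links to the next
+ * element.
+ */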
diff --git a/nsprpub/pr/src/md/unix/os_Linux_ia64.s b/nsprpub/pr/src/md/unix/os_Linux_ia64.s
new file mode 100644
index 000000000..fef24ad10
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Linux_ia64.s
@@ -0,0 +1,71 @@
+// -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+.text
+
+// PRInt32 _PR_ia64_AtomicIncrement(PRInt32 *val)
+//
+// Atomically increment the integer pointed to by 'val' and return
+// the result of the increment.
+//
+ .align 16
+ .global _PR_ia64_AtomicIncrement#
+ .proc _PR_ia64_AtomicIncrement#
+_PR_ia64_AtomicIncrement:
+ fetchadd4.acq r8 = [r32], 1 ;;
+ adds r8 = 1, r8
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicIncrement#
+
+// PRInt32 _PR_ia64_AtomicDecrement(PRInt32 *val)
+//
+// Atomically decrement the integer pointed to by 'val' and return
+// the result of the decrement.
+//
+ .align 16
+ .global _PR_ia64_AtomicDecrement#
+ .proc _PR_ia64_AtomicDecrement#
+_PR_ia64_AtomicDecrement:
+ fetchadd4.rel r8 = [r32], -1 ;;
+ adds r8 = -1, r8
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicDecrement#
+
+// PRInt32 _PR_ia64_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+//
+// Atomically add 'val' to the integer pointed to by 'ptr'
+// and return the result of the addition.
+//
+ .align 16
+ .global _PR_ia64_AtomicAdd#
+ .proc _PR_ia64_AtomicAdd#
+_PR_ia64_AtomicAdd:
+ ld4 r15 = [r32] ;;
+.L3:
+ mov r14 = r15
+ mov ar.ccv = r15
+ add r8 = r15, r33 ;;
+ cmpxchg4.acq r15 = [r32], r8, ar.ccv ;;
+ cmp4.ne p6, p7 = r15, r14
+ (p6) br.cond.dptk .L3
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicAdd#
+
+// PRInt32 _PR_ia64_AtomicSet(PRInt32 *val, PRInt32 newval)
+//
+// Atomically set the integer pointed to by 'val' to the new
+// value 'newval' and return the old value.
+//
+ .align 16
+ .global _PR_ia64_AtomicSet#
+ .proc _PR_ia64_AtomicSet#
+_PR_ia64_AtomicSet:
+ xchg4 r8 = [r32], r33
+ br.ret.sptk.many b0
+ .endp _PR_ia64_AtomicSet#
+
+// Magic indicating no need for an executable stack
+.section .note.GNU-stack, "", @progbits ; .previous
diff --git a/nsprpub/pr/src/md/unix/os_Linux_ppc.s b/nsprpub/pr/src/md/unix/os_Linux_ppc.s
new file mode 100644
index 000000000..a9a83174a
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Linux_ppc.s
@@ -0,0 +1,75 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#
+# Based on the programming examples in The PowerPC Architecture:
+# A Specification for A New Family of RISC Processors, 2nd Ed.,
+# Book I, Section E.1, "Synchronization," pp. 249-256, May 1994.
+#
+
+ .section ".text"
+
+#
+# PRInt32 _PR_ppc_AtomicIncrement(PRInt32 *val);
+#
+ .align 2
+ .globl _PR_ppc_AtomicIncrement
+ .type _PR_ppc_AtomicIncrement,@function
+_PR_ppc_AtomicIncrement:
+.Lfd1: lwarx 4,0,3
+ addi 0,4,1
+ stwcx. 0,0,3
+ bne- .Lfd1
+ mr 3,0
+ blr
+.Lfe1: .size _PR_ppc_AtomicIncrement,.Lfe1-_PR_ppc_AtomicIncrement
+
+#
+# PRInt32 _PR_ppc_AtomicDecrement(PRInt32 *val);
+#
+ .align 2
+ .globl _PR_ppc_AtomicDecrement
+ .type _PR_ppc_AtomicDecrement,@function
+_PR_ppc_AtomicDecrement:
+.Lfd2: lwarx 4,0,3
+ addi 0,4,-1
+ stwcx. 0,0,3
+ bne- .Lfd2
+ mr 3,0
+ blr
+.Lfe2: .size _PR_ppc_AtomicDecrement,.Lfe2-_PR_ppc_AtomicDecrement
+
+#
+# PRInt32 _PR_ppc_AtomicSet(PRInt32 *val, PRInt32 newval);
+#
+ .align 2
+ .globl _PR_ppc_AtomicSet
+ .type _PR_ppc_AtomicSet,@function
+_PR_ppc_AtomicSet:
+.Lfd3: lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- .Lfd3
+ mr 3,5
+ blr
+.Lfe3: .size _PR_ppc_AtomicSet,.Lfe3-_PR_ppc_AtomicSet
+
+#
+# PRInt32 _PR_ppc_AtomicAdd(PRInt32 *ptr, PRInt32 val);
+#
+ .align 2
+ .globl _PR_ppc_AtomicAdd
+ .type _PR_ppc_AtomicAdd,@function
+_PR_ppc_AtomicAdd:
+.Lfd4: lwarx 5,0,3
+ add 0,4,5
+ stwcx. 0,0,3
+ bne- .Lfd4
+ mr 3,0
+ blr
+.Lfe4: .size _PR_ppc_AtomicAdd,.Lfe4-_PR_ppc_AtomicAdd
+
+# Magic indicating no need for an executable stack
+.section .note.GNU-stack, "", @progbits ; .previous
diff --git a/nsprpub/pr/src/md/unix/os_Linux_x86.s b/nsprpub/pr/src/md/unix/os_Linux_x86.s
new file mode 100644
index 000000000..a72acf5d7
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Linux_x86.s
@@ -0,0 +1,85 @@
+// -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// PRInt32 _PR_x86_AtomicIncrement(PRInt32 *val)
+//
+// Atomically increment the integer pointed to by 'val' and return
+// the result of the increment.
+//
+ .text
+ .globl _PR_x86_AtomicIncrement
+ .align 4
+_PR_x86_AtomicIncrement:
+ movl 4(%esp), %ecx
+ movl $1, %eax
+ lock
+ xaddl %eax, (%ecx)
+ incl %eax
+ ret
+
+// PRInt32 _PR_x86_AtomicDecrement(PRInt32 *val)
+//
+// Atomically decrement the integer pointed to by 'val' and return
+// the result of the decrement.
+//
+ .text
+ .globl _PR_x86_AtomicDecrement
+ .align 4
+_PR_x86_AtomicDecrement:
+ movl 4(%esp), %ecx
+ movl $-1, %eax
+ lock
+ xaddl %eax, (%ecx)
+ decl %eax
+ ret
+
+// PRInt32 _PR_x86_AtomicSet(PRInt32 *val, PRInt32 newval)
+//
+// Atomically set the integer pointed to by 'val' to the new
+// value 'newval' and return the old value.
+//
+// An alternative implementation:
+// .text
+// .globl _PR_x86_AtomicSet
+// .align 4
+//_PR_x86_AtomicSet:
+// movl 4(%esp), %ecx
+// movl 8(%esp), %edx
+// movl (%ecx), %eax
+//retry:
+// lock
+// cmpxchgl %edx, (%ecx)
+// jne retry
+// ret
+//
+ .text
+ .globl _PR_x86_AtomicSet
+ .align 4
+_PR_x86_AtomicSet:
+ movl 4(%esp), %ecx
+ movl 8(%esp), %eax
+ xchgl %eax, (%ecx)
+ ret
+
+// PRInt32 _PR_x86_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+//
+// Atomically add 'val' to the integer pointed to by 'ptr'
+// and return the result of the addition.
+//
+ .text
+ .globl _PR_x86_AtomicAdd
+ .align 4
+_PR_x86_AtomicAdd:
+ movl 4(%esp), %ecx
+ movl 8(%esp), %eax
+ movl %eax, %edx
+ lock
+ xaddl %eax, (%ecx)
+ addl %edx, %eax
+ ret
+
+// Magic indicating no need for an executable stack
+.section .note.GNU-stack, "", @progbits ; .previous
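+
+// Informal C-level sketch of the 'lock xaddl' pattern used above (comments
+// only, nothing here is assembled).  xaddl stores old+val back to memory and
+// leaves the old value in the register, so each routine applies the operand
+// once more (incl/decl/addl) to return the *new* value, which is what the
+// PR_Atomic* contract asks for.  'fetch_and_add' is a made-up stand-in for
+// the locked xaddl instruction:
+//
+//   PRInt32 atomic_increment_sketch(PRInt32 *val)
+//   {
+//       PRInt32 old = fetch_and_add(val, 1);   /* lock xaddl %eax,(%ecx) */
+//       return old + 1;                        /* incl %eax */
+//   }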
diff --git a/nsprpub/pr/src/md/unix/os_Linux_x86_64.s b/nsprpub/pr/src/md/unix/os_Linux_x86_64.s
new file mode 100644
index 000000000..8e491f0e6
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_Linux_x86_64.s
@@ -0,0 +1,74 @@
+// -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// PRInt32 _PR_x86_64_AtomicIncrement(PRInt32 *val)
+//
+// Atomically increment the integer pointed to by 'val' and return
+// the result of the increment.
+//
+ .text
+ .globl _PR_x86_64_AtomicIncrement
+ .type _PR_x86_64_AtomicIncrement, @function
+ .align 4
+_PR_x86_64_AtomicIncrement:
+ movl $1, %eax
+ lock
+ xaddl %eax, (%rdi)
+ incl %eax
+ ret
+ .size _PR_x86_64_AtomicIncrement, .-_PR_x86_64_AtomicIncrement
+
+// PRInt32 _PR_x86_64_AtomicDecrement(PRInt32 *val)
+//
+// Atomically decrement the integer pointed to by 'val' and return
+// the result of the decrement.
+//
+ .text
+ .globl _PR_x86_64_AtomicDecrement
+ .type _PR_x86_64_AtomicDecrement, @function
+ .align 4
+_PR_x86_64_AtomicDecrement:
+ movl $-1, %eax
+ lock
+ xaddl %eax, (%rdi)
+ decl %eax
+ ret
+ .size _PR_x86_64_AtomicDecrement, .-_PR_x86_64_AtomicDecrement
+
+// PRInt32 _PR_x86_64_AtomicSet(PRInt32 *val, PRInt32 newval)
+//
+// Atomically set the integer pointed to by 'val' to the new
+// value 'newval' and return the old value.
+//
+ .text
+ .globl _PR_x86_64_AtomicSet
+ .type _PR_x86_64_AtomicSet, @function
+ .align 4
+_PR_x86_64_AtomicSet:
+ movl %esi, %eax
+ xchgl %eax, (%rdi)
+ ret
+ .size _PR_x86_64_AtomicSet, .-_PR_x86_64_AtomicSet
+
+// PRInt32 _PR_x86_64_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+//
+// Atomically add 'val' to the integer pointed to by 'ptr'
+// and return the result of the addition.
+//
+ .text
+ .globl _PR_x86_64_AtomicAdd
+ .type _PR_x86_64_AtomicAdd, @function
+ .align 4
+_PR_x86_64_AtomicAdd:
+ movl %esi, %eax
+ lock
+ xaddl %eax, (%rdi)
+ addl %esi, %eax
+ ret
+ .size _PR_x86_64_AtomicAdd, .-_PR_x86_64_AtomicAdd
+
+// Magic indicating no need for an executable stack
+.section .note.GNU-stack, "", @progbits ; .previous
diff --git a/nsprpub/pr/src/md/unix/os_SunOS_sparcv9.s b/nsprpub/pr/src/md/unix/os_SunOS_sparcv9.s
new file mode 100644
index 000000000..8bc75af88
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_SunOS_sparcv9.s
@@ -0,0 +1,173 @@
+! -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+!
+! This Source Code Form is subject to the terms of the Mozilla Public
+! License, v. 2.0. If a copy of the MPL was not distributed with this
+! file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+!
+! atomic increment, decrement and swap routines for V8+ sparc (ultrasparc)
+! using CAS (compare-and-swap) atomic instructions
+!
+! this MUST be compiled with an ultrasparc-aware assembler
+!
+! standard asm linkage macros; this module must be compiled
+! with the -P option (use C preprocessor)
+
+#include <sys/asm_linkage.h>
+
+! ======================================================================
+!
+! Perform the sequence a = a + 1 atomically with respect to other
+! fetch-and-adds to location a in a wait-free fashion.
+!
+! usage : val = PR_AtomicIncrement(address)
+! return: current value (you'd think this would be old val)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [local] - work register
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(_MD_AtomicIncrement) ! standard assembler/ELF prologue
+
+retryAI:
+ ld [%o0], %o2 ! set o2 to the current value
+ add %o2, 0x1, %o3 ! calc the new value
+ mov %o3, %o1 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAI ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o1, %o0 ! set the return code to the new value
+
+ SET_SIZE(_MD_AtomicIncrement) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = a - 1 atomically with respect to other
+! fetch-and-decs to location a in a wait-free fashion.
+!
+! usage : val = PR_AtomicDecrement(address)
+! return: current value (you'd think this would be old val)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [local] - work register
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(_MD_AtomicDecrement) ! standard assembler/ELF prologue
+
+retryAD:
+ ld [%o0], %o2 ! set o2 to the current value
+ sub %o2, 0x1, %o3 ! calc the new value
+ mov %o3, %o1 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAD ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o1, %o0 ! set the return code to the new value
+
+ SET_SIZE(_MD_AtomicDecrement) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = b atomically with respect to other
+! fetch-and-stores to location a in a wait-free fashion.
+!
+! usage : old_val = PR_AtomicSet(address, newval)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [input] - the new value to set for [%o0]
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(_MD_AtomicSet) ! standard assembler/ELF prologue
+
+retryAS:
+ ld [%o0], %o2 ! set o2 to the current value
+ mov %o1, %o3 ! set up the new value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAS ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o3, %o0 ! set the return code to the prev value
+
+ SET_SIZE(_MD_AtomicSet) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = a + b atomically with respect to other
+! fetch-and-adds to location a in a wait-free fashion.
+!
+! usage : newval = PR_AtomicAdd(address, val)
+! return: the value after addition
+!
+ ENTRY(_MD_AtomicAdd) ! standard assembler/ELF prologue
+
+retryAA:
+ ld [%o0], %o2 ! set o2 to the current value
+ add %o2, %o1, %o3 ! calc the new value
+ mov %o3, %o4 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAA ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o4, %o0 ! set the return code to the new value
+
+ SET_SIZE(_MD_AtomicAdd) ! standard assembler/ELF epilogue
+
+!
+! end
+!
diff --git a/nsprpub/pr/src/md/unix/os_SunOS_ultrasparc.s b/nsprpub/pr/src/md/unix/os_SunOS_ultrasparc.s
new file mode 100644
index 000000000..32bf7884a
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_SunOS_ultrasparc.s
@@ -0,0 +1,173 @@
+! -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+!
+! This Source Code Form is subject to the terms of the Mozilla Public
+! License, v. 2.0. If a copy of the MPL was not distributed with this
+! file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+!
+! atomic increment, decrement and swap routines for V8+ sparc (ultrasparc)
+! using CAS (compare-and-swap) atomic instructions
+!
+! this MUST be compiled with an ultrasparc-aware assembler
+!
+! standard asm linkage macros; this module must be compiled
+! with the -P option (use C preprocessor)
+
+#include <sys/asm_linkage.h>
+
+! ======================================================================
+!
+! Perform the sequence a = a + 1 atomically with respect to other
+! fetch-and-adds to location a in a wait-free fashion.
+!
+! usage : val = PR_AtomicIncrement(address)
+! return: current value (you'd think this would be old val)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [local] - work register
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(PR_AtomicIncrement) ! standard assembler/ELF prologue
+
+retryAI:
+ ld [%o0], %o2 ! set o2 to the current value
+ add %o2, 0x1, %o3 ! calc the new value
+ mov %o3, %o1 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAI ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o1, %o0 ! set the return code to the new value
+
+ SET_SIZE(PR_AtomicIncrement) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = a - 1 atomically with respect to other
+! fetch-and-decs to location a in a wait-free fashion.
+!
+! usage : val = PR_AtomicDecrement(address)
+! return: current value (you'd think this would be old val)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [local] - work register
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(PR_AtomicDecrement) ! standard assembler/ELF prologue
+
+retryAD:
+ ld [%o0], %o2 ! set o2 to the current value
+ sub %o2, 0x1, %o3 ! calc the new value
+ mov %o3, %o1 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAD ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o1, %o0 ! set the return code to the new value
+
+ SET_SIZE(PR_AtomicDecrement) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = b atomically with respect to other
+! fetch-and-stores to location a in a wait-free fashion.
+!
+! usage : old_val = PR_AtomicSet(address, newval)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [input] - the new value to set for [%o0]
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(PR_AtomicSet) ! standard assembler/ELF prologue
+
+retryAS:
+ ld [%o0], %o2 ! set o2 to the current value
+ mov %o1, %o3 ! set up the new value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAS ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o3, %o0 ! set the return code to the prev value
+
+ SET_SIZE(PR_AtomicSet) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = a + b atomically with respect to other
+! fetch-and-adds to location a in a wait-free fashion.
+!
+! usage : newval = PR_AtomicAdd(address, val)
+! return: the value after addition
+!
+ ENTRY(PR_AtomicAdd) ! standard assembler/ELF prologue
+
+retryAA:
+ ld [%o0], %o2 ! set o2 to the current value
+ add %o2, %o1, %o3 ! calc the new value
+ mov %o3, %o4 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAA ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o4, %o0 ! set the return code to the new value
+
+ SET_SIZE(PR_AtomicAdd) ! standard assembler/ELF epilogue
+
+!
+! end
+!
diff --git a/nsprpub/pr/src/md/unix/os_SunOS_x86.s b/nsprpub/pr/src/md/unix/os_SunOS_x86.s
new file mode 100644
index 000000000..dcd7535bf
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_SunOS_x86.s
@@ -0,0 +1,126 @@
+// -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ .text
+
+ .globl getedi
+getedi:
+ movl %edi,%eax
+ ret
+ .type getedi,@function
+ .size getedi,.-getedi
+
+ .globl setedi
+setedi:
+ movl 4(%esp),%edi
+ ret
+ .type setedi,@function
+ .size setedi,.-setedi
+
+ .globl __MD_FlushRegisterWindows
+ .globl _MD_FlushRegisterWindows
+
+__MD_FlushRegisterWindows:
+_MD_FlushRegisterWindows:
+
+ ret
+
+//
+// sol_getsp()
+//
+// Return the current sp (for debugging)
+//
+ .globl sol_getsp
+sol_getsp:
+ movl %esp, %eax
+ ret
+
+//
+// sol_curthread()
+//
+// Return a unique identifier for the currently active thread.
+//
+ .globl sol_curthread
+sol_curthread:
+ movl %ecx, %eax
+ ret
+
+// PRInt32 _MD_AtomicIncrement(PRInt32 *val)
+//
+// Atomically increment the integer pointed to by 'val' and return
+// the result of the increment.
+//
+ .text
+ .globl _MD_AtomicIncrement
+ .align 4
+_MD_AtomicIncrement:
+ movl 4(%esp), %ecx
+ movl $1, %eax
+ lock
+ xaddl %eax, (%ecx)
+ incl %eax
+ ret
+
+// PRInt32 _MD_AtomicDecrement(PRInt32 *val)
+//
+// Atomically decrement the integer pointed to by 'val' and return
+// the result of the decrement.
+//
+ .text
+ .globl _MD_AtomicDecrement
+ .align 4
+_MD_AtomicDecrement:
+ movl 4(%esp), %ecx
+ movl $-1, %eax
+ lock
+ xaddl %eax, (%ecx)
+ decl %eax
+ ret
+
+// PRInt32 _MD_AtomicSet(PRInt32 *val, PRInt32 newval)
+//
+// Atomically set the integer pointed to by 'val' to the new
+// value 'newval' and return the old value.
+//
+// An alternative implementation:
+// .text
+// .globl _MD_AtomicSet
+// .align 4
+//_MD_AtomicSet:
+// movl 4(%esp), %ecx
+// movl 8(%esp), %edx
+// movl (%ecx), %eax
+//retry:
+// lock
+// cmpxchgl %edx, (%ecx)
+// jne retry
+// ret
+//
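+// The xchgl implementation below needs no lock prefix or retry loop, since
+// an x86 xchg instruction with a memory operand is implicitly locked.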
+ .text
+ .globl _MD_AtomicSet
+ .align 4
+_MD_AtomicSet:
+ movl 4(%esp), %ecx
+ movl 8(%esp), %eax
+ xchgl %eax, (%ecx)
+ ret
+
+// PRInt32 _MD_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+//
+// Atomically add 'val' to the integer pointed to by 'ptr'
+// and return the result of the addition.
+//
+ .text
+ .globl _MD_AtomicAdd
+ .align 4
+_MD_AtomicAdd:
+ movl 4(%esp), %ecx
+ movl 8(%esp), %eax
+ movl %eax, %edx
+ lock
+ xaddl %eax, (%ecx)
+ addl %edx, %eax
+ ret
diff --git a/nsprpub/pr/src/md/unix/os_SunOS_x86_64.s b/nsprpub/pr/src/md/unix/os_SunOS_x86_64.s
new file mode 100644
index 000000000..5e3df049f
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/os_SunOS_x86_64.s
@@ -0,0 +1,63 @@
+// -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
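+// Note: under the standard x86-64 calling convention the pointer argument
+// arrives in %rdi and the PRInt32 value argument in %esi, which is why the
+// routines below take no arguments from the stack.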
+// PRInt32 _MD_AtomicIncrement(PRInt32 *val)
+//
+// Atomically increment the integer pointed to by 'val' and return
+// the result of the increment.
+//
+ .text
+ .globl _MD_AtomicIncrement
+ .align 4
+_MD_AtomicIncrement:
+ movl $1, %eax
+ lock
+ xaddl %eax, (%rdi)
+ incl %eax
+ ret
+
+// PRInt32 _MD_AtomicDecrement(PRInt32 *val)
+//
+// Atomically decrement the integer pointed to by 'val' and return
+// the result of the decrement.
+//
+ .text
+ .globl _MD_AtomicDecrement
+ .align 4
+_MD_AtomicDecrement:
+ movl $-1, %eax
+ lock
+ xaddl %eax, (%rdi)
+ decl %eax
+ ret
+
+// PRInt32 _MD_AtomicSet(PRInt32 *val, PRInt32 newval)
+//
+// Atomically set the integer pointed to by 'val' to the new
+// value 'newval' and return the old value.
+//
+ .text
+ .globl _MD_AtomicSet
+ .align 4
+_MD_AtomicSet:
+ movl %esi, %eax
+ xchgl %eax, (%rdi)
+ ret
+
+// PRInt32 _MD_AtomicAdd(PRInt32 *ptr, PRInt32 val)
+//
+// Atomically add 'val' to the integer pointed to by 'ptr'
+// and return the result of the addition.
+//
+ .text
+ .globl _MD_AtomicAdd
+ .align 4
+_MD_AtomicAdd:
+ movl %esi, %eax
+ lock
+ xaddl %eax, (%rdi)
+ addl %esi, %eax
+ ret
diff --git a/nsprpub/pr/src/md/unix/osf1.c b/nsprpub/pr/src/md/unix/osf1.c
new file mode 100644
index 000000000..a9a1c67d0
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/osf1.c
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for OSF1 */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for OSF1.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for OSF1.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/pthreads_user.c b/nsprpub/pr/src/md/unix/pthreads_user.c
new file mode 100644
index 000000000..bb64b9f44
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/pthreads_user.c
@@ -0,0 +1,448 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+#include <pthread.h>
+
+
+sigset_t ints_off;
+pthread_mutex_t _pr_heapLock;
+pthread_key_t current_thread_key;
+pthread_key_t current_cpu_key;
+pthread_key_t last_thread_key;
+pthread_key_t intsoff_key;
+
+
+PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
+PRInt32 _pr_md_pthreads = 1;
+
+void _MD_EarlyInit(void)
+{
+extern PRInt32 _nspr_noclock;
+
+ if (pthread_key_create(&current_thread_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+ if (pthread_key_create(&current_cpu_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+ if (pthread_key_create(&last_thread_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+ if (pthread_key_create(&intsoff_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+
+ sigemptyset(&ints_off);
+ sigaddset(&ints_off, SIGALRM);
+ sigaddset(&ints_off, SIGIO);
+ sigaddset(&ints_off, SIGCLD);
+
+ /*
+ * disable clock interrupts
+ */
+ _nspr_noclock = 1;
+
+}
+
+void _MD_InitLocks()
+{
+ if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
+ perror("pthread_mutex_init failed");
+ exit(1);
+ }
+}
+
+PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
+{
+ PRIntn _is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ pthread_mutex_destroy(&lockp->mutex);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+}
+
+
+
+PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
+{
+ PRStatus rv;
+ PRIntn is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ rv = pthread_mutex_init(&lockp->mutex, NULL);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
+}
+
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+PR_IMPLEMENT(void)
+_MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
+{
+ /*
+ * XXX - to be implemented
+ */
+ return;
+}
+
+PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread *thread)
+{
+ struct sigaction sigact;
+
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ thread->md.pthread = pthread_self();
+#if 0
+ /*
+ * set up SIGUSR1 handler; this is used to save state
+ * during PR_SuspendAll
+ */
+ sigact.sa_handler = save_context_and_block;
+ sigact.sa_flags = SA_RESTART;
+ /*
+ * Must mask clock interrupts
+ */
+ sigact.sa_mask = timer_set;
+ sigaction(SIGUSR1, &sigact, 0);
+#endif
+ }
+
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ _MD_CLEAN_THREAD(thread);
+ _MD_SET_CURRENT_THREAD(NULL);
+ }
+}
+
+PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ pthread_cond_destroy(&thread->md.pthread_cond);
+ }
+}
+
+PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ _PR_IS_GCABLE_THREAD(thread));
+#if 0
+ thread->md.suspending_id = getpid();
+ rv = kill(thread->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+ * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+#endif
+}
+
+PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ _PR_IS_GCABLE_THREAD(thread));
+#if 0
+ rv = unblockproc(thread->md.id);
+#endif
+}
+
+PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU *cpu)
+{
+ PRInt32 rv;
+
+#if 0
+ cpu->md.suspending_id = getpid();
+ rv = kill(cpu->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+ * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+#endif
+}
+
+PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU *cpu)
+{
+#if 0
+ unblockproc(cpu->md.id);
+#endif
+}
+
+
+#define PT_NANOPERMICRO 1000UL
+#define PT_BILLION 1000000000UL
+
+PR_IMPLEMENT(PRStatus)
+_pt_wait(PRThread *thread, PRIntervalTime timeout)
+{
+int rv;
+struct timeval now;
+struct timespec tmo;
+PRUint32 ticks = PR_TicksPerSecond();
+
+
+ if (timeout != PR_INTERVAL_NO_TIMEOUT) {
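+ /*
+ * Split the interval into whole seconds plus a remainder in ticks.
+ * Scaling that remainder by PT_NANOPERMICRO before the ticks-to-
+ * microseconds conversion leaves tv_nsec expressed in nanoseconds.
+ */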
+ tmo.tv_sec = timeout / ticks;
+ tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
+ tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO *
+ tmo.tv_nsec);
+
+ /* pthreads wants this in absolute time, off we go ... */
+ (void)GETTIMEOFDAY(&now);
+ /* that one's usecs, this one's nsecs - grrrr! */
+ tmo.tv_sec += now.tv_sec;
+ tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
+ tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
+ tmo.tv_nsec %= PT_BILLION;
+ }
+
+ pthread_mutex_lock(&thread->md.pthread_mutex);
+ thread->md.wait--;
+ if (thread->md.wait < 0) {
+ if (timeout != PR_INTERVAL_NO_TIMEOUT) {
+ rv = pthread_cond_timedwait(&thread->md.pthread_cond,
+ &thread->md.pthread_mutex, &tmo);
+ }
+ else
+ rv = pthread_cond_wait(&thread->md.pthread_cond,
+ &thread->md.pthread_mutex);
+ if (rv != 0) {
+ thread->md.wait++;
+ }
+ } else
+ rv = 0;
+ pthread_mutex_unlock(&thread->md.pthread_mutex);
+
+ return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_wait(PRThread *thread, PRIntervalTime ticks)
+{
+ if ( thread->flags & _PR_GLOBAL_SCOPE ) {
+ _MD_CHECK_FOR_EXIT();
+ if (_pt_wait(thread, ticks) == PR_FAILURE) {
+ _MD_CHECK_FOR_EXIT();
+ /*
+ * wait timed out
+ */
+ _PR_THREAD_LOCK(thread);
+ if (thread->wait.cvar) {
+ /*
+ * The thread will remove itself from the waitQ
+ * of the cvar in _PR_WaitCondVar
+ */
+ thread->wait.cvar = NULL;
+ thread->state = _PR_RUNNING;
+ _PR_THREAD_UNLOCK(thread);
+ } else {
+ _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
+ _PR_THREAD_UNLOCK(thread);
+ }
+ }
+ } else {
+ _PR_MD_SWITCH_CONTEXT(thread);
+ }
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_WakeupWaiter(PRThread *thread)
+{
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 pid, rv;
+ PRIntn is;
+
+ PR_ASSERT(_pr_md_idle_cpus >= 0);
+ if (thread == NULL) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ } else if (!_PR_IS_NATIVE_THREAD(thread)) {
+ /*
+ * If the thread is on my cpu's runq there is no need to
+ * wakeup any cpus
+ */
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (me->cpu != thread->cpu) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ }
+ } else {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ }
+ } else {
+ PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+
+ pthread_mutex_lock(&thread->md.pthread_mutex);
+ thread->md.wait++;
+ rv = pthread_cond_signal(&thread->md.pthread_cond);
+ PR_ASSERT(rv == 0);
+ pthread_mutex_unlock(&thread->md.pthread_mutex);
+
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for AIX */
+PR_IMPLEMENT(void)
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_CreateThread(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PRIntn is;
+ int rv;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ pthread_attr_t attr;
+
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+
+ if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+
+ if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+ thread->flags |= _PR_GLOBAL_SCOPE;
+
+ pthread_attr_init(&attr); /* initialize attr with default attributes */
+ if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ pthread_cond_destroy(&thread->md.pthread_cond);
+ pthread_attr_destroy(&attr);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+
+ thread->md.wait = 0;
+ rv = pthread_create(&thread->md.pthread, &attr, start, (void *)thread);
+ if (0 == rv) {
+ _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
+ _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_SUCCESS;
+ } else {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ pthread_cond_destroy(&thread->md.pthread_cond);
+ pthread_attr_destroy(&attr);
+ _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
+ return PR_FAILURE;
+ }
+}
+
+PR_IMPLEMENT(void)
+_MD_InitRunningCPU(struct _PRCPU *cpu)
+{
+ extern int _pr_md_pipefd[2];
+
+ _MD_unix_init_running_cpu(cpu);
+ cpu->md.pthread = pthread_self();
+ if (_pr_md_pipefd[0] >= 0) {
+ _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
+#ifndef _PR_USE_POLL
+ FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
+#endif
+ }
+}
+
+
+void
+_MD_CleanupBeforeExit(void)
+{
+#if 0
+ extern PRInt32 _pr_cpus_exit;
+
+ _pr_irix_exit_now = 1;
+ if (_pr_numCPU > 1) {
+ /*
+ * Set a global flag, and wakeup all cpus which will notice the flag
+ * and exit.
+ */
+ _pr_cpus_exit = getpid();
+ _MD_Wakeup_CPUs();
+ while(_pr_numCPU > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _pr_numCPU--;
+ }
+ }
+ /*
+ * cause global threads on the recycle list to exit
+ */
+ _PR_DEADQ_LOCK;
+ if (_PR_NUM_DEADNATIVE != 0) {
+ PRThread *thread;
+ PRCList *ptr;
+
+ ptr = _PR_DEADNATIVEQ.next;
+ while( ptr != &_PR_DEADNATIVEQ ) {
+ thread = _PR_THREAD_PTR(ptr);
+ _MD_CVAR_POST_SEM(thread);
+ ptr = ptr->next;
+ }
+ }
+ _PR_DEADQ_UNLOCK;
+ while(_PR_NUM_DEADNATIVE > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _PR_DEC_DEADNATIVE;
+ }
+#endif
+}
diff --git a/nsprpub/pr/src/md/unix/qnx.c b/nsprpub/pr/src/md/unix/qnx.c
new file mode 100644
index 000000000..259e29cdc
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/qnx.c
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for QNX */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Unixware.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRUintn priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Unixware.");
+ return PR_FAILURE;
+}
diff --git a/nsprpub/pr/src/md/unix/riscos.c b/nsprpub/pr/src/md/unix/riscos.c
new file mode 100644
index 000000000..318903acb
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/riscos.c
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifdef _PR_PTHREADS
+
+void _MD_CleanupBeforeExit(void)
+{
+}
+
+#else /* ! _PR_PTHREADS */
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ /*
+ * set the pointers to the stack-pointer and frame-pointer words in the
+ * context structure; this is for debugging use.
+ */
+ thread->md.sp = _MD_GET_SP_PTR(thread);
+ thread->md.fp = _MD_GET_FP_PTR(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for RISC OS */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for RISC OS.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for RISC OS.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/scoos.c b/nsprpub/pr/src/md/unix/scoos.c
new file mode 100644
index 000000000..b727a53ae
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/scoos.c
@@ -0,0 +1,149 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * SCO ODT 5.0 - originally created by mikep
+ */
+#include "primpl.h"
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+#ifdef ALARMS_BREAK_TCP /* I don't think they do */
+
+PRInt32 _MD_connect(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _connect(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+
+PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _accept(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+#endif
+
+/*
+ * These are also implemented in pratom.c using NSPR locks. Any reason
+ * this might be better or worse? If you like this better, define
+ * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
+ */
+#ifdef _PR_HAVE_ATOMIC_OPS
+/* Atomic operations */
+#include <stdio.h>
+static FILE *_uw_semf;
+
+void
+_MD_INIT_ATOMIC(void)
+{
+ /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
+ if ((_uw_semf = tmpfile()) == NULL)
+ PR_ASSERT(0);
+
+ return;
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)++;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
+{
+ flockfile(_uw_semf);
+ (*ptr) += val;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)--;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ flockfile(_uw_semf);
+ *val = newval;
+ unflockfile(_uw_semf);
+}
+#endif
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for SCO */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for SCO.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for SCO.");
+ return PR_FAILURE;
+}
diff --git a/nsprpub/pr/src/md/unix/solaris.c b/nsprpub/pr/src/md/unix/solaris.c
new file mode 100644
index 000000000..4fe2043b1
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/solaris.c
@@ -0,0 +1,161 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+
+extern PRBool suspendAllOn;
+extern PRThread *suspendAllThread;
+
+extern void _MD_SET_PRIORITY(_MDThread *md, PRThreadPriority newPri);
+
+PRIntervalTime _MD_Solaris_TicksPerSecond(void)
+{
+ /*
+ * Ticks have a 10-microsecond resolution. So there are
+ * 100000 ticks per second.
+ */
+ return 100000UL;
+}
+
+/* Interval timers, implemented using gethrtime() */
+
+PRIntervalTime _MD_Solaris_GetInterval(void)
+{
+ union {
+ hrtime_t hrt; /* hrtime_t is a 64-bit (long long) integer */
+ PRInt64 pr64;
+ } time;
+ PRInt64 resolution;
+ PRIntervalTime ticks;
+
+ time.hrt = gethrtime(); /* in nanoseconds */
+ /*
+ * Convert from nanoseconds to ticks. A tick's resolution is
+ * 10 microseconds, or 10000 nanoseconds.
+ */
+ LL_I2L(resolution, 10000);
+ LL_DIV(time.pr64, time.pr64, resolution);
+ LL_L2UI(ticks, time.pr64);
+ return ticks;
+}
+
+#ifdef _PR_PTHREADS
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np)
+{
+ *np = 0;
+ return NULL;
+}
+#endif /* _PR_PTHREADS */
+
+#if defined(_PR_LOCAL_THREADS_ONLY)
+
+void _MD_EarlyInit(void)
+{
+}
+
+void _MD_SolarisInit()
+{
+ _PR_UnixInit();
+}
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRThreadPriority newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ PR_ASSERT((thread == NULL) || (!(thread->flags & _PR_GLOBAL_SCOPE)));
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Solaris */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Solaris");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Solaris");
+ return(PR_FAILURE);
+}
+
+#ifdef USE_SETJMP
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+#else
+PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np)
+{
+ if (isCurrent) {
+ (void) getcontext(CONTEXT(t));
+ }
+ *np = NGREG;
+ return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
+}
+#endif /* USE_SETJMP */
+
+#endif /* _PR_LOCAL_THREADS_ONLY */
+
+#ifndef _PR_PTHREADS
+#if defined(i386) && defined(SOLARIS2_4)
+/*
+ * Because clock_gettime() on Solaris/x86 2.4 always generates a
+ * segmentation fault, we use an emulated version _pr_solx86_clock_gettime(),
+ * which is implemented using gettimeofday().
+ */
+
+int
+_pr_solx86_clock_gettime(clockid_t clock_id, struct timespec *tp)
+{
+ struct timeval tv;
+
+ if (clock_id != CLOCK_REALTIME) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ gettimeofday(&tv, NULL);
+ tp->tv_sec = tv.tv_sec;
+ tp->tv_nsec = tv.tv_usec * 1000;
+ return 0;
+}
+#endif /* i386 && SOLARIS2_4 */
+#endif /* _PR_PTHREADS */
diff --git a/nsprpub/pr/src/md/unix/symbian.c b/nsprpub/pr/src/md/unix/symbian.c
new file mode 100644
index 000000000..c797d8687
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/symbian.c
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ *np = 0;
+ return NULL;
+}
diff --git a/nsprpub/pr/src/md/unix/unix.c b/nsprpub/pr/src/md/unix/unix.c
new file mode 100644
index 000000000..fdae1199c
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/unix.c
@@ -0,0 +1,3787 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/utsname.h>
+
+#ifdef _PR_POLL_AVAILABLE
+#include <poll.h>
+#endif
+
+#if defined(ANDROID)
+#include <android/api-level.h>
+#endif
+
+/* To get FIONREAD */
+#if defined(UNIXWARE)
+#include <sys/filio.h>
+#endif
+
+#if defined(NTO)
+#include <sys/statvfs.h>
+#endif
+
+/*
+ * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or
+ * PRInt32* pointer to a _PRSockLen_t* pointer.
+ */
+#if defined(HAVE_SOCKLEN_T) \
+ || (defined(__GLIBC__) && __GLIBC__ >= 2)
+#define _PRSockLen_t socklen_t
+#elif defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \
+ || defined(AIX4_1) || defined(LINUX) \
+ || defined(BSDI) || defined(SCO) \
+ || defined(DARWIN) \
+ || defined(QNX)
+#define _PRSockLen_t int
+#elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \
+ || defined(NETBSD) || defined(OPENBSD) || defined(UNIXWARE) \
+ || defined(DGUX) || defined(NTO) || defined(RISCOS)
+#define _PRSockLen_t size_t
+#else
+#error "Cannot determine architecture"
+#endif
+
+/*
+** Global lock variable used to bracket calls into rusty libraries that
+** aren't thread safe (like libc, libX, etc).
+*/
+static PRLock *_pr_rename_lock = NULL;
+static PRMonitor *_pr_Xfe_mon = NULL;
+
+static PRInt64 minus_one;
+
+sigset_t timer_set;
+
+#if !defined(_PR_PTHREADS)
+
+static sigset_t empty_set;
+
+#ifdef SOLARIS
+#include <sys/file.h>
+#include <sys/filio.h>
+#endif
+
+#ifndef PIPE_BUF
+#define PIPE_BUF 512
+#endif
+
+/*
+ * _nspr_noclock - if set clock interrupts are disabled
+ */
+int _nspr_noclock = 1;
+
+#ifdef IRIX
+extern PRInt32 _nspr_terminate_on_error;
+#endif
+
+/*
+ * There is an assertion in this code that NSPR's definition of PRIOVec
+ * is bit compatible with UNIX' definition of a struct iovec. This is
+ * applicable to the 'writev()' operations where the types are casually
+ * cast to avoid warnings.
+ */
+
+int _pr_md_pipefd[2] = { -1, -1 };
+static char _pr_md_pipebuf[PIPE_BUF];
+static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag,
+ PRIntervalTime timeout);
+
+_PRInterruptTable _pr_interruptTable[] = {
+ {
+ "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, },
+ {
+ 0 }
+};
+
+void _MD_unix_init_running_cpu(_PRCPU *cpu)
+{
+ PR_INIT_CLIST(&(cpu->md.md_unix.ioQ));
+ cpu->md.md_unix.ioq_max_osfd = -1;
+ cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT;
+}
+
+PRStatus _MD_open_dir(_MDDir *d, const char *name)
+{
+int err;
+
+ d->d = opendir(name);
+ if (!d->d) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_OPENDIR_ERROR(err);
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+PRInt32 _MD_close_dir(_MDDir *d)
+{
+int rv = 0, err;
+
+ if (d->d) {
+ rv = closedir(d->d);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_CLOSEDIR_ERROR(err);
+ }
+ }
+ return rv;
+}
+
+char * _MD_read_dir(_MDDir *d, PRIntn flags)
+{
+struct dirent *de;
+int err;
+
+ for (;;) {
+ /*
+ * XXX: readdir() is not MT-safe. There is an MT-safe version
+ * readdir_r() on some systems.
+ */
+ _MD_ERRNO() = 0;
+ de = readdir(d->d);
+ if (!de) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_READDIR_ERROR(err);
+ return 0;
+ }
+ if ((flags & PR_SKIP_DOT) &&
+ (de->d_name[0] == '.') && (de->d_name[1] == 0))
+ continue;
+ if ((flags & PR_SKIP_DOT_DOT) &&
+ (de->d_name[0] == '.') && (de->d_name[1] == '.') &&
+ (de->d_name[2] == 0))
+ continue;
+ if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.'))
+ continue;
+ break;
+ }
+ return de->d_name;
+}
+
+PRInt32 _MD_delete(const char *name)
+{
+PRInt32 rv, err;
+#ifdef UNIXWARE
+ sigset_t set, oset;
+#endif
+
+#ifdef UNIXWARE
+ sigfillset(&set);
+ sigprocmask(SIG_SETMASK, &set, &oset);
+#endif
+ rv = unlink(name);
+#ifdef UNIXWARE
+ sigprocmask(SIG_SETMASK, &oset, NULL);
+#endif
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_UNLINK_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_rename(const char *from, const char *to)
+{
+ PRInt32 rv = -1, err;
+
+ /*
+ ** This is trying to enforce the semantics of WINDOZE' rename
+ ** operation. That means one is not allowed to rename over top
+ ** of an existing file. Holding a lock across these two function
+ ** and the open function is known to be a bad idea, but ....
+ */
+ if (NULL != _pr_rename_lock)
+ PR_Lock(_pr_rename_lock);
+ if (0 == access(to, F_OK))
+ PR_SetError(PR_FILE_EXISTS_ERROR, 0);
+ else
+ {
+ rv = rename(from, to);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_RENAME_ERROR(err);
+ }
+ }
+ if (NULL != _pr_rename_lock)
+ PR_Unlock(_pr_rename_lock);
+ return rv;
+}
+
+PRInt32 _MD_access(const char *name, PRAccessHow how)
+{
+PRInt32 rv, err;
+int amode;
+
+ switch (how) {
+ case PR_ACCESS_WRITE_OK:
+ amode = W_OK;
+ break;
+ case PR_ACCESS_READ_OK:
+ amode = R_OK;
+ break;
+ case PR_ACCESS_EXISTS:
+ amode = F_OK;
+ break;
+ default:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ rv = access(name, amode);
+
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_ACCESS_ERROR(err);
+ }
+
+done:
+ return(rv);
+}
+
+PRInt32 _MD_mkdir(const char *name, PRIntn mode)
+{
+int rv, err;
+
+ /*
+ ** This lock is used to enforce rename semantics as described
+ ** in PR_Rename. Look there for more fun details.
+ */
+ if (NULL !=_pr_rename_lock)
+ PR_Lock(_pr_rename_lock);
+ rv = mkdir(name, mode);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_MKDIR_ERROR(err);
+ }
+ if (NULL !=_pr_rename_lock)
+ PR_Unlock(_pr_rename_lock);
+ return rv;
+}
+
+PRInt32 _MD_rmdir(const char *name)
+{
+int rv, err;
+
+ rv = rmdir(name);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_RMDIR_ERROR(err);
+ }
+ return rv;
+}
+
+PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+PRInt32 rv, err;
+#ifndef _PR_USE_POLL
+fd_set rd;
+#else
+struct pollfd pfd;
+#endif /* _PR_USE_POLL */
+PRInt32 osfd = fd->secret->md.osfd;
+
+#ifndef _PR_USE_POLL
+ FD_ZERO(&rd);
+ FD_SET(osfd, &rd);
+#else
+ pfd.fd = osfd;
+ pfd.events = POLLIN;
+#endif /* _PR_USE_POLL */
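+ /*
+ * On EAGAIN/EWOULDBLOCK for a blocking fd, a local (user-level) thread
+ * parks on its CPU's I/O queue via local_io_wait(), while a native
+ * thread blocks directly in select()/poll() until the fd is readable.
+ */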
+ while ((rv = read(osfd,buf,amount)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ,
+ PR_INTERVAL_NO_TIMEOUT)) < 0)
+ goto done;
+ } else {
+#ifndef _PR_USE_POLL
+ while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL))
+ == -1 && (err = _MD_ERRNO()) == EINTR) {
+ /* retry _MD_SELECT() if it is interrupted */
+ }
+#else /* _PR_USE_POLL */
+ while ((rv = _MD_POLL(&pfd, 1, -1))
+ == -1 && (err = _MD_ERRNO()) == EINTR) {
+ /* retry _MD_POLL() if it is interrupted */
+ }
+#endif /* _PR_USE_POLL */
+ if (rv == -1) {
+ break;
+ }
+ }
+ if (_PR_PENDING_INTERRUPT(me))
+ break;
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ } else {
+ _PR_MD_MAP_READ_ERROR(err);
+ }
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+PRInt32 rv, err;
+#ifndef _PR_USE_POLL
+fd_set wd;
+#else
+struct pollfd pfd;
+#endif /* _PR_USE_POLL */
+PRInt32 osfd = fd->secret->md.osfd;
+
+#ifndef _PR_USE_POLL
+ FD_ZERO(&wd);
+ FD_SET(osfd, &wd);
+#else
+ pfd.fd = osfd;
+ pfd.events = POLLOUT;
+#endif /* _PR_USE_POLL */
+ while ((rv = write(osfd,buf,amount)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE,
+ PR_INTERVAL_NO_TIMEOUT)) < 0)
+ goto done;
+ } else {
+#ifndef _PR_USE_POLL
+ while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL))
+ == -1 && (err = _MD_ERRNO()) == EINTR) {
+ /* retry _MD_SELECT() if it is interrupted */
+ }
+#else /* _PR_USE_POLL */
+ while ((rv = _MD_POLL(&pfd, 1, -1))
+ == -1 && (err = _MD_ERRNO()) == EINTR) {
+ /* retry _MD_POLL() if it is interrupted */
+ }
+#endif /* _PR_USE_POLL */
+ if (rv == -1) {
+ break;
+ }
+ }
+ if (_PR_PENDING_INTERRUPT(me))
+ break;
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ } else {
+ _PR_MD_MAP_WRITE_ERROR(err);
+ }
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_fsync(PRFileDesc *fd)
+{
+PRInt32 rv, err;
+
+ rv = fsync(fd->secret->md.osfd);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_FSYNC_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_close(PRInt32 osfd)
+{
+PRInt32 rv, err;
+
+ rv = close(osfd);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_CLOSE_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto)
+{
+ PRInt32 osfd, err;
+
+ osfd = socket(domain, type, proto);
+
+ if (osfd == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SOCKET_ERROR(err);
+ return(osfd);
+ }
+
+ return(osfd);
+}
+
+PRInt32 _MD_socketavailable(PRFileDesc *fd)
+{
+ PRInt32 result;
+
+ if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) {
+ _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO());
+ return -1;
+ }
+ return result;
+}
+
+PRInt64 _MD_socketavailable64(PRFileDesc *fd)
+{
+ PRInt64 result;
+ LL_I2L(result, _MD_socketavailable(fd));
+ return result;
+} /* _MD_socketavailable64 */
+
+#define READ_FD 1
+#define WRITE_FD 2
+
+/*
+ * socket_io_wait --
+ *
+ * wait for socket i/o, periodically checking for interrupt
+ *
+ * The first implementation uses select(), for platforms without
+ * poll(). The second (preferred) implementation uses poll().
+ */
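+/*
+ * Both variants block for at most _PR_INTERRUPT_CHECK_INTERVAL_SECS at a
+ * time, so a pending interrupt on the calling thread is noticed with
+ * bounded delay even while it is blocked in select() or poll().
+ */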
+
+#ifndef _PR_USE_POLL
+
+static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv = -1;
+ struct timeval tv;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRIntervalTime epoch, now, elapsed, remaining;
+ PRBool wait_for_remaining;
+ PRInt32 syserror;
+ fd_set rd_wr;
+
+ switch (timeout) {
+ case PR_INTERVAL_NO_WAIT:
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ break;
+ case PR_INTERVAL_NO_TIMEOUT:
+ /*
+ * This is a special case of the 'default' case below.
+ * Please see the comments there.
+ */
+ tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
+ tv.tv_usec = 0;
+ FD_ZERO(&rd_wr);
+ do {
+ FD_SET(osfd, &rd_wr);
+ if (fd_type == READ_FD)
+ rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
+ else
+ rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
+ if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
+ _PR_MD_MAP_SELECT_ERROR(syserror);
+ break;
+ }
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ break;
+ }
+ } while (rv == 0 || (rv == -1 && syserror == EINTR));
+ break;
+ default:
+ now = epoch = PR_IntervalNow();
+ remaining = timeout;
+ FD_ZERO(&rd_wr);
+ do {
+ /*
+ * We block in _MD_SELECT for at most
+ * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
+ * so that there is an upper limit on the delay
+ * before the interrupt bit is checked.
+ */
+ wait_for_remaining = PR_TRUE;
+ tv.tv_sec = PR_IntervalToSeconds(remaining);
+ if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) {
+ wait_for_remaining = PR_FALSE;
+ tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_usec = PR_IntervalToMicroseconds(
+ remaining -
+ PR_SecondsToInterval(tv.tv_sec));
+ }
+ FD_SET(osfd, &rd_wr);
+ if (fd_type == READ_FD)
+ rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
+ else
+ rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
+ /*
+ * we don't consider EINTR a real error
+ */
+ if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
+ _PR_MD_MAP_SELECT_ERROR(syserror);
+ break;
+ }
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ break;
+ }
+ /*
+ * We loop again if _MD_SELECT timed out or got interrupted
+ * by a signal, and the timeout deadline has not passed yet.
+ */
+ if (rv == 0 || (rv == -1 && syserror == EINTR)) {
+ /*
+ * If _MD_SELECT timed out, we know how much time
+ * we spent in blocking, so we can avoid a
+ * PR_IntervalNow() call.
+ */
+ if (rv == 0) {
+ if (wait_for_remaining) {
+ now += remaining;
+ } else {
+ now += PR_SecondsToInterval(tv.tv_sec)
+ + PR_MicrosecondsToInterval(tv.tv_usec);
+ }
+ } else {
+ now = PR_IntervalNow();
+ }
+ elapsed = (PRIntervalTime) (now - epoch);
+ if (elapsed >= timeout) {
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ rv = -1;
+ break;
+ } else {
+ remaining = timeout - elapsed;
+ }
+ }
+ } while (rv == 0 || (rv == -1 && syserror == EINTR));
+ break;
+ }
+ return(rv);
+}
+
+#else /* _PR_USE_POLL */
+
+static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv = -1;
+ int msecs;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRIntervalTime epoch, now, elapsed, remaining;
+ PRBool wait_for_remaining;
+ PRInt32 syserror;
+ struct pollfd pfd;
+
+ switch (timeout) {
+ case PR_INTERVAL_NO_WAIT:
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ break;
+ case PR_INTERVAL_NO_TIMEOUT:
+ /*
+ * This is a special case of the 'default' case below.
+ * Please see the comments there.
+ */
+ msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
+ pfd.fd = osfd;
+ if (fd_type == READ_FD) {
+ pfd.events = POLLIN;
+ } else {
+ pfd.events = POLLOUT;
+ }
+ do {
+ rv = _MD_POLL(&pfd, 1, msecs);
+ if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
+ _PR_MD_MAP_POLL_ERROR(syserror);
+ break;
+ }
+ /*
+ * POLLERR alone is not treated as an error here; the I/O operation is
+ * simply retried and will report the real error itself. Only POLLHUP
+ * and POLLNVAL are mapped to errors.
+ */
+ if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
+ rv = -1;
+ _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
+ break;
+ }
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ break;
+ }
+ } while (rv == 0 || (rv == -1 && syserror == EINTR));
+ break;
+ default:
+ now = epoch = PR_IntervalNow();
+ remaining = timeout;
+ pfd.fd = osfd;
+ if (fd_type == READ_FD) {
+ pfd.events = POLLIN;
+ } else {
+ pfd.events = POLLOUT;
+ }
+ do {
+ /*
+ * We block in _MD_POLL for at most
+ * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
+ * so that there is an upper limit on the delay
+ * before the interrupt bit is checked.
+ */
+ wait_for_remaining = PR_TRUE;
+ msecs = PR_IntervalToMilliseconds(remaining);
+ if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) {
+ wait_for_remaining = PR_FALSE;
+ msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
+ }
+ rv = _MD_POLL(&pfd, 1, msecs);
+ /*
+ * we don't consider EINTR a real error
+ */
+ if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
+ _PR_MD_MAP_POLL_ERROR(syserror);
+ break;
+ }
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ break;
+ }
+ /*
+ * POLLERR alone is not treated as an error here; the I/O operation is
+ * simply retried and will report the real error itself. Only POLLHUP
+ * and POLLNVAL are mapped to errors.
+ */
+ if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
+ rv = -1;
+ _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
+ break;
+ }
+ /*
+ * We loop again if _MD_POLL timed out or got interrupted
+ * by a signal, and the timeout deadline has not passed yet.
+ */
+ if (rv == 0 || (rv == -1 && syserror == EINTR)) {
+ /*
+ * If _MD_POLL timed out, we know how much time
+ * we spent in blocking, so we can avoid a
+ * PR_IntervalNow() call.
+ */
+ if (rv == 0) {
+ if (wait_for_remaining) {
+ now += remaining;
+ } else {
+ now += PR_MillisecondsToInterval(msecs);
+ }
+ } else {
+ now = PR_IntervalNow();
+ }
+ elapsed = (PRIntervalTime) (now - epoch);
+ if (elapsed >= timeout) {
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ rv = -1;
+ break;
+ } else {
+ remaining = timeout - elapsed;
+ }
+ }
+ } while (rv == 0 || (rv == -1 && syserror == EINTR));
+ break;
+ }
+ return(rv);
+}
+
+#endif /* _PR_USE_POLL */
+
+static PRInt32 local_io_wait(
+ PRInt32 osfd,
+ PRInt32 wait_flag,
+ PRIntervalTime timeout)
+{
+ _PRUnixPollDesc pd;
+ PRInt32 rv;
+
+ PR_LOG(_pr_io_lm, PR_LOG_MIN,
+ ("waiting to %s on osfd=%d",
+ (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write",
+ osfd));
+
+ if (timeout == PR_INTERVAL_NO_WAIT) return 0;
+
+ pd.osfd = osfd;
+ pd.in_flags = wait_flag;
+ pd.out_flags = 0;
+
+ rv = _PR_WaitForMultipleFDs(&pd, 1, timeout);
+
+ if (rv == 0) {
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ rv = -1;
+ }
+ return rv;
+}
+
+
+PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount,
+ PRInt32 flags, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+/*
+ * Many OS's (Solaris, Unixware) have a broken recv which won't read
+ * from socketpairs. As long as we don't use flags on socketpairs, this
+ * is a decent fix. - mikep
+ */
+#if defined(UNIXWARE) || defined(SOLARIS)
+ while ((rv = read(osfd,buf,amount)) == -1) {
+#else
+ while ((rv = recv(osfd,buf,amount,flags)) == -1) {
+#endif
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd,_PR_UNIX_POLL_READ,timeout)) < 0)
+ goto done;
+ } else {
+ if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_RECV_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount,
+ PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ while ((*addrlen = PR_NETADDR_SIZE(addr)),
+ ((rv = recvfrom(osfd, buf, amount, flags,
+ (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0)
+ goto done;
+ } else {
+ if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_RECVFROM_ERROR(err);
+ }
+done:
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ if (rv != -1) {
+ /* ignore the sa_len field of struct sockaddr */
+ if (addr) {
+ addr->raw.family = ((struct sockaddr *) addr)->sa_family;
+ }
+ }
+#endif /* _PR_HAVE_SOCKADDR_LEN */
+ return(rv);
+}
+
+PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount,
+ PRInt32 flags, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+#if defined(SOLARIS)
+ PRInt32 tmp_amount = amount;
+#endif
+
+ /*
+ * On pre-2.6 Solaris, send() is much slower than write().
+ * On 2.6 and beyond, with in-kernel sockets, send() and
+ * write() are fairly equivalent in performance.
+ */
+#if defined(SOLARIS)
+ PR_ASSERT(0 == flags);
+ while ((rv = write(osfd,buf,tmp_amount)) == -1) {
+#else
+ while ((rv = send(osfd,buf,amount,flags)) == -1) {
+#endif
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
+ goto done;
+ } else {
+ if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+#if defined(SOLARIS)
+ /*
+ * The write system call has been reported to return the ERANGE
+ * error on occasion. Try to write in smaller chunks to work around
+ * this bug.
+ */
+ if (err == ERANGE) {
+ if (tmp_amount > 1) {
+ tmp_amount = tmp_amount/2; /* half the bytes */
+ continue;
+ }
+ }
+#endif
+ break;
+ }
+ }
+ /*
+ * optimization; if bytes sent is less than "amount" call
+ * select before returning. This is because it is likely that
+ * the next send() call will return EWOULDBLOCK.
+ */
+ if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
+ && (timeout != PR_INTERVAL_NO_WAIT)) {
+ if (_PR_IS_NATIVE_THREAD(me)) {
+ if (socket_io_wait(osfd, WRITE_FD, timeout)< 0) {
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
+ rv = -1;
+ goto done;
+ }
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_SEND_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_sendto(
+ PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags,
+ const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ PRNetAddr addrCopy;
+
+ addrCopy = *addr;
+ ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
+ ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
+
+ while ((rv = sendto(osfd, buf, amount, flags,
+ (struct sockaddr *) &addrCopy, addrlen)) == -1) {
+#else
+ while ((rv = sendto(osfd, buf, amount, flags,
+ (struct sockaddr *) addr, addrlen)) == -1) {
+#endif
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
+ goto done;
+ } else {
+ if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_SENDTO_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_writev(
+ PRFileDesc *fd, const PRIOVec *iov,
+ PRInt32 iov_size, PRIntervalTime timeout)
+{
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 index, amount = 0;
+ PRInt32 osfd = fd->secret->md.osfd;
+
+ /*
+ * Calculate the total number of bytes to be sent; needed for
+ * optimization later.
+ * We could avoid this if this number was passed in; but it is
+ * probably not a big deal because iov_size is usually small (less than
+ * 3)
+ */
+ if (!fd->secret->nonblocking) {
+ for (index=0; index<iov_size; index++) {
+ amount += iov[index].iov_len;
+ }
+ }
+
+ while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
+ goto done;
+ } else {
+ if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ /*
+ * optimization; if bytes sent is less than "amount" call
+ * select before returning. This is because it is likely that
+ * the next writev() call will return EWOULDBLOCK.
+ */
+ if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
+ && (timeout != PR_INTERVAL_NO_WAIT)) {
+ if (_PR_IS_NATIVE_THREAD(me)) {
+ if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) {
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
+ rv = -1;
+ goto done;
+ }
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_WRITEV_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr,
+ PRUint32 *addrlen, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ while ((rv = accept(osfd, (struct sockaddr *) addr,
+ (_PRSockLen_t *)addrlen)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0)
+ goto done;
+ } else {
+ if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_ACCEPT_ERROR(err);
+ }
+done:
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ if (rv != -1) {
+ /* ignore the sa_len field of struct sockaddr */
+ if (addr) {
+ addr->raw.family = ((struct sockaddr *) addr)->sa_family;
+ }
+ }
+#endif /* _PR_HAVE_SOCKADDR_LEN */
+ return(rv);
+}
+
+extern int _connect (int s, const struct sockaddr *name, int namelen);
+PRInt32 _MD_connect(
+ PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
+{
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 osfd = fd->secret->md.osfd;
+#ifdef IRIX
+extern PRInt32 _MD_irix_connect(
+ PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout);
+#endif
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ PRNetAddr addrCopy;
+
+ addrCopy = *addr;
+ ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
+ ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
+#endif
+
+ /*
+ * We initiate the connection setup by making a nonblocking connect()
+ * call. If the connect() call fails, there are two cases we handle
+ * specially:
+ * 1. The connect() call was interrupted by a signal. In this case
+ * we simply retry connect().
+ * 2. The NSPR socket is nonblocking and connect() fails with
+ * EINPROGRESS. We first wait until the socket becomes writable.
+ * Then we try to find out whether the connection setup succeeded
+ * or failed.
+ */
+
+retry:
+#ifdef IRIX
+ if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) {
+#else
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ if ((rv = connect(osfd, (struct sockaddr *)&addrCopy, addrlen)) == -1) {
+#else
+ if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) {
+#endif
+#endif
+ err = _MD_ERRNO();
+
+ if (err == EINTR) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ goto retry;
+ }
+
+ if (!fd->secret->nonblocking && (err == EINPROGRESS)) {
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+
+ if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
+ return -1;
+ } else {
+ /*
+ * socket_io_wait() may return -1 or 1.
+ */
+
+ rv = socket_io_wait(osfd, WRITE_FD, timeout);
+ if (rv == -1) {
+ return -1;
+ }
+ }
+
+ PR_ASSERT(rv == 1);
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ err = _MD_unix_get_nonblocking_connect_error(osfd);
+ if (err != 0) {
+ _PR_MD_MAP_CONNECT_ERROR(err);
+ return -1;
+ }
+ return 0;
+ }
+
+ _PR_MD_MAP_CONNECT_ERROR(err);
+ }
+
+ return rv;
+} /* _MD_connect */
+
+PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen)
+{
+ PRInt32 rv, err;
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ PRNetAddr addrCopy;
+
+ addrCopy = *addr;
+ ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
+ ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
+ rv = bind(fd->secret->md.osfd, (struct sockaddr *) &addrCopy, (int )addrlen);
+#else
+ rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen);
+#endif
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_BIND_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog)
+{
+ PRInt32 rv, err;
+
+ rv = listen(fd->secret->md.osfd, backlog);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_LISTEN_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how)
+{
+ PRInt32 rv, err;
+
+ rv = shutdown(fd->secret->md.osfd, how);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SHUTDOWN_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_socketpair(int af, int type, int flags,
+ PRInt32 *osfd)
+{
+ PRInt32 rv, err;
+
+ rv = socketpair(af, type, flags, osfd);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SOCKETPAIR_ERROR(err);
+ }
+ return rv;
+}
+
+PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr,
+ PRUint32 *addrlen)
+{
+ PRInt32 rv, err;
+
+ rv = getsockname(fd->secret->md.osfd,
+ (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ if (rv == 0) {
+ /* ignore the sa_len field of struct sockaddr */
+ if (addr) {
+ addr->raw.family = ((struct sockaddr *) addr)->sa_family;
+ }
+ }
+#endif /* _PR_HAVE_SOCKADDR_LEN */
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_GETSOCKNAME_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr,
+ PRUint32 *addrlen)
+{
+ PRInt32 rv, err;
+
+ rv = getpeername(fd->secret->md.osfd,
+ (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
+#ifdef _PR_HAVE_SOCKADDR_LEN
+ if (rv == 0) {
+ /* ignore the sa_len field of struct sockaddr */
+ if (addr) {
+ addr->raw.family = ((struct sockaddr *) addr)->sa_family;
+ }
+ }
+#endif /* _PR_HAVE_SOCKADDR_LEN */
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_GETPEERNAME_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level,
+ PRInt32 optname, char* optval, PRInt32* optlen)
+{
+ PRInt32 rv, err;
+
+ rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_GETSOCKOPT_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level,
+ PRInt32 optname, const char* optval, PRInt32 optlen)
+{
+ PRInt32 rv, err;
+
+ rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SETSOCKOPT_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable)
+{
+ int rv;
+
+ rv = fcntl(fd->secret->md.osfd, F_SETFD, inheritable ? 0 : FD_CLOEXEC);
+ if (-1 == rv) {
+ PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO());
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported)
+{
+ if (imported) {
+ fd->secret->inheritable = _PR_TRI_UNKNOWN;
+ } else {
+ /* By default, a Unix fd is not closed on exec. */
+#ifdef DEBUG
+ {
+ int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
+ PR_ASSERT(0 == flags);
+ }
+#endif
+ fd->secret->inheritable = _PR_TRI_TRUE;
+ }
+}
+
+/************************************************************************/
+#if !defined(_PR_USE_POLL)
+
+/*
+** Scan through io queue and find any bad fd's that triggered the error
+** from _MD_SELECT
+*/
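+/*
+** A descriptor is treated as bad when fcntl(F_GETFL) on it fails; its poll
+** request is completed with _PR_UNIX_POLL_NVAL and the waiting thread is
+** made runnable (or marked suspended) so it can observe the failure.
+*/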
+static void FindBadFDs(void)
+{
+ PRCList *q;
+ PRThread *me = _MD_CURRENT_THREAD();
+
+ PR_ASSERT(!_PR_IS_NATIVE_THREAD(me));
+ q = (_PR_IOQ(me->cpu)).next;
+ _PR_IOQ_MAX_OSFD(me->cpu) = -1;
+ _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
+ while (q != &_PR_IOQ(me->cpu)) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ PRBool notify = PR_FALSE;
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+ PRInt32 pq_max_osfd = -1;
+
+ q = q->next;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ pds->out_flags = 0;
+ PR_ASSERT(osfd >= 0 || pds->in_flags == 0);
+ if (pds->in_flags == 0) {
+ continue; /* skip this fd */
+ }
+ if (fcntl(osfd, F_GETFL, 0) == -1) {
+ /* Found a bad descriptor, remove it from the fd_sets. */
+ PR_LOG(_pr_io_lm, PR_LOG_MAX,
+ ("file descriptor %d is bad", osfd));
+ pds->out_flags = _PR_UNIX_POLL_NVAL;
+ notify = PR_TRUE;
+ }
+ if (osfd > pq_max_osfd) {
+ pq_max_osfd = osfd;
+ }
+ }
+
+ if (notify) {
+ PRIntn pri;
+ PR_REMOVE_LINK(&pq->links);
+ pq->on_ioq = PR_FALSE;
+
+ /*
+ * Decrement the count of descriptors for each descriptor/event
+ * because this I/O request is being removed from the
+ * ioq
+ */
+ pds = pq->pds;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ PRInt16 in_flags = pds->in_flags;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if (in_flags & _PR_UNIX_POLL_READ) {
+ if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+ }
+ if (in_flags & _PR_UNIX_POLL_WRITE) {
+ if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ }
+ if (in_flags & _PR_UNIX_POLL_EXCEPT) {
+ if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+
+ _PR_THREAD_LOCK(pq->thr);
+ if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+ _PRCPU *cpu = pq->thr->cpu;
+ _PR_SLEEPQ_LOCK(pq->thr->cpu);
+ _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
+
+ if (pq->thr->flags & _PR_SUSPENDING) {
+ /*
+ * set thread state to SUSPENDED;
+ * a Resume operation on the thread
+ * will move it to the runQ
+ */
+ pq->thr->state = _PR_SUSPENDED;
+ _PR_MISCQ_LOCK(pq->thr->cpu);
+ _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
+ _PR_MISCQ_UNLOCK(pq->thr->cpu);
+ } else {
+ pri = pq->thr->priority;
+ pq->thr->state = _PR_RUNNABLE;
+
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(pq->thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ }
+ }
+ _PR_THREAD_UNLOCK(pq->thr);
+ } else {
+ if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
+ }
+ }
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+ }
+}
+#endif /* !defined(_PR_USE_POLL) */
+
+/************************************************************************/
+
+/*
+** Called by the scheduler when there is nothing to do. This means that
+** all threads are blocked on some monitor somewhere.
+**
+** Note: this code doesn't release the scheduler lock.
+*/
+/*
+** Pause the current CPU. longjmp to the cpu's pause stack
+**
+** This must be called with the scheduler locked
+*/
+void _MD_PauseCPU(PRIntervalTime ticks)
+{
+ PRThread *me = _MD_CURRENT_THREAD();
+#ifdef _PR_USE_POLL
+ int timeout;
+ struct pollfd *pollfds; /* an array of pollfd structures */
+ struct pollfd *pollfdPtr; /* a pointer that steps through the array */
+ unsigned long npollfds; /* number of pollfd structures in array */
+ unsigned long pollfds_size;
+ int nfd; /* to hold the return value of poll() */
+#else
+ struct timeval timeout, *tvp;
+ fd_set r, w, e;
+ fd_set *rp, *wp, *ep;
+ PRInt32 max_osfd, nfd;
+#endif /* _PR_USE_POLL */
+ PRInt32 rv;
+ PRCList *q;
+ PRUint32 min_timeout;
+ sigset_t oldset;
+#ifdef IRIX
+extern sigset_t ints_off;
+#endif
+
+ PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);
+
+ _PR_MD_IOQ_LOCK();
+
+#ifdef _PR_USE_POLL
+ /* Build up the pollfd structure array to wait on */
+
+ /* Find out how many pollfd structures are needed */
+ npollfds = _PR_IOQ_OSFD_CNT(me->cpu);
+ PR_ASSERT(npollfds >= 0);
+
+ /*
+ * We use a pipe to wake up a native thread. An fd is needed
+ * for the pipe and we poll it for reading.
+ */
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ npollfds++;
+#ifdef IRIX
+ /*
+ * On Irix, a second pipe is used to cause the primordial cpu to
+ * wake up and exit when the process is exiting because of a call
+ * to exit/PR_ProcessExit.
+ */
+ if (me->cpu->id == 0) {
+ npollfds++;
+ }
+#endif
+ }
+
+ /*
+ * if the cpu's pollfd array is not big enough, release it and allocate a new one
+ */
+ if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) {
+ if (_PR_IOQ_POLLFDS(me->cpu) != NULL)
+ PR_DELETE(_PR_IOQ_POLLFDS(me->cpu));
+ pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds);
+ pollfds = (struct pollfd *) PR_MALLOC(pollfds_size * sizeof(struct pollfd));
+ _PR_IOQ_POLLFDS(me->cpu) = pollfds;
+ _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size;
+ } else {
+ pollfds = _PR_IOQ_POLLFDS(me->cpu);
+ }
+ pollfdPtr = pollfds;
+
+ /*
+ * If we need to poll the pipe for waking up a native thread,
+ * the pipe's fd is the first element in the pollfds array.
+ */
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ pollfdPtr->fd = _pr_md_pipefd[0];
+ pollfdPtr->events = POLLIN;
+ pollfdPtr++;
+#ifdef IRIX
+ /*
+ * On Irix, the second element is the exit pipe
+ */
+ if (me->cpu->id == 0) {
+ pollfdPtr->fd = _pr_irix_primoridal_cpu_fd[0];
+ pollfdPtr->events = POLLIN;
+ pollfdPtr++;
+ }
+#endif
+ }
+
+ min_timeout = PR_INTERVAL_NO_TIMEOUT;
+ for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+
+ if (pq->timeout < min_timeout) {
+ min_timeout = pq->timeout;
+ }
+ for (; pds < epds; pds++, pollfdPtr++) {
+ /*
+ * Assert that the pollfdPtr pointer does not go
+ * beyond the end of the pollfds array
+ */
+ PR_ASSERT(pollfdPtr < pollfds + npollfds);
+ pollfdPtr->fd = pds->osfd;
+ /* direct copy of poll flags */
+ pollfdPtr->events = pds->in_flags;
+ }
+ }
+ _PR_IOQ_TIMEOUT(me->cpu) = min_timeout;
+#else
+ /*
+ * assignment of fd_sets
+ */
+ r = _PR_FD_READ_SET(me->cpu);
+ w = _PR_FD_WRITE_SET(me->cpu);
+ e = _PR_FD_EXCEPTION_SET(me->cpu);
+
+ rp = &r;
+ wp = &w;
+ ep = &e;
+
+ max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1;
+ min_timeout = _PR_IOQ_TIMEOUT(me->cpu);
+#endif /* _PR_USE_POLL */
+ /*
+ ** Compute the minimum timeout value: make it the smaller of the
+ ** timeouts specified by the i/o pollers or the timeout of the first
+ ** sleeping thread.
+ */
+ q = _PR_SLEEPQ(me->cpu).next;
+
+ if (q != &_PR_SLEEPQ(me->cpu)) {
+ PRThread *t = _PR_THREAD_PTR(q);
+
+ if (t->sleep < min_timeout) {
+ min_timeout = t->sleep;
+ }
+ }
+ if (min_timeout > ticks) {
+ min_timeout = ticks;
+ }
+
+#ifdef _PR_USE_POLL
+ if (min_timeout == PR_INTERVAL_NO_TIMEOUT)
+ timeout = -1;
+ else
+ timeout = PR_IntervalToMilliseconds(min_timeout);
+#else
+ if (min_timeout == PR_INTERVAL_NO_TIMEOUT) {
+ tvp = NULL;
+ } else {
+ timeout.tv_sec = PR_IntervalToSeconds(min_timeout);
+ timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout)
+ % PR_USEC_PER_SEC;
+ tvp = &timeout;
+ }
+#endif /* _PR_USE_POLL */
+
+ _PR_MD_IOQ_UNLOCK();
+ _MD_CHECK_FOR_EXIT();
+ /*
+ * check for i/o operations
+ */
+#ifndef _PR_NO_CLOCK_TIMER
+ /*
+ * Disable the clock interrupts while we are in select, if clock interrupts
+ * are enabled. Otherwise, when the select/poll calls are interrupted, the
+ * timer value starts ticking from zero again when the system call is restarted.
+ */
+#ifdef IRIX
+ /*
+ * SIGCHLD signal is used on Irix to detect the termination of an
+ * sproc by SIGSEGV, SIGBUS or SIGABRT signals when
+ * _nspr_terminate_on_error is set.
+ */
+ if ((!_nspr_noclock) || (_nspr_terminate_on_error))
+#else
+ if (!_nspr_noclock)
+#endif /* IRIX */
+#ifdef IRIX
+ sigprocmask(SIG_BLOCK, &ints_off, &oldset);
+#else
+ PR_ASSERT(sigismember(&timer_set, SIGALRM));
+ sigprocmask(SIG_BLOCK, &timer_set, &oldset);
+#endif /* IRIX */
+#endif /* !_PR_NO_CLOCK_TIMER */
+
+#ifndef _PR_USE_POLL
+ PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp));
+ nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp);
+#else
+ nfd = _MD_POLL(pollfds, npollfds, timeout);
+#endif /* !_PR_USE_POLL */
+
+#ifndef _PR_NO_CLOCK_TIMER
+#ifdef IRIX
+ if ((!_nspr_noclock) || (_nspr_terminate_on_error))
+#else
+ if (!_nspr_noclock)
+#endif /* IRIX */
+ sigprocmask(SIG_SETMASK, &oldset, 0);
+#endif /* !_PR_NO_CLOCK_TIMER */
+
+ _MD_CHECK_FOR_EXIT();
+
+#ifdef IRIX
+ _PR_MD_primordial_cpu();
+#endif
+
+ _PR_MD_IOQ_LOCK();
+ /*
+ ** Notify monitors that are associated with the selected descriptors.
+ */
+#ifdef _PR_USE_POLL
+ if (nfd > 0) {
+ pollfdPtr = pollfds;
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ /*
+ * Assert that the pipe is the first element in the
+ * pollfds array.
+ */
+ PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]);
+ if ((pollfds[0].revents & POLLIN) && (nfd == 1)) {
+ /*
+ * woken up by another thread; read all the data
+ * in the pipe to empty the pipe
+ */
+ while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf,
+ PIPE_BUF)) == PIPE_BUF){
+ }
+ PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
+ }
+ pollfdPtr++;
+#ifdef IRIX
+ /*
+ * On Irix, check to see if the primordial cpu needs to exit
+ * to cause the process to terminate
+ */
+ if (me->cpu->id == 0) {
+ PR_ASSERT(pollfds[1].fd == _pr_irix_primoridal_cpu_fd[0]);
+ if (pollfdPtr->revents & POLLIN) {
+ if (_pr_irix_process_exit) {
+ /*
+ * process exit due to a call to PR_ProcessExit
+ */
+ prctl(PR_SETEXITSIG, SIGKILL);
+ _exit(_pr_irix_process_exit_code);
+ } else {
+ while ((rv = read(_pr_irix_primoridal_cpu_fd[0],
+ _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) {
+ }
+ PR_ASSERT(rv > 0);
+ }
+ }
+ pollfdPtr++;
+ }
+#endif
+ }
+ for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ PRBool notify = PR_FALSE;
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+
+ for (; pds < epds; pds++, pollfdPtr++) {
+ /*
+ * Assert that the pollfdPtr pointer does not go beyond
+ * the end of the pollfds array.
+ */
+ PR_ASSERT(pollfdPtr < pollfds + npollfds);
+ /*
+ * Assert that the fd's in the pollfds array (stepped
+ * through by pollfdPtr) are in the same order as
+ * the fd's in _PR_IOQ() (stepped through by q and pds).
+ * This is how the pollfds array was created earlier.
+ */
+ PR_ASSERT(pollfdPtr->fd == pds->osfd);
+ pds->out_flags = pollfdPtr->revents;
+ /* Negative fd's are ignored by poll() */
+ if (pds->osfd >= 0 && pds->out_flags) {
+ notify = PR_TRUE;
+ }
+ }
+ if (notify) {
+ PRIntn pri;
+ PRThread *thred;
+
+ PR_REMOVE_LINK(&pq->links);
+ pq->on_ioq = PR_FALSE;
+
+ thred = pq->thr;
+ _PR_THREAD_LOCK(thred);
+ if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+ _PRCPU *cpu = pq->thr->cpu;
+ _PR_SLEEPQ_LOCK(pq->thr->cpu);
+ _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
+
+ if (pq->thr->flags & _PR_SUSPENDING) {
+ /*
+ * set thread state to SUSPENDED;
+ * a Resume operation on the thread
+ * will move it to the runQ
+ */
+ pq->thr->state = _PR_SUSPENDED;
+ _PR_MISCQ_LOCK(pq->thr->cpu);
+ _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
+ _PR_MISCQ_UNLOCK(pq->thr->cpu);
+ } else {
+ pri = pq->thr->priority;
+ pq->thr->state = _PR_RUNNABLE;
+
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(pq->thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ if (_pr_md_idle_cpus > 1)
+ _PR_MD_WAKEUP_WAITER(thred);
+ }
+ }
+ _PR_THREAD_UNLOCK(thred);
+ _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds;
+ PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
+ }
+ }
+ } else if (nfd == -1) {
+ PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno));
+ }
+
+#else
+ if (nfd > 0) {
+ q = _PR_IOQ(me->cpu).next;
+ _PR_IOQ_MAX_OSFD(me->cpu) = -1;
+ _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
+ while (q != &_PR_IOQ(me->cpu)) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ PRBool notify = PR_FALSE;
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+ PRInt32 pq_max_osfd = -1;
+
+ q = q->next;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ PRInt16 in_flags = pds->in_flags;
+ PRInt16 out_flags = 0;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) {
+ out_flags |= _PR_UNIX_POLL_READ;
+ }
+ if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) {
+ out_flags |= _PR_UNIX_POLL_WRITE;
+ }
+ if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) {
+ out_flags |= _PR_UNIX_POLL_EXCEPT;
+ }
+ pds->out_flags = out_flags;
+ if (out_flags) {
+ notify = PR_TRUE;
+ }
+ if (osfd > pq_max_osfd) {
+ pq_max_osfd = osfd;
+ }
+ }
+ if (notify == PR_TRUE) {
+ PRIntn pri;
+ PRThread *thred;
+
+ PR_REMOVE_LINK(&pq->links);
+ pq->on_ioq = PR_FALSE;
+
+ /*
+ * Decrement the count of descriptors for each descriptor/event
+ * because this I/O request is being removed from the
+ * ioq
+ */
+ pds = pq->pds;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ PRInt16 in_flags = pds->in_flags;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if (in_flags & _PR_UNIX_POLL_READ) {
+ if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+ }
+ if (in_flags & _PR_UNIX_POLL_WRITE) {
+ if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ }
+ if (in_flags & _PR_UNIX_POLL_EXCEPT) {
+ if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+
+ /*
+ * Because this thread can run on a different cpu right
+ * after being added to the run queue, do not dereference
+ * pq
+ */
+ thred = pq->thr;
+ _PR_THREAD_LOCK(thred);
+ if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+ _PRCPU *cpu = thred->cpu;
+ _PR_SLEEPQ_LOCK(pq->thr->cpu);
+ _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
+
+ if (pq->thr->flags & _PR_SUSPENDING) {
+ /*
+ * set thread state to SUSPENDED;
+ * a Resume operation on the thread
+ * will move it to the runQ
+ */
+ pq->thr->state = _PR_SUSPENDED;
+ _PR_MISCQ_LOCK(pq->thr->cpu);
+ _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
+ _PR_MISCQ_UNLOCK(pq->thr->cpu);
+ } else {
+ pri = pq->thr->priority;
+ pq->thr->state = _PR_RUNNABLE;
+
+ pq->thr->cpu = cpu;
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(pq->thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ if (_pr_md_idle_cpus > 1)
+ _PR_MD_WAKEUP_WAITER(thred);
+ }
+ }
+ _PR_THREAD_UNLOCK(thred);
+ } else {
+ if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
+ }
+ }
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) {
+ /*
+ * woken up by another thread; read all the data
+ * in the pipe to empty the pipe
+ */
+ while ((rv =
+ read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
+ == PIPE_BUF){
+ }
+ PR_ASSERT((rv > 0) ||
+ ((rv == -1) && (errno == EAGAIN)));
+ }
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+#ifdef IRIX
+ if ((me->cpu->id == 0) &&
+ (FD_ISSET(_pr_irix_primoridal_cpu_fd[0], rp))) {
+ if (_pr_irix_process_exit) {
+ /*
+ * process exit due to a call to PR_ProcessExit
+ */
+ prctl(PR_SETEXITSIG, SIGKILL);
+ _exit(_pr_irix_process_exit_code);
+ } else {
+ while ((rv = read(_pr_irix_primoridal_cpu_fd[0],
+ _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) {
+ }
+ PR_ASSERT(rv > 0);
+ }
+ }
+ if (me->cpu->id == 0) {
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_irix_primoridal_cpu_fd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0];
+ }
+#endif
+ }
+ } else if (nfd < 0) {
+ if (errno == EBADF) {
+ FindBadFDs();
+ } else {
+ PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d",
+ errno));
+ }
+ } else {
+ PR_ASSERT(nfd == 0);
+ /*
+ * compute the new value of _PR_IOQ_TIMEOUT
+ */
+ q = _PR_IOQ(me->cpu).next;
+ _PR_IOQ_MAX_OSFD(me->cpu) = -1;
+ _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
+ while (q != &_PR_IOQ(me->cpu)) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+ PRInt32 pq_max_osfd = -1;
+
+ q = q->next;
+ for (; pds < epds; pds++) {
+ if (pds->osfd > pq_max_osfd) {
+ pq_max_osfd = pds->osfd;
+ }
+ }
+ if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
+ }
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+ }
+ }
+#endif /* _PR_USE_POLL */
+ _PR_MD_IOQ_UNLOCK();
+}
+
+void _MD_Wakeup_CPUs()
+{
+ PRInt32 rv, data;
+
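+ /*
+ * Write one byte to the wakeup pipe so that a CPU blocked in
+ * select()/poll() inside _MD_PauseCPU returns and rescans its I/O queue.
+ */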
+ data = 0;
+ rv = write(_pr_md_pipefd[1], &data, 1);
+
+ while ((rv < 0) && (errno == EAGAIN)) {
+ /*
+ * pipe full, read all data in pipe to empty it
+ */
+ while ((rv =
+ read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
+ == PIPE_BUF) {
+ }
+ PR_ASSERT((rv > 0) ||
+ ((rv == -1) && (errno == EAGAIN)));
+ rv = write(_pr_md_pipefd[1], &data, 1);
+ }
+}
+
+
+void _MD_InitCPUS()
+{
+ PRInt32 rv, flags;
+ PRThread *me = _MD_CURRENT_THREAD();
+
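+ /* Create the wakeup pipe used by _MD_Wakeup_CPUs and make both ends non-blocking. */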
+ rv = pipe(_pr_md_pipefd);
+ PR_ASSERT(rv == 0);
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+#ifndef _PR_USE_POLL
+ FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu));
+#endif
+
+ flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0);
+ fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK);
+ flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0);
+ fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK);
+}
+
+/*
+** Unix SIGALRM (clock) signal handler
+*/
+static void ClockInterruptHandler()
+{
+ int olderrno;
+ PRUintn pri;
+ _PRCPU *cpu = _PR_MD_CURRENT_CPU();
+ PRThread *me = _MD_CURRENT_THREAD();
+
+#ifdef SOLARIS
+ if (!me || _PR_IS_NATIVE_THREAD(me)) {
+ _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK;
+ return;
+ }
+#endif
+
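+ /* If interrupts are off, just record the missed clock tick and return. */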
+ if (_PR_MD_GET_INTSOFF() != 0) {
+ cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK;
+ return;
+ }
+ _PR_MD_SET_INTSOFF(1);
+
+ olderrno = errno;
+ _PR_ClockInterrupt();
+ errno = olderrno;
+
+ /*
+ ** If the interrupt wants a resched or if some other thread at
+ ** the same priority needs the cpu, reschedule.
+ */
+ pri = me->priority;
+ if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) {
+#ifdef _PR_NO_PREEMPT
+ cpu->resched = PR_TRUE;
+ if (pr_interruptSwitchHook) {
+ (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg);
+ }
+#else /* _PR_NO_PREEMPT */
+ /*
+ ** Re-enable unix interrupts (so that we can use
+ ** setjmp/longjmp for context switching without having to
+ ** worry about the signal state)
+ */
+ sigprocmask(SIG_SETMASK, &empty_set, 0);
+ PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch"));
+
+ if(!(me->flags & _PR_IDLE_THREAD)) {
+ _PR_THREAD_LOCK(me);
+ me->state = _PR_RUNNABLE;
+ me->cpu = cpu;
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(me, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ _PR_THREAD_UNLOCK(me);
+ } else
+ me->state = _PR_RUNNABLE;
+ _MD_SWITCH_CONTEXT(me);
+ PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch"));
+#endif /* _PR_NO_PREEMPT */
+ }
+ /*
+ * Because this thread could be running on a different cpu after
+ * a context switch, re-read the current cpu instead of relying on
+ * the cached value of the 'cpu' variable.
+ */
+ _PR_MD_SET_INTSOFF(0);
+}
+
+/*
+ * On HP-UX 9, we have to use the sigvector() interface to restart
+ * interrupted system calls, because sigaction() does not have the
+ * SA_RESTART flag.
+ */
+
+#ifdef HPUX9
+static void HPUX9_ClockInterruptHandler(
+ int sig,
+ int code,
+ struct sigcontext *scp)
+{
+ ClockInterruptHandler();
+ scp->sc_syscall_action = SIG_RESTART;
+}
+#endif /* HPUX9 */
+
+/* # of milliseconds per clock tick that we will use */
+#define MSEC_PER_TICK 50
+
+
+void _MD_StartInterrupts()
+{
+ char *eval;
+
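+ /* A nonzero NSPR_NOCLOCK in the environment disables the clock interrupts. */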
+ if ((eval = getenv("NSPR_NOCLOCK")) != NULL) {
+ if (atoi(eval) == 0)
+ _nspr_noclock = 0;
+ else
+ _nspr_noclock = 1;
+ }
+
+#ifndef _PR_NO_CLOCK_TIMER
+ if (!_nspr_noclock) {
+ _MD_EnableClockInterrupts();
+ }
+#endif
+}
+
+void _MD_StopInterrupts()
+{
+ sigprocmask(SIG_BLOCK, &timer_set, 0);
+}
+
+void _MD_EnableClockInterrupts()
+{
+ struct itimerval itval;
+ extern PRUintn _pr_numCPU;
+#ifdef HPUX9
+ struct sigvec vec;
+
+ vec.sv_handler = (void (*)()) HPUX9_ClockInterruptHandler;
+ vec.sv_mask = 0;
+ vec.sv_flags = 0;
+ sigvector(SIGALRM, &vec, 0);
+#else
+ struct sigaction vtact;
+
+ vtact.sa_handler = (void (*)()) ClockInterruptHandler;
+ sigemptyset(&vtact.sa_mask);
+ vtact.sa_flags = SA_RESTART;
+ sigaction(SIGALRM, &vtact, 0);
+#endif /* HPUX9 */
+
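+ /* Arm a recurring real-time timer that delivers SIGALRM every MSEC_PER_TICK milliseconds. */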
+ PR_ASSERT(_pr_numCPU == 1);
+ itval.it_interval.tv_sec = 0;
+ itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC;
+ itval.it_value = itval.it_interval;
+ setitimer(ITIMER_REAL, &itval, 0);
+}
+
+void _MD_DisableClockInterrupts()
+{
+ struct itimerval itval;
+ extern PRUintn _pr_numCPU;
+
+ PR_ASSERT(_pr_numCPU == 1);
+ itval.it_interval.tv_sec = 0;
+ itval.it_interval.tv_usec = 0;
+ itval.it_value = itval.it_interval;
+ setitimer(ITIMER_REAL, &itval, 0);
+}
+
+void _MD_BlockClockInterrupts()
+{
+ sigprocmask(SIG_BLOCK, &timer_set, 0);
+}
+
+void _MD_UnblockClockInterrupts()
+{
+ sigprocmask(SIG_UNBLOCK, &timer_set, 0);
+}
+
+void _MD_MakeNonblock(PRFileDesc *fd)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ int flags;
+
+ if (osfd <= 2) {
+ /* Don't mess around with stdin, stdout or stderr */
+ return;
+ }
+ flags = fcntl(osfd, F_GETFL, 0);
+
+ /*
+ * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible.
+ * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O),
+ * otherwise connect() still blocks and can be interrupted by SIGALRM.
+ */
+
+ fcntl(osfd, F_SETFL, flags | O_NONBLOCK);
+ }
+
+PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode)
+{
+ PRInt32 osflags;
+ PRInt32 rv, err;
+
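+ /* Translate the NSPR PR_* open flags into the corresponding POSIX O_* flags. */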
+ if (flags & PR_RDWR) {
+ osflags = O_RDWR;
+ } else if (flags & PR_WRONLY) {
+ osflags = O_WRONLY;
+ } else {
+ osflags = O_RDONLY;
+ }
+
+ if (flags & PR_EXCL)
+ osflags |= O_EXCL;
+ if (flags & PR_APPEND)
+ osflags |= O_APPEND;
+ if (flags & PR_TRUNCATE)
+ osflags |= O_TRUNC;
+ if (flags & PR_SYNC) {
+#if defined(O_SYNC)
+ osflags |= O_SYNC;
+#elif defined(O_FSYNC)
+ osflags |= O_FSYNC;
+#else
+#error "Neither O_SYNC nor O_FSYNC is defined on this platform"
+#endif
+ }
+
+ /*
+ ** On creations we hold the 'create' lock in order to enforce
+ ** the semantics of PR_Rename. (see the latter for more details)
+ */
+ if (flags & PR_CREATE_FILE)
+ {
+ osflags |= O_CREAT;
+ if (NULL !=_pr_rename_lock)
+ PR_Lock(_pr_rename_lock);
+ }
+
+#if defined(ANDROID)
+ osflags |= O_LARGEFILE;
+#endif
+
+ rv = _md_iovector._open64(name, osflags, mode);
+
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_OPEN_ERROR(err);
+ }
+
+ if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock))
+ PR_Unlock(_pr_rename_lock);
+ return rv;
+}
+
+PRIntervalTime intr_timeout_ticks;
+
+#if defined(SOLARIS) || defined(IRIX)
+static void sigsegvhandler() {
+ fprintf(stderr,"Received SIGSEGV\n");
+ fflush(stderr);
+ pause();
+}
+
+static void sigaborthandler() {
+ fprintf(stderr,"Received SIGABRT\n");
+ fflush(stderr);
+ pause();
+}
+
+static void sigbushandler() {
+ fprintf(stderr,"Received SIGBUS\n");
+ fflush(stderr);
+ pause();
+}
+#endif /* SOLARIS, IRIX */
+
+#endif /* !defined(_PR_PTHREADS) */
+
+void _MD_query_fd_inheritable(PRFileDesc *fd)
+{
+ int flags;
+
+ PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable);
+ flags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
+ PR_ASSERT(-1 != flags);
+ fd->secret->inheritable = (flags & FD_CLOEXEC) ?
+ _PR_TRI_FALSE : _PR_TRI_TRUE;
+}
+
+PROffset32 _MD_lseek(PRFileDesc *fd, PROffset32 offset, PRSeekWhence whence)
+{
+ PROffset32 rv, where;
+
+ switch (whence) {
+ case PR_SEEK_SET:
+ where = SEEK_SET;
+ break;
+ case PR_SEEK_CUR:
+ where = SEEK_CUR;
+ break;
+ case PR_SEEK_END:
+ where = SEEK_END;
+ break;
+ default:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ rv = lseek(fd->secret->md.osfd,offset,where);
+ if (rv == -1)
+ {
+ PRInt32 syserr = _MD_ERRNO();
+ _PR_MD_MAP_LSEEK_ERROR(syserr);
+ }
+done:
+ return(rv);
+}
+
+PROffset64 _MD_lseek64(PRFileDesc *fd, PROffset64 offset, PRSeekWhence whence)
+{
+ PRInt32 where;
+ PROffset64 rv;
+
+ switch (whence)
+ {
+ case PR_SEEK_SET:
+ where = SEEK_SET;
+ break;
+ case PR_SEEK_CUR:
+ where = SEEK_CUR;
+ break;
+ case PR_SEEK_END:
+ where = SEEK_END;
+ break;
+ default:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ rv = minus_one;
+ goto done;
+ }
+ rv = _md_iovector._lseek64(fd->secret->md.osfd, offset, where);
+ if (LL_EQ(rv, minus_one))
+ {
+ PRInt32 syserr = _MD_ERRNO();
+ _PR_MD_MAP_LSEEK_ERROR(syserr);
+ }
+done:
+ return rv;
+} /* _MD_lseek64 */
+
+/*
+** _MD_set_fileinfo_times --
+** Set the modifyTime and creationTime of the PRFileInfo
+** structure using the values in struct stat.
+**
+** _MD_set_fileinfo64_times --
+** Set the modifyTime and creationTime of the PRFileInfo64
+** structure using the values in _MDStat64.
+*/
+
+#if defined(_PR_STAT_HAS_ST_ATIM)
+/*
+** struct stat has st_atim, st_mtim, and st_ctim fields of
+** type timestruc_t.
+*/
+static void _MD_set_fileinfo_times(
+ const struct stat *sb,
+ PRFileInfo *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+
+static void _MD_set_fileinfo64_times(
+ const _MDStat64 *sb,
+ PRFileInfo64 *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+#elif defined(_PR_STAT_HAS_ST_ATIM_UNION)
+/*
+** The st_atim, st_mtim, and st_ctim fields in struct stat are
+** unions with a st__tim union member of type timestruc_t.
+*/
+static void _MD_set_fileinfo_times(
+ const struct stat *sb,
+ PRFileInfo *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+
+static void _MD_set_fileinfo64_times(
+ const _MDStat64 *sb,
+ PRFileInfo64 *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+#elif defined(_PR_STAT_HAS_ST_ATIMESPEC)
+/*
+** struct stat has st_atimespec, st_mtimespec, and st_ctimespec
+** fields of type struct timespec.
+*/
+#if defined(_PR_TIMESPEC_HAS_TS_SEC)
+static void _MD_set_fileinfo_times(
+ const struct stat *sb,
+ PRFileInfo *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+
+static void _MD_set_fileinfo64_times(
+ const _MDStat64 *sb,
+ PRFileInfo64 *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+#else /* _PR_TIMESPEC_HAS_TS_SEC */
+/*
+** The POSIX timespec structure has tv_sec and tv_nsec.
+*/
+static void _MD_set_fileinfo_times(
+ const struct stat *sb,
+ PRFileInfo *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+
+static void _MD_set_fileinfo64_times(
+ const _MDStat64 *sb,
+ PRFileInfo64 *info)
+{
+ PRInt64 us, s2us;
+
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
+ LL_MUL(info->modifyTime, info->modifyTime, s2us);
+ LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
+ LL_ADD(info->modifyTime, info->modifyTime, us);
+ LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
+ LL_MUL(info->creationTime, info->creationTime, s2us);
+ LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
+ LL_ADD(info->creationTime, info->creationTime, us);
+}
+#endif /* _PR_TIMESPEC_HAS_TS_SEC */
+#elif defined(_PR_STAT_HAS_ONLY_ST_ATIME)
+/*
+** struct stat only has st_atime, st_mtime, and st_ctime fields
+** of type time_t.
+*/
+static void _MD_set_fileinfo_times(
+ const struct stat *sb,
+ PRFileInfo *info)
+{
+ PRInt64 s, s2us;
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(s, sb->st_mtime);
+ LL_MUL(s, s, s2us);
+ info->modifyTime = s;
+ LL_I2L(s, sb->st_ctime);
+ LL_MUL(s, s, s2us);
+ info->creationTime = s;
+}
+
+static void _MD_set_fileinfo64_times(
+ const _MDStat64 *sb,
+ PRFileInfo64 *info)
+{
+ PRInt64 s, s2us;
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(s, sb->st_mtime);
+ LL_MUL(s, s, s2us);
+ info->modifyTime = s;
+ LL_I2L(s, sb->st_ctime);
+ LL_MUL(s, s, s2us);
+ info->creationTime = s;
+}
+#else
+#error "I don't know yet"
+#endif
+
+static int _MD_convert_stat_to_fileinfo(
+ const struct stat *sb,
+ PRFileInfo *info)
+{
+ if (S_IFREG & sb->st_mode)
+ info->type = PR_FILE_FILE;
+ else if (S_IFDIR & sb->st_mode)
+ info->type = PR_FILE_DIRECTORY;
+ else
+ info->type = PR_FILE_OTHER;
+
+#if defined(_PR_HAVE_LARGE_OFF_T)
+ if (0x7fffffffL < sb->st_size)
+ {
+ PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
+ return -1;
+ }
+#endif /* defined(_PR_HAVE_LARGE_OFF_T) */
+ info->size = sb->st_size;
+
+ _MD_set_fileinfo_times(sb, info);
+ return 0;
+} /* _MD_convert_stat_to_fileinfo */
+
+static int _MD_convert_stat64_to_fileinfo64(
+ const _MDStat64 *sb,
+ PRFileInfo64 *info)
+{
+ if (S_IFREG & sb->st_mode)
+ info->type = PR_FILE_FILE;
+ else if (S_IFDIR & sb->st_mode)
+ info->type = PR_FILE_DIRECTORY;
+ else
+ info->type = PR_FILE_OTHER;
+
+ LL_I2L(info->size, sb->st_size);
+
+ _MD_set_fileinfo64_times(sb, info);
+ return 0;
+} /* _MD_convert_stat64_to_fileinfo64 */
+
+PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info)
+{
+ PRInt32 rv;
+ struct stat sb;
+
+ rv = stat(fn, &sb);
+ if (rv < 0)
+ _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
+ else if (NULL != info)
+ rv = _MD_convert_stat_to_fileinfo(&sb, info);
+ return rv;
+}
+
+PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info)
+{
+ _MDStat64 sb;
+ PRInt32 rv = _md_iovector._stat64(fn, &sb);
+ if (rv < 0)
+ _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
+ else if (NULL != info)
+ rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
+ return rv;
+}
+
+PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info)
+{
+ struct stat sb;
+ PRInt32 rv = fstat(fd->secret->md.osfd, &sb);
+ if (rv < 0)
+ _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
+ else if (NULL != info)
+ rv = _MD_convert_stat_to_fileinfo(&sb, info);
+ return rv;
+}
+
+PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info)
+{
+ _MDStat64 sb;
+ PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb);
+ if (rv < 0)
+ _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
+ else if (NULL != info)
+ rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
+ return rv;
+}
+
+/*
+ * _md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can
+ * open the log file during NSPR initialization, before _md_iovector is
+ * initialized by _PR_MD_FINAL_INIT. This means the log file cannot be a
+ * large file on some platforms.
+ */
+#ifdef SYMBIAN
+struct _MD_IOVector _md_iovector; /* Will crash if NSPR_LOG_FILE is set. */
+#else
+struct _MD_IOVector _md_iovector = { open };
+#endif
+
+/*
+** These implementations are to emulate large file routines on systems that
+** don't have them. Their goal is to detect overflow where it can occur;
+** otherwise they simply operate as normal using the 32-bit file routines.
+**
+** The checking might be pre- or post-op, depending on the semantics.
+*/
+
+#if defined(SOLARIS2_5)
+
+static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64 *buf)
+{
+ PRInt32 rv;
+ struct stat sb;
+
+ rv = fstat(osfd, &sb);
+ if (rv >= 0)
+ {
+ /*
+ ** I'm only copying the fields that are immediately needed.
+ ** If somebody else calls this function, some of the fields
+ ** may not be defined.
+ */
+ (void)memset(buf, 0, sizeof(_MDStat64));
+ buf->st_mode = sb.st_mode;
+ buf->st_ctim = sb.st_ctim;
+ buf->st_mtim = sb.st_mtim;
+ buf->st_size = sb.st_size;
+ }
+ return rv;
+} /* _MD_solaris25_fstat64 */
+
+static PRIntn _MD_solaris25_stat64(const char *fn, _MDStat64 *buf)
+{
+ PRInt32 rv;
+ struct stat sb;
+
+ rv = stat(fn, &sb);
+ if (rv >= 0)
+ {
+ /*
+ ** I'm only copying the fields that are immediately needed.
+ ** If somebody else calls this function, some of the fields
+ ** may not be defined.
+ */
+ (void)memset(buf, 0, sizeof(_MDStat64));
+ buf->st_mode = sb.st_mode;
+ buf->st_ctim = sb.st_ctim;
+ buf->st_mtim = sb.st_mtim;
+ buf->st_size = sb.st_size;
+ }
+ return rv;
+} /* _MD_solaris25_stat64 */
+#endif /* defined(SOLARIS2_5) */
+
+#if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5)
+
+static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset, PRIntn whence)
+{
+ PRUint64 maxoff;
+ PROffset64 rv = minus_one;
+ LL_I2L(maxoff, 0x7fffffff);
+ if (LL_CMP(offset, <=, maxoff))
+ {
+ off_t off;
+ LL_L2I(off, offset);
+ LL_I2L(rv, lseek(osfd, off, whence));
+ }
+ else errno = EFBIG; /* we can't go there */
+ return rv;
+} /* _MD_Unix_lseek64 */
+
+static void* _MD_Unix_mmap64(
+ void *addr, PRSize len, PRIntn prot, PRIntn flags,
+ PRIntn fildes, PRInt64 offset)
+{
+ PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
+ return NULL;
+} /* _MD_Unix_mmap64 */
+#endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */
+
+/* Android <= 19 doesn't have mmap64. */
+#if defined(ANDROID) && __ANDROID_API__ <= 19
+PR_IMPORT(void) *__mmap2(void *, size_t, int, int, int, size_t);
+
+#define ANDROID_PAGE_SIZE 4096
+
+static void *
+mmap64(void *addr, size_t len, int prot, int flags, int fd, loff_t offset)
+{
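+ /* __mmap2 takes the file offset in units of ANDROID_PAGE_SIZE, so it must be page-aligned. */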
+ if (offset & (ANDROID_PAGE_SIZE - 1)) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+ return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE);
+}
+#endif
+
+#if defined(OSF1) && defined(__GNUC__)
+
+/*
+ * On OSF1 V5.0A, <sys/stat.h> defines stat and fstat as
+ * macros when compiled under gcc, so it is rather tricky to
+ * take the addresses of the real functions the macros expand
+ * to. A simple solution is to define forwarder functions
+ * and take the addresses of the forwarder functions instead.
+ */
+
+static int stat_forwarder(const char *path, struct stat *buffer)
+{
+ return stat(path, buffer);
+}
+
+static int fstat_forwarder(int filedes, struct stat *buffer)
+{
+ return fstat(filedes, buffer);
+}
+
+#endif
+
+static void _PR_InitIOV(void)
+{
+#if defined(SOLARIS2_5)
+ PRLibrary *lib;
+ void *open64_func;
+
+ open64_func = PR_FindSymbolAndLibrary("open64", &lib);
+ if (NULL != open64_func)
+ {
+ PR_ASSERT(NULL != lib);
+ _md_iovector._open64 = (_MD_Open64)open64_func;
+ _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64");
+ _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64");
+ _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64");
+ _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64");
+ (void)PR_UnloadLibrary(lib);
+ }
+ else
+ {
+ _md_iovector._open64 = open;
+ _md_iovector._mmap64 = _MD_Unix_mmap64;
+ _md_iovector._fstat64 = _MD_solaris25_fstat64;
+ _md_iovector._stat64 = _MD_solaris25_stat64;
+ _md_iovector._lseek64 = _MD_Unix_lseek64;
+ }
+#elif defined(_PR_NO_LARGE_FILES)
+ _md_iovector._open64 = open;
+ _md_iovector._mmap64 = _MD_Unix_mmap64;
+ _md_iovector._fstat64 = fstat;
+ _md_iovector._stat64 = stat;
+ _md_iovector._lseek64 = _MD_Unix_lseek64;
+#elif defined(_PR_HAVE_OFF64_T)
+#if defined(IRIX5_3) || defined(ANDROID)
+ /*
+ * Android doesn't have open64. We pass the O_LARGEFILE flag to open
+ * in _MD_open.
+ */
+ _md_iovector._open64 = open;
+#else
+ _md_iovector._open64 = open64;
+#endif
+ _md_iovector._mmap64 = mmap64;
+ _md_iovector._fstat64 = fstat64;
+ _md_iovector._stat64 = stat64;
+ _md_iovector._lseek64 = lseek64;
+#elif defined(_PR_HAVE_LARGE_OFF_T)
+ _md_iovector._open64 = open;
+ _md_iovector._mmap64 = mmap;
+#if defined(OSF1) && defined(__GNUC__)
+ _md_iovector._fstat64 = fstat_forwarder;
+ _md_iovector._stat64 = stat_forwarder;
+#else
+ _md_iovector._fstat64 = fstat;
+ _md_iovector._stat64 = stat;
+#endif
+ _md_iovector._lseek64 = lseek;
+#else
+#error "I don't know yet"
+#endif
+ LL_I2L(minus_one, -1);
+} /* _PR_InitIOV */
+
+void _PR_UnixInit(void)
+{
+ struct sigaction sigact;
+ int rv;
+
+ sigemptyset(&timer_set);
+
+#if !defined(_PR_PTHREADS)
+
+ sigaddset(&timer_set, SIGALRM);
+ sigemptyset(&empty_set);
+ intr_timeout_ticks =
+ PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS);
+
+#if defined(SOLARIS) || defined(IRIX)
+
+ if (getenv("NSPR_SIGSEGV_HANDLE")) {
+ sigact.sa_handler = sigsegvhandler;
+ sigact.sa_flags = 0;
+ sigact.sa_mask = timer_set;
+ sigaction(SIGSEGV, &sigact, 0);
+ }
+
+ if (getenv("NSPR_SIGABRT_HANDLE")) {
+ sigact.sa_handler = sigaborthandler;
+ sigact.sa_flags = 0;
+ sigact.sa_mask = timer_set;
+ sigaction(SIGABRT, &sigact, 0);
+ }
+
+ if (getenv("NSPR_SIGBUS_HANDLE")) {
+ sigact.sa_handler = sigbushandler;
+ sigact.sa_flags = 0;
+ sigact.sa_mask = timer_set;
+ sigaction(SIGBUS, &sigact, 0);
+ }
+
+#endif
+#endif /* !defined(_PR_PTHREADS) */
+
+ /*
+ * Under HP-UX DCE threads, sigaction() installs a per-thread
+ * handler, so we use sigvector() to install a process-wide
+ * handler.
+ */
+#if defined(HPUX) && defined(_PR_DCETHREADS)
+ {
+ struct sigvec vec;
+
+ vec.sv_handler = SIG_IGN;
+ vec.sv_mask = 0;
+ vec.sv_flags = 0;
+ rv = sigvector(SIGPIPE, &vec, NULL);
+ PR_ASSERT(0 == rv);
+ }
+#else
+ sigact.sa_handler = SIG_IGN;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = 0;
+ rv = sigaction(SIGPIPE, &sigact, 0);
+ PR_ASSERT(0 == rv);
+#endif /* HPUX && _PR_DCETHREADS */
+
+ _pr_rename_lock = PR_NewLock();
+ PR_ASSERT(NULL != _pr_rename_lock);
+ _pr_Xfe_mon = PR_NewMonitor();
+ PR_ASSERT(NULL != _pr_Xfe_mon);
+
+ _PR_InitIOV(); /* one last hack */
+}
+
+void _PR_UnixCleanup(void)
+{
+ if (_pr_rename_lock) {
+ PR_DestroyLock(_pr_rename_lock);
+ _pr_rename_lock = NULL;
+ }
+ if (_pr_Xfe_mon) {
+ PR_DestroyMonitor(_pr_Xfe_mon);
+ _pr_Xfe_mon = NULL;
+ }
+}
+
+#if !defined(_PR_PTHREADS)
+
+/*
+ * Variables used by the GC code, initialized in _MD_InitSegs().
+ */
+static PRInt32 _pr_zero_fd = -1;
+static PRLock *_pr_md_lock = NULL;
+
+/*
+ * _MD_InitSegs --
+ *
+ * This is Unix's version of _PR_MD_INIT_SEGS(), which is
+ * called by _PR_InitSegs(), which in turn is called by
+ * PR_Init().
+ */
+void _MD_InitSegs(void)
+{
+#ifdef DEBUG
+ /*
+ ** Disable using mmap(2) if NSPR_NO_MMAP is set
+ */
+ if (getenv("NSPR_NO_MMAP")) {
+ _pr_zero_fd = -2;
+ return;
+ }
+#endif
+ _pr_zero_fd = open("/dev/zero",O_RDWR , 0);
+ /* Prevent the fd from being inherited by child processes */
+ fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC);
+ _pr_md_lock = PR_NewLock();
+}
+
+PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr)
+{
+ static char *lastaddr = (char*) _PR_STACK_VMBASE;
+ PRStatus retval = PR_SUCCESS;
+ int prot;
+ void *rv;
+
+ PR_ASSERT(seg != 0);
+ PR_ASSERT(size != 0);
+
+ PR_Lock(_pr_md_lock);
+ if (_pr_zero_fd < 0) {
+from_heap:
+ seg->vaddr = PR_MALLOC(size);
+ if (!seg->vaddr) {
+ retval = PR_FAILURE;
+ }
+ else {
+ seg->size = size;
+ }
+ goto exit;
+ }
+
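+ /* Otherwise map zero-filled pages from the /dev/zero fd opened in _MD_InitSegs. */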
+ prot = PROT_READ|PROT_WRITE;
+ /*
+ * On Alpha Linux, the user-level thread stack needs
+ * to be made executable because longjmp/signal seem
+ * to put machine instructions on the stack.
+ */
+#if defined(LINUX) && defined(__alpha)
+ prot |= PROT_EXEC;
+#endif
+ rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot,
+ _MD_MMAP_FLAGS,
+ _pr_zero_fd, 0);
+ if (rv == (void*)-1) {
+ goto from_heap;
+ }
+ lastaddr += size;
+ seg->vaddr = rv;
+ seg->size = size;
+ seg->flags = _PR_SEG_VM;
+
+exit:
+ PR_Unlock(_pr_md_lock);
+ return retval;
+}
+
+void _MD_FreeSegment(PRSegment *seg)
+{
+ if (seg->flags & _PR_SEG_VM)
+ (void) munmap(seg->vaddr, seg->size);
+ else
+ PR_DELETE(seg->vaddr);
+}
+
+#endif /* _PR_PTHREADS */
+
+/*
+ *-----------------------------------------------------------------------
+ *
+ * PR_Now --
+ *
+ * Returns the current time in microseconds since the epoch.
+ * The epoch is midnight January 1, 1970 GMT.
+ * The implementation is machine dependent. This is the Unix
+ * implementation.
+ * Cf. time_t time(time_t *tp)
+ *
+ *-----------------------------------------------------------------------
+ */
+
+PR_IMPLEMENT(PRTime)
+PR_Now(void)
+{
+ struct timeval tv;
+ PRInt64 s, us, s2us;
+
+ GETTIMEOFDAY(&tv);
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(s, tv.tv_sec);
+ LL_I2L(us, tv.tv_usec);
+ LL_MUL(s, s, s2us);
+ LL_ADD(s, s, us);
+ return s;
+}
+
+#if defined(_MD_INTERVAL_USE_GTOD)
+/*
+ * This version of interval times is based on the time of day
+ * capability offered by the system. This isn't valid for two reasons:
+ * 1) The time of day is neither linear nor monotonically increasing
+ * 2) The units here are milliseconds. That's not appropriate for our use.
+ */
+PRIntervalTime _PR_UNIX_GetInterval()
+{
+ struct timeval time;
+ PRIntervalTime ticks;
+
+ (void)GETTIMEOFDAY(&time); /* fallacy of course */
+ ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; /* that's in milliseconds */
+ ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC; /* so's that */
+ return ticks;
+} /* _PR_UNIX_GetInterval */
+
+PRIntervalTime _PR_UNIX_TicksPerSecond()
+{
+ return 1000; /* this needs some work :) */
+}
+#endif
+
+#if defined(_PR_HAVE_CLOCK_MONOTONIC)
+PRIntervalTime _PR_UNIX_GetInterval2()
+{
+ struct timespec time;
+ PRIntervalTime ticks;
+
+ if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
+ fprintf(stderr, "clock_gettime failed: %d\n", errno);
+ abort();
+ }
+
+ ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;
+ ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC;
+ return ticks;
+}
+
+PRIntervalTime _PR_UNIX_TicksPerSecond2()
+{
+ return 1000;
+}
+#endif
+
+#if !defined(_PR_PTHREADS)
+/*
+ * Wait for I/O on multiple descriptors.
+ *
+ * Return 0 if timed out, return -1 if interrupted,
+ * else return the number of ready descriptors.
+ */
+PRInt32 _PR_WaitForMultipleFDs(
+ _PRUnixPollDesc *unixpds,
+ PRInt32 pdcnt,
+ PRIntervalTime timeout)
+{
+ PRPollQueue pq;
+ PRIntn is;
+ PRInt32 rv;
+ _PRCPU *io_cpu;
+ _PRUnixPollDesc *unixpd, *eunixpd;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
+
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+
+ pq.pds = unixpds;
+ pq.npds = pdcnt;
+
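+ /*
+ * Queue this request on the cpu's I/O queue and, in the select() case,
+ * update the per-cpu fd sets and counts before putting the thread to sleep.
+ */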
+ _PR_INTSOFF(is);
+ _PR_MD_IOQ_LOCK();
+ _PR_THREAD_LOCK(me);
+
+ pq.thr = me;
+ io_cpu = me->cpu;
+ pq.on_ioq = PR_TRUE;
+ pq.timeout = timeout;
+ _PR_ADD_TO_IOQ(pq, me->cpu);
+
+#if !defined(_PR_USE_POLL)
+ eunixpd = unixpds + pdcnt;
+ for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
+ PRInt32 osfd = unixpd->osfd;
+ if (unixpd->in_flags & _PR_UNIX_POLL_READ) {
+ FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
+ _PR_FD_READ_CNT(me->cpu)[osfd]++;
+ }
+ if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) {
+ FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
+ }
+ if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) {
+ FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
+ }
+ if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) {
+ _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
+ }
+ }
+#endif /* !defined(_PR_USE_POLL) */
+
+ if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) {
+ _PR_IOQ_TIMEOUT(me->cpu) = timeout;
+ }
+
+ _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt;
+
+ _PR_SLEEPQ_LOCK(me->cpu);
+ _PR_ADD_SLEEPQ(me, timeout);
+ me->state = _PR_IO_WAIT;
+ me->io_pending = PR_TRUE;
+ me->io_suspended = PR_FALSE;
+ _PR_SLEEPQ_UNLOCK(me->cpu);
+ _PR_THREAD_UNLOCK(me);
+ _PR_MD_IOQ_UNLOCK();
+
+ _PR_MD_WAIT(me, timeout);
+
+ me->io_pending = PR_FALSE;
+ me->io_suspended = PR_FALSE;
+
+ /*
+ * This thread should run on the same cpu on which it was blocked; when
+ * the IO request times out, the fd sets and fd counts for the
+ * cpu are updated below.
+ */
+ PR_ASSERT(me->cpu == io_cpu);
+
+ /*
+ ** If we timed out the pollq might still be on the ioq. Remove it
+ ** before continuing.
+ */
+ if (pq.on_ioq) {
+ _PR_MD_IOQ_LOCK();
+ /*
+ * Need to check pq.on_ioq again
+ */
+ if (pq.on_ioq) {
+ PR_REMOVE_LINK(&pq.links);
+#ifndef _PR_USE_POLL
+ eunixpd = unixpds + pdcnt;
+ for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
+ PRInt32 osfd = unixpd->osfd;
+ PRInt16 in_flags = unixpd->in_flags;
+
+ if (in_flags & _PR_UNIX_POLL_READ) {
+ if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+ }
+ if (in_flags & _PR_UNIX_POLL_WRITE) {
+ if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ }
+ if (in_flags & _PR_UNIX_POLL_EXCEPT) {
+ if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+#endif /* _PR_USE_POLL */
+ PR_ASSERT(pq.npds == pdcnt);
+ _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt;
+ PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
+ }
+ _PR_MD_IOQ_UNLOCK();
+ }
+ /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */
+ if (1 == pdcnt) {
+ _PR_FAST_INTSON(is);
+ } else {
+ _PR_INTSON(is);
+ }
+
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+
+ rv = 0;
+ if (pq.on_ioq == PR_FALSE) {
+ /* Count the number of ready descriptors */
+ while (--pdcnt >= 0) {
+ if (unixpds->out_flags != 0) {
+ rv++;
+ }
+ unixpds++;
+ }
+ }
+
+ return rv;
+}
+
+/*
+ * Unblock threads waiting for I/O
+ * used when interrupting threads
+ *
+ * NOTE: The thread lock should be held when this function is called.
+ * On return, the thread lock is released.
+ */
+void _PR_Unblock_IO_Wait(PRThread *thr)
+{
+ int pri = thr->priority;
+ _PRCPU *cpu = thr->cpu;
+
+ /*
+ * GLOBAL threads wake up periodically to check for interrupts
+ */
+ if (_PR_IS_NATIVE_THREAD(thr)) {
+ _PR_THREAD_UNLOCK(thr);
+ return;
+ }
+
+ PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ));
+ _PR_SLEEPQ_LOCK(cpu);
+ _PR_DEL_SLEEPQ(thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(cpu);
+
+ PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD));
+ thr->state = _PR_RUNNABLE;
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ _PR_THREAD_UNLOCK(thr);
+ _PR_MD_WAKEUP_WAITER(thr);
+}
+#endif /* !defined(_PR_PTHREADS) */
+
+/*
+ * When a nonblocking connect has completed, determine whether it
+ * succeeded or failed, and if it failed, what the error code is.
+ *
+ * The function returns the error code. An error code of 0 means
+ * that the nonblocking connect succeeded.
+ */
+
+int _MD_unix_get_nonblocking_connect_error(int osfd)
+{
+#if defined(NTO)
+ /* Neutrino does not support the SO_ERROR socket option */
+ PRInt32 rv;
+ PRNetAddr addr;
+ _PRSockLen_t addrlen = sizeof(addr);
+
+ /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */
+ struct statvfs superblock;
+ rv = fstatvfs(osfd, &superblock);
+ if (rv == 0) {
+ if (strcmp(superblock.f_basetype, "ttcpip") == 0) {
+ /* Using the Tiny Stack! */
+ rv = getpeername(osfd, (struct sockaddr *) &addr,
+ (_PRSockLen_t *) &addrlen);
+ if (rv == -1) {
+ int errno_copy = errno; /* make a copy so I don't
+ * accidentally reset */
+
+ if (errno_copy == ENOTCONN) {
+ struct stat StatInfo;
+ rv = fstat(osfd, &StatInfo);
+ if (rv == 0) {
+ time_t current_time = time(NULL);
+
+ /*
+ * this is a real hack; can't explain why it
+ * works, it just does
+ */
+ if (abs(current_time - StatInfo.st_atime) < 5) {
+ return ECONNREFUSED;
+ } else {
+ return ETIMEDOUT;
+ }
+ } else {
+ return ECONNREFUSED;
+ }
+ } else {
+ return errno_copy;
+ }
+ } else {
+ /* No Error */
+ return 0;
+ }
+ } else {
+ /* Have the FULL Stack which supports SO_ERROR */
+ /* Hasn't been written yet, never been tested! */
+ /* Jerry.Kirk@Nexwarecorp.com */
+
+ int err;
+ _PRSockLen_t optlen = sizeof(err);
+
+ if (getsockopt(osfd, SOL_SOCKET, SO_ERROR,
+ (char *) &err, &optlen) == -1) {
+ return errno;
+ } else {
+ return err;
+ }
+ }
+ } else {
+ return ECONNREFUSED;
+ }
+#elif defined(UNIXWARE)
+ /*
+ * getsockopt() fails with EPIPE, so use getmsg() instead.
+ */
+
+ int rv;
+ int flags = 0;
+ rv = getmsg(osfd, NULL, NULL, &flags);
+ PR_ASSERT(-1 == rv || 0 == rv);
+ if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) {
+ return errno;
+ }
+ return 0; /* no error */
+#else
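+ /* Common case: SO_ERROR returns the pending error from the nonblocking connect. */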
+ int err;
+ _PRSockLen_t optlen = sizeof(err);
+ if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) {
+ return errno;
+ } else {
+ return err;
+ }
+#endif
+}
+
+/************************************************************************/
+
+/*
+** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread
+** safe. Unfortunately, neither is mozilla. To make these programs work
+** in a pre-emptive threaded environment, we need to use a lock.
+*/
+
+void PR_XLock(void)
+{
+ PR_EnterMonitor(_pr_Xfe_mon);
+}
+
+void PR_XUnlock(void)
+{
+ PR_ExitMonitor(_pr_Xfe_mon);
+}
+
+PRBool PR_XIsLocked(void)
+{
+ return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE;
+}
+
+void PR_XWait(int ms)
+{
+ PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms));
+}
+
+void PR_XNotify(void)
+{
+ PR_Notify(_pr_Xfe_mon);
+}
+
+void PR_XNotifyAll(void)
+{
+ PR_NotifyAll(_pr_Xfe_mon);
+}
+
+#if defined(HAVE_FCNTL_FILE_LOCKING)
+
+PRStatus
+_MD_LockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ struct flock arg;
+
+ arg.l_type = F_WRLCK;
+ arg.l_whence = SEEK_SET;
+ arg.l_start = 0;
+ arg.l_len = 0; /* until EOF */
+ rv = fcntl(f, F_SETLKW, &arg);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus
+_MD_TLockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ struct flock arg;
+
+ arg.l_type = F_WRLCK;
+ arg.l_whence = SEEK_SET;
+ arg.l_start = 0;
+ arg.l_len = 0; /* until EOF */
+ rv = fcntl(f, F_SETLK, &arg);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus
+_MD_UnlockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ struct flock arg;
+
+ arg.l_type = F_UNLCK;
+ arg.l_whence = SEEK_SET;
+ arg.l_start = 0;
+ arg.l_len = 0; /* until EOF */
+ rv = fcntl(f, F_SETLK, &arg);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+#elif defined(HAVE_BSD_FLOCK)
+
+#include <sys/file.h>
+
+PRStatus
+_MD_LockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = flock(f, LOCK_EX);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus
+_MD_TLockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = flock(f, LOCK_EX|LOCK_NB);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus
+_MD_UnlockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = flock(f, LOCK_UN);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+#else
+
+PRStatus
+_MD_LockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = lockf(f, F_LOCK, 0);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus
+_MD_TLockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = lockf(f, F_TLOCK, 0);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus
+_MD_UnlockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = lockf(f, F_ULOCK, 0);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+#endif
+
+PRStatus _MD_gethostname(char *name, PRUint32 namelen)
+{
+ PRIntn rv;
+
+ rv = gethostname(name, namelen);
+ if (0 == rv) {
+ return PR_SUCCESS;
+ }
+ _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen)
+{
+ struct utsname info;
+
+ PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE));
+
+ if (uname(&info) == -1) {
+ _PR_MD_MAP_DEFAULT_ERROR(errno);
+ return PR_FAILURE;
+ }
+ if (PR_SI_SYSNAME == cmd)
+ (void)PR_snprintf(name, namelen, "%s", info.sysname);
+ else if (PR_SI_RELEASE == cmd)
+ (void)PR_snprintf(name, namelen, "%s", info.release);
+ else
+ return PR_FAILURE;
+ return PR_SUCCESS;
+}
+
+/*
+ *******************************************************************
+ *
+ * Memory-mapped files
+ *
+ *******************************************************************
+ */
+
+PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size)
+{
+ PRFileInfo info;
+ PRUint32 sz;
+
+ LL_L2UI(sz, size);
+ if (sz) {
+ if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) {
+ return PR_FAILURE;
+ }
+ if (sz > info.size) {
+ /*
+ * Need to extend the file
+ */
+ if (fmap->prot != PR_PROT_READWRITE) {
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0);
+ return PR_FAILURE;
+ }
+ if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) {
+ return PR_FAILURE;
+ }
+ if (PR_Write(fmap->fd, "", 1) != 1) {
+ return PR_FAILURE;
+ }
+ }
+ }
+ if (fmap->prot == PR_PROT_READONLY) {
+ fmap->md.prot = PROT_READ;
+#ifdef OSF1V4_MAP_PRIVATE_BUG
+ /*
+ * Use MAP_SHARED to work around a bug in OSF1 V4.0D
+ * (QAR 70220 in the OSF_QAR database) that results in
+ * corrupted data in the memory-mapped region. This
+ * bug is fixed in V5.0.
+ */
+ fmap->md.flags = MAP_SHARED;
+#else
+ fmap->md.flags = MAP_PRIVATE;
+#endif
+ } else if (fmap->prot == PR_PROT_READWRITE) {
+ fmap->md.prot = PROT_READ | PROT_WRITE;
+ fmap->md.flags = MAP_SHARED;
+ } else {
+ PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY);
+ fmap->md.prot = PROT_READ | PROT_WRITE;
+ fmap->md.flags = MAP_PRIVATE;
+ }
+ return PR_SUCCESS;
+}
+
+void * _MD_MemMap(
+ PRFileMap *fmap,
+ PRInt64 offset,
+ PRUint32 len)
+{
+ PRInt32 off;
+ void *addr;
+
+ LL_L2I(off, offset);
+ if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags,
+ fmap->fd->secret->md.osfd, off)) == (void *) -1) {
+ _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO());
+ addr = NULL;
+ }
+ return addr;
+}
+
+PRStatus _MD_MemUnmap(void *addr, PRUint32 len)
+{
+ if (munmap(addr, len) == 0) {
+ return PR_SUCCESS;
+ }
+ _PR_MD_MAP_DEFAULT_ERROR(errno);
+ return PR_FAILURE;
+}
+
+PRStatus _MD_CloseFileMap(PRFileMap *fmap)
+{
+ if ( PR_TRUE == fmap->md.isAnonFM ) {
+ PRStatus rc = PR_Close( fmap->fd );
+ if ( PR_FAILURE == rc ) {
+ PR_LOG( _pr_io_lm, PR_LOG_DEBUG,
+ ("_MD_CloseFileMap(): error closing anonymnous file map osfd"));
+ return PR_FAILURE;
+ }
+ }
+ PR_DELETE(fmap);
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_SyncMemMap(
+ PRFileDesc *fd,
+ void *addr,
+ PRUint32 len)
+{
+ /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk
+ * synchronously. It is not necessary to call fsync. */
+ if (msync(addr, len, MS_SYNC) == 0) {
+ return PR_SUCCESS;
+ }
+ _PR_MD_MAP_DEFAULT_ERROR(errno);
+ return PR_FAILURE;
+}
+
+#if defined(_PR_NEED_FAKE_POLL)
+
+/*
+ * Some platforms don't have poll(). For easier porting of code
+ * that calls poll(), we emulate poll() using select().
+ */
+
+int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
+{
+ int i;
+ int rv;
+ int maxfd;
+ fd_set rd, wr, ex;
+ struct timeval tv, *tvp;
+
+ if (timeout < 0 && timeout != -1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (timeout == -1) {
+ tvp = NULL;
+ } else {
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+ tvp = &tv;
+ }
+
+ maxfd = -1;
+ FD_ZERO(&rd);
+ FD_ZERO(&wr);
+ FD_ZERO(&ex);
+
+ for (i = 0; i < nfds; i++) {
+ int osfd = filedes[i].fd;
+ int events = filedes[i].events;
+ PRBool fdHasEvent = PR_FALSE;
+
+ if (osfd < 0) {
+ continue; /* Skip this osfd. */
+ }
+
+ /*
+ * Map the poll events to the select fd_sets.
+ * POLLIN, POLLRDNORM ===> readable
+ * POLLOUT, POLLWRNORM ===> writable
+ * POLLPRI, POLLRDBAND ===> exception
+ * POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
+ * are ignored.
+ *
+ * The output events POLLERR and POLLHUP are never turned on.
+ * POLLNVAL may be turned on.
+ */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ FD_SET(osfd, &rd);
+ fdHasEvent = PR_TRUE;
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ FD_SET(osfd, &wr);
+ fdHasEvent = PR_TRUE;
+ }
+ if (events & (POLLPRI | POLLRDBAND)) {
+ FD_SET(osfd, &ex);
+ fdHasEvent = PR_TRUE;
+ }
+ if (fdHasEvent && osfd > maxfd) {
+ maxfd = osfd;
+ }
+ }
+
+ rv = select(maxfd + 1, &rd, &wr, &ex, tvp);
+
+ /* Compute poll results */
+ if (rv > 0) {
+ rv = 0;
+ for (i = 0; i < nfds; i++) {
+ PRBool fdHasEvent = PR_FALSE;
+
+ filedes[i].revents = 0;
+ if (filedes[i].fd < 0) {
+ continue;
+ }
+ if (FD_ISSET(filedes[i].fd, &rd)) {
+ if (filedes[i].events & POLLIN) {
+ filedes[i].revents |= POLLIN;
+ }
+ if (filedes[i].events & POLLRDNORM) {
+ filedes[i].revents |= POLLRDNORM;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (FD_ISSET(filedes[i].fd, &wr)) {
+ if (filedes[i].events & POLLOUT) {
+ filedes[i].revents |= POLLOUT;
+ }
+ if (filedes[i].events & POLLWRNORM) {
+ filedes[i].revents |= POLLWRNORM;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (FD_ISSET(filedes[i].fd, &ex)) {
+ if (filedes[i].events & POLLPRI) {
+ filedes[i].revents |= POLLPRI;
+ }
+ if (filedes[i].events & POLLRDBAND) {
+ filedes[i].revents |= POLLRDBAND;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (fdHasEvent) {
+ rv++;
+ }
+ }
+ PR_ASSERT(rv > 0);
+ } else if (rv == -1 && errno == EBADF) {
+ rv = 0;
+ for (i = 0; i < nfds; i++) {
+ filedes[i].revents = 0;
+ if (filedes[i].fd < 0) {
+ continue;
+ }
+ if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
+ filedes[i].revents = POLLNVAL;
+ rv++;
+ }
+ }
+ PR_ASSERT(rv > 0);
+ }
+ PR_ASSERT(-1 != timeout || rv != 0);
+
+ return rv;
+}
+#endif /* _PR_NEED_FAKE_POLL */
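+
+/*
+ * Illustrative sketch (not part of the original file): with the emulation
+ * above, code written against the standard poll() interface runs unchanged
+ * on platforms that lack it.  'sock' is a hypothetical file descriptor:
+ */
+#if 0
+{
+    struct pollfd pfd;
+    int n;
+
+    pfd.fd = sock;
+    pfd.events = POLLIN;
+    pfd.revents = 0;
+    n = poll(&pfd, 1, 1000);    /* wait up to 1000 ms for readability */
+    if (n > 0 && (pfd.revents & POLLIN)) {
+        /* 'sock' can be read without blocking */
+    }
+}
+#endif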
diff --git a/nsprpub/pr/src/md/unix/unix_errors.c b/nsprpub/pr/src/md/unix/unix_errors.c
new file mode 100644
index 000000000..bcea0993f
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/unix_errors.c
@@ -0,0 +1,829 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+#if defined(_PR_POLL_AVAILABLE)
+#include <poll.h>
+#endif
+#include <errno.h>
+
+void _MD_unix_map_default_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err ) {
+ case EACCES:
+ prError = PR_NO_ACCESS_RIGHTS_ERROR;
+ break;
+ case EADDRINUSE:
+ prError = PR_ADDRESS_IN_USE_ERROR;
+ break;
+ case EADDRNOTAVAIL:
+ prError = PR_ADDRESS_NOT_AVAILABLE_ERROR;
+ break;
+ case EAFNOSUPPORT:
+ prError = PR_ADDRESS_NOT_SUPPORTED_ERROR;
+ break;
+ case EAGAIN:
+ prError = PR_WOULD_BLOCK_ERROR;
+ break;
+ /*
+ * On QNX and Neutrino, EALREADY is defined as EBUSY.
+ */
+#if EALREADY != EBUSY
+ case EALREADY:
+ prError = PR_ALREADY_INITIATED_ERROR;
+ break;
+#endif
+ case EBADF:
+ prError = PR_BAD_DESCRIPTOR_ERROR;
+ break;
+#ifdef EBADMSG
+ case EBADMSG:
+ prError = PR_IO_ERROR;
+ break;
+#endif
+ case EBUSY:
+ prError = PR_FILESYSTEM_MOUNTED_ERROR;
+ break;
+ case ECONNABORTED:
+ prError = PR_CONNECT_ABORTED_ERROR;
+ break;
+ case ECONNREFUSED:
+ prError = PR_CONNECT_REFUSED_ERROR;
+ break;
+ case ECONNRESET:
+ prError = PR_CONNECT_RESET_ERROR;
+ break;
+ case EDEADLK:
+ prError = PR_DEADLOCK_ERROR;
+ break;
+#ifdef EDIRCORRUPTED
+ case EDIRCORRUPTED:
+ prError = PR_DIRECTORY_CORRUPTED_ERROR;
+ break;
+#endif
+#ifdef EDQUOT
+ case EDQUOT:
+ prError = PR_NO_DEVICE_SPACE_ERROR;
+ break;
+#endif
+ case EEXIST:
+ prError = PR_FILE_EXISTS_ERROR;
+ break;
+ case EFAULT:
+ prError = PR_ACCESS_FAULT_ERROR;
+ break;
+ case EFBIG:
+ prError = PR_FILE_TOO_BIG_ERROR;
+ break;
+ case EHOSTUNREACH:
+ case EHOSTDOWN:
+ prError = PR_HOST_UNREACHABLE_ERROR;
+ break;
+ case EINPROGRESS:
+ prError = PR_IN_PROGRESS_ERROR;
+ break;
+ case EINTR:
+ prError = PR_PENDING_INTERRUPT_ERROR;
+ break;
+ case EINVAL:
+ prError = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ case EIO:
+ prError = PR_IO_ERROR;
+ break;
+ case EISCONN:
+ prError = PR_IS_CONNECTED_ERROR;
+ break;
+ case EISDIR:
+ prError = PR_IS_DIRECTORY_ERROR;
+ break;
+ case ELOOP:
+ prError = PR_LOOP_ERROR;
+ break;
+ case EMFILE:
+ prError = PR_PROC_DESC_TABLE_FULL_ERROR;
+ break;
+ case EMLINK:
+ prError = PR_MAX_DIRECTORY_ENTRIES_ERROR;
+ break;
+ case EMSGSIZE:
+ prError = PR_INVALID_ARGUMENT_ERROR;
+ break;
+#ifdef EMULTIHOP
+ case EMULTIHOP:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+#endif
+ case ENAMETOOLONG:
+ prError = PR_NAME_TOO_LONG_ERROR;
+ break;
+ case ENETUNREACH:
+ prError = PR_NETWORK_UNREACHABLE_ERROR;
+ break;
+ case ENFILE:
+ prError = PR_SYS_DESC_TABLE_FULL_ERROR;
+ break;
+ /*
+ * On SCO OpenServer 5, ENOBUFS is defined as ENOSR.
+ */
+#if defined(ENOBUFS) && (ENOBUFS != ENOSR)
+ case ENOBUFS:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+#endif
+ case ENODEV:
+ prError = PR_FILE_NOT_FOUND_ERROR;
+ break;
+ case ENOENT:
+ prError = PR_FILE_NOT_FOUND_ERROR;
+ break;
+ case ENOLCK:
+ prError = PR_FILE_IS_LOCKED_ERROR;
+ break;
+#ifdef ENOLINK
+ case ENOLINK:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+#endif
+ case ENOMEM:
+ prError = PR_OUT_OF_MEMORY_ERROR;
+ break;
+ case ENOPROTOOPT:
+ prError = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ case ENOSPC:
+ prError = PR_NO_DEVICE_SPACE_ERROR;
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+#endif
+ case ENOSYS:
+ prError = PR_NOT_IMPLEMENTED_ERROR;
+ break;
+ case ENOTCONN:
+ prError = PR_NOT_CONNECTED_ERROR;
+ break;
+ case ENOTDIR:
+ prError = PR_NOT_DIRECTORY_ERROR;
+ break;
+ case ENOTSOCK:
+ prError = PR_NOT_SOCKET_ERROR;
+ break;
+ case ENXIO:
+ prError = PR_FILE_NOT_FOUND_ERROR;
+ break;
+ case EOPNOTSUPP:
+ prError = PR_NOT_TCP_SOCKET_ERROR;
+ break;
+#ifdef EOVERFLOW
+ case EOVERFLOW:
+ prError = PR_BUFFER_OVERFLOW_ERROR;
+ break;
+#endif
+ case EPERM:
+ prError = PR_NO_ACCESS_RIGHTS_ERROR;
+ break;
+ case EPIPE:
+ prError = PR_CONNECT_RESET_ERROR;
+ break;
+#ifdef EPROTO
+ case EPROTO:
+ prError = PR_IO_ERROR;
+ break;
+#endif
+ case EPROTONOSUPPORT:
+ prError = PR_PROTOCOL_NOT_SUPPORTED_ERROR;
+ break;
+ case EPROTOTYPE:
+ prError = PR_ADDRESS_NOT_SUPPORTED_ERROR;
+ break;
+ case ERANGE:
+ prError = PR_INVALID_METHOD_ERROR;
+ break;
+ case EROFS:
+ prError = PR_READ_ONLY_FILESYSTEM_ERROR;
+ break;
+ case ESPIPE:
+ prError = PR_INVALID_METHOD_ERROR;
+ break;
+ case ETIMEDOUT:
+ prError = PR_IO_TIMEOUT_ERROR;
+ break;
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+ prError = PR_WOULD_BLOCK_ERROR;
+ break;
+#endif
+ case EXDEV:
+ prError = PR_NOT_SAME_DEVICE_ERROR;
+ break;
+ default:
+ prError = PR_UNKNOWN_ERROR;
+ break;
+ }
+ PR_SetError(prError, err);
+}
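+
+/*
+ * Illustrative sketch (not part of the original file): the mapping
+ * functions in this file are called immediately after a failing system
+ * call, so that NSPR clients can retrieve both the portable and the native
+ * error code.  'osfd' is a hypothetical file descriptor:
+ */
+#if 0
+{
+    PRErrorCode prerr;
+    PRInt32 oserr;
+
+    if (close(osfd) == -1) {
+        _MD_unix_map_close_error(errno);   /* records the NSPR error state */
+    }
+    /* later, in client code: */
+    prerr = PR_GetError();                 /* e.g. PR_REMOTE_FILE_ERROR */
+    oserr = PR_GetOSError();               /* the original errno value  */
+}
+#endif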
+
+void _MD_unix_map_opendir_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_closedir_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_BAD_DESCRIPTOR_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_readdir_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case 0:
+ case ENOENT:
+ prError = PR_NO_MORE_FILES_ERROR;
+ break;
+#ifdef EOVERFLOW
+ case EOVERFLOW:
+ prError = PR_IO_ERROR;
+ break;
+#endif
+ case EINVAL:
+ prError = PR_IO_ERROR;
+ break;
+ case ENXIO:
+ prError = PR_IO_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_unlink_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EPERM:
+ prError = PR_IS_DIRECTORY_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_stat_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_fstat_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_rename_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EEXIST:
+ prError = PR_DIRECTORY_NOT_EMPTY_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_access_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_mkdir_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_rmdir_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ /*
+ * On AIX 4.3, ENOTEMPTY is defined as EEXIST.
+ */
+#if ENOTEMPTY != EEXIST
+ case ENOTEMPTY:
+ prError = PR_DIRECTORY_NOT_EMPTY_ERROR;
+ break;
+#endif
+ case EEXIST:
+ prError = PR_DIRECTORY_NOT_EMPTY_ERROR;
+ break;
+ case EINVAL:
+ prError = PR_DIRECTORY_NOT_EMPTY_ERROR;
+ break;
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_read_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_INVALID_METHOD_ERROR;
+ break;
+ case ENXIO:
+ prError = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_write_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_INVALID_METHOD_ERROR;
+ break;
+ case ENXIO:
+ prError = PR_INVALID_METHOD_ERROR;
+ break;
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_lseek_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_fsync_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ case EINVAL:
+ prError = PR_INVALID_METHOD_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_close_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_socket_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_socketavailable_error(int err)
+{
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+}
+
+void _MD_unix_map_recv_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_recvfrom_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_send_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_sendto_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_writev_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_accept_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ENODEV:
+ prError = PR_NOT_TCP_SOCKET_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_connect_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+#if defined(UNIXWARE)
+ /*
+ * On some platforms, if we connect to a port on the local host
+ * (the loopback address) that no process is listening on, we get
+ * EIO instead of ECONNREFUSED.
+ */
+ case EIO:
+ prError = PR_CONNECT_REFUSED_ERROR;
+ break;
+#endif
+ case ENXIO:
+ prError = PR_IO_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_bind_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_SOCKET_ADDRESS_IS_BOUND_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_listen_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_shutdown_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_socketpair_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_getsockname_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_getpeername_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_getsockopt_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_BUFFER_OVERFLOW_ERROR;
+ break;
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_setsockopt_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_BUFFER_OVERFLOW_ERROR;
+ break;
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_open_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EAGAIN:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ case EBUSY:
+ prError = PR_IO_ERROR;
+ break;
+ case ENODEV:
+ prError = PR_FILE_NOT_FOUND_ERROR;
+ break;
+ case ENOMEM:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+#ifdef EOVERFLOW
+ case EOVERFLOW:
+ prError = PR_FILE_TOO_BIG_ERROR;
+ break;
+#endif
+ case ETIMEDOUT:
+ prError = PR_REMOTE_FILE_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_mmap_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EAGAIN:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ case EMFILE:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ case ENODEV:
+ prError = PR_OPERATION_NOT_SUPPORTED_ERROR;
+ break;
+ case ENXIO:
+ prError = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_gethostname_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+void _MD_unix_map_select_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+
+#if defined(_PR_POLL_AVAILABLE) || defined(_PR_NEED_FAKE_POLL)
+void _MD_unix_map_poll_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EAGAIN:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_poll_revents_error(int err)
+{
+ if (err & POLLNVAL)
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, EBADF);
+ else if (err & POLLHUP)
+ PR_SetError(PR_CONNECT_RESET_ERROR, EPIPE);
+ else if (err & POLLERR)
+ PR_SetError(PR_IO_ERROR, EIO);
+ else
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+}
+#endif /* _PR_POLL_AVAILABLE || _PR_NEED_FAKE_POLL */
+
+
+void _MD_unix_map_flock_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EINVAL:
+ prError = PR_BAD_DESCRIPTOR_ERROR;
+ break;
+ case EWOULDBLOCK:
+ prError = PR_FILE_IS_LOCKED_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+void _MD_unix_map_lockf_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ case EACCES:
+ prError = PR_FILE_IS_LOCKED_ERROR;
+ break;
+ case EDEADLK:
+ prError = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ default:
+ _MD_unix_map_default_error(err);
+ return;
+ }
+ PR_SetError(prError, err);
+}
+
+#ifdef AIX
+void _MD_aix_map_sendfile_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+#endif /* AIX */
+
+#ifdef HPUX11
+void _MD_hpux_map_sendfile_error(int err)
+{
+ _MD_unix_map_default_error(err);
+}
+#endif /* HPUX11 */
+
+#ifdef SOLARIS
+void _MD_solaris_map_sendfile_error(int err)
+{
+ PRErrorCode prError;
+
+ switch (err) {
+ /*
+ * Solaris defines a 0 return value for sendfile to mean end-of-file.
+ */
+ case 0:
+ prError = PR_END_OF_FILE_ERROR;
+ break;
+
+ default:
+ _MD_unix_map_default_error(err) ;
+ return;
+ }
+ PR_SetError(prError, err);
+}
+#endif /* SOLARIS */
+
+#ifdef LINUX
+void _MD_linux_map_sendfile_error(int err)
+{
+ _MD_unix_map_default_error(err) ;
+}
+#endif /* LINUX */
diff --git a/nsprpub/pr/src/md/unix/unixware.c b/nsprpub/pr/src/md/unix/unixware.c
new file mode 100644
index 000000000..638af5f2e
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/unixware.c
@@ -0,0 +1,551 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#if !defined (USE_SVR4_THREADS)
+
+/*
+ * using only NSPR threads here
+ */
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+#ifdef ALARMS_BREAK_TCP /* I don't think they do */
+
+PRInt32 _MD_connect(PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _connect(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+    return(rv);
+}
+
+PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _accept(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+#endif
+
+/*
+ * These are also implemented in pratom.c using NSPR locks. Any reason
+ * this might be better or worse? If you like this better, define
+ * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
+ */
+#ifdef _PR_HAVE_ATOMIC_OPS
+/* Atomic operations */
+#include <stdio.h>
+static FILE *_uw_semf;
+
+void
+_MD_INIT_ATOMIC(void)
+{
+ /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
+ if ((_uw_semf = tmpfile()) == NULL)
+ PR_ASSERT(0);
+
+ return;
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)++;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
+{
+ flockfile(_uw_semf);
+ (*ptr) += val;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)--;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ flockfile(_uw_semf);
+ *val = newval;
+ unflockfile(_uw_semf);
+}
+#endif
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Unixware */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Unixware.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Unixware.");
+    return PR_FAILURE;
+}
+
+#else /* USE_SVR4_THREADS */
+
+/* NOTE:
+ * SPARC v9 (Ultras) do have an atomic test-and-set operation. But
+ * SPARC v8 doesn't. We should detect in the init if we are running on
+ * v8 or v9, and then use assembly where we can.
+ */
+
+#include <thread.h>
+#include <synch.h>
+
+static mutex_t _unixware_atomic = DEFAULTMUTEX;
+
+#define TEST_THEN_ADD(where, inc) \
+ if (mutex_lock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);\
+ *where += inc;\
+ if (mutex_unlock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);
+
+#define TEST_THEN_SET(where, val) \
+ if (mutex_lock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);\
+ *where = val;\
+ if (mutex_unlock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);
+
+void
+_MD_INIT_ATOMIC(void)
+{
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ TEST_THEN_ADD(val, 1);
+}
+
+void
+_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
+{
+ TEST_THEN_ADD(ptr, val);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
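+    /* adding 0xffffffff is adding -1 in two's complement, i.e. a decrement */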
+ TEST_THEN_ADD(val, 0xffffffff);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ TEST_THEN_SET(val, newval);
+}
+
+#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <sys/lwp.h>
+#include <sys/procfs.h>
+#include <sys/syscall.h>
+
+
+THREAD_KEY_T threadid_key;
+THREAD_KEY_T cpuid_key;
+THREAD_KEY_T last_thread_key;
+static sigset_t set, oldset;
+
+void _MD_EarlyInit(void)
+{
+ THR_KEYCREATE(&threadid_key, NULL);
+ THR_KEYCREATE(&cpuid_key, NULL);
+ THR_KEYCREATE(&last_thread_key, NULL);
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+}
+
+PRStatus _MD_CREATE_THREAD(PRThread *thread,
+ void (*start)(void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ long flags;
+
+ /* mask out SIGALRM for native thread creation */
+ thr_sigsetmask(SIG_BLOCK, &set, &oldset);
+
+ flags = (state == PR_JOINABLE_THREAD ? THR_SUSPENDED/*|THR_NEW_LWP*/
+ : THR_SUSPENDED|THR_DETACHED/*|THR_NEW_LWP*/);
+ if (_PR_IS_GCABLE_THREAD(thread) ||
+ (scope == PR_GLOBAL_BOUND_THREAD))
+ flags |= THR_BOUND;
+
+ if (thr_create(NULL, thread->stack->stackSize,
+ (void *(*)(void *)) start, (void *) thread,
+ flags,
+ &thread->md.handle)) {
+ thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
+ return PR_FAILURE;
+ }
+
+
+    /* When the thread starts running, the lwpid is set to the right
+     * value. Until then we mark it as 'uninit' so that its register
+     * state is initialized properly for GC */
+
+ thread->md.lwpid = -1;
+ thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
+ _MD_NEW_SEM(&thread->md.waiter_sem, 0);
+
+ if ((scope == PR_GLOBAL_THREAD) || (scope == PR_GLOBAL_BOUND_THREAD)) {
+ thread->flags |= _PR_GLOBAL_SCOPE;
+ }
+
+ /*
+ ** Set the thread priority. This will also place the thread on
+ ** the runQ.
+ **
+ ** Force PR_SetThreadPriority to set the priority by
+ ** setting thread->priority to 100.
+ */
+ {
+ int pri;
+ pri = thread->priority;
+ thread->priority = 100;
+ PR_SetThreadPriority( thread, pri );
+
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("(0X%x)[Start]: on to runq at priority %d",
+ thread, thread->priority));
+ }
+
+ /* Activate the thread */
+ if (thr_continue( thread->md.handle ) ) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+void _MD_cleanup_thread(PRThread *thread)
+{
+ thread_t hdl;
+ PRMonitor *mon;
+
+ hdl = thread->md.handle;
+
+ /*
+ ** First, suspend the thread (unless it's the active one)
+ ** Because we suspend it first, we don't have to use LOCK_SCHEDULER to
+ ** prevent both of us modifying the thread structure at the same time.
+ */
+ if ( thread != _PR_MD_CURRENT_THREAD() ) {
+ thr_suspend(hdl);
+ }
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("(0X%x)[DestroyThread]\n", thread));
+
+ _MD_DESTROY_SEM(&thread->md.waiter_sem);
+}
+
+void _MD_SET_PRIORITY(_MDThread *md_thread, PRUintn newPri)
+{
+ if(thr_setprio((thread_t)md_thread->handle, newPri)) {
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("_PR_SetThreadPriority: can't set thread priority\n"));
+ }
+}
+
+void _MD_WAIT_CV(
+ struct _MDCVar *md_cv, struct _MDLock *md_lock, PRIntervalTime timeout)
+{
+ struct timespec tt;
+ PRUint32 msec;
+ int rv;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ msec = PR_IntervalToMilliseconds(timeout);
+
+ GETTIME (&tt);
+
+ tt.tv_sec += msec / PR_MSEC_PER_SEC;
+ tt.tv_nsec += (msec % PR_MSEC_PER_SEC) * PR_NSEC_PER_MSEC;
+ /* Check for nsec overflow - otherwise we'll get an EINVAL */
+ if (tt.tv_nsec >= PR_NSEC_PER_SEC) {
+ tt.tv_sec++;
+ tt.tv_nsec -= PR_NSEC_PER_SEC;
+ }
+ me->md.sp = unixware_getsp();
+
+
+ /* XXX Solaris 2.5.x gives back EINTR occasionally for no reason
+ * hence ignore EINTR for now */
+
+ COND_TIMEDWAIT(&md_cv->cv, &md_lock->lock, &tt);
+}
+
+void _MD_lock(struct _MDLock *md_lock)
+{
+ mutex_lock(&md_lock->lock);
+}
+
+void _MD_unlock(struct _MDLock *md_lock)
+{
+ mutex_unlock(&((md_lock)->lock));
+}
+
+
+PRThread *_pr_current_thread_tls()
+{
+ PRThread *ret;
+
+ thr_getspecific(threadid_key, (void **)&ret);
+ return ret;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ _MD_WAIT_SEM(&thread->md.waiter_sem);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread == NULL) {
+ return PR_SUCCESS;
+ }
+ _MD_POST_SEM(&thread->md.waiter_sem);
+ return PR_SUCCESS;
+}
+
+_PRCPU *_pr_current_cpu_tls()
+{
+ _PRCPU *ret;
+
+ thr_getspecific(cpuid_key, (void **)&ret);
+ return ret;
+}
+
+PRThread *_pr_last_thread_tls()
+{
+ PRThread *ret;
+
+ thr_getspecific(last_thread_key, (void **)&ret);
+ return ret;
+}
+
+_MDLock _pr_ioq_lock;
+
+void _MD_INIT_IO (void)
+{
+ _MD_NEW_LOCK(&_pr_ioq_lock);
+}
+
+PRStatus _MD_InitializeThread(PRThread *thread)
+{
+    if (!_PR_IS_NATIVE_THREAD(thread))
+        return PR_SUCCESS;
+    /* prime the sp; subtract sizeof(long) so we don't hit the assert
+     * that curr sp > base_stack
+     */
+ thread->md.sp = (uint_t) thread->stack->allocBase - sizeof(long);
+ thread->md.lwpid = _lwp_self();
+ thread->md.handle = THR_SELF();
+
+ /* all threads on Solaris are global threads from NSPR's perspective
+ * since all of them are mapped to Solaris threads.
+ */
+ thread->flags |= _PR_GLOBAL_SCOPE;
+
+ /* For primordial/attached thread, we don't create an underlying native thread.
+ * So, _MD_CREATE_THREAD() does not get called. We need to do initialization
+ * like allocating thread's synchronization variables and set the underlying
+ * native thread's priority.
+ */
+ if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) {
+ _MD_NEW_SEM(&thread->md.waiter_sem, 0);
+ _MD_SET_PRIORITY(&(thread->md), thread->priority);
+ }
+ return PR_SUCCESS;
+}
+
+static sigset_t old_mask; /* store away original gc thread sigmask */
+static int gcprio; /* store away original gc thread priority */
+static lwpid_t *all_lwps=NULL; /* list of lwps that we suspended */
+static int num_lwps ;
+static int suspendAllOn = 0;
+
+#define VALID_SP(sp, bottom, top) \
+ (((uint_t)(sp)) > ((uint_t)(bottom)) && ((uint_t)(sp)) < ((uint_t)(top)))
+
+void unixware_preempt_off()
+{
+ sigset_t set;
+ (void)sigfillset(&set);
+ sigprocmask (SIG_SETMASK, &set, &old_mask);
+}
+
+void unixware_preempt_on()
+{
+ sigprocmask (SIG_SETMASK, &old_mask, NULL);
+}
+
+void _MD_Begin_SuspendAll()
+{
+ unixware_preempt_off();
+
+ PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin_SuspendAll\n"));
+ /* run at highest prio so I cannot be preempted */
+ thr_getprio(thr_self(), &gcprio);
+ thr_setprio(thr_self(), 0x7fffffff);
+ suspendAllOn = 1;
+}
+
+void _MD_End_SuspendAll()
+{
+}
+
+void _MD_End_ResumeAll()
+{
+ PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("End_ResumeAll\n"));
+ thr_setprio(thr_self(), gcprio);
+ unixware_preempt_on();
+ suspendAllOn = 0;
+}
+
+void _MD_Suspend(PRThread *thr)
+{
+ int lwp_fd, result;
+ int lwp_main_proc_fd = 0;
+
+ thr_suspend(thr->md.handle);
+ if (!_PR_IS_GCABLE_THREAD(thr))
+ return;
+    /* XXX The primordial thread cannot be bound to an lwp, so there is no
+     * way to reliably obtain the lwp status for the primordial thread.
+     * Hence we skip this step for the primordial thread, hoping that its
+     * SP is saved during lock and condition-variable waits.
+     * XXX - Again, this is a concern only for the Java interpreter, not
+     * for the server, because the primordial thread in the server does
+     * not do Java work.
+     */
+ if (thr->flags & _PR_PRIMORDIAL)
+ return;
+
+ /* if the thread is not started yet then don't do anything */
+ if (!suspendAllOn || thr->md.lwpid == -1)
+ return;
+
+}
+void _MD_Resume(PRThread *thr)
+{
+ if (!_PR_IS_GCABLE_THREAD(thr) || !suspendAllOn){
+        /* XXX While suspendAllOn is set we may be in the middle of
+         * lwp_suspend calls, during which no thread-library or libc
+         * calls are safe.  Hence make sure that no resume is requested
+         * for a non-GCable thread while suspendAllOn is set. */
+ PR_ASSERT(!suspendAllOn);
+ thr_continue(thr->md.handle);
+ return;
+ }
+ if (thr->md.lwpid == -1)
+ return;
+
+ if ( _lwp_continue(thr->md.lwpid) < 0) {
+ PR_ASSERT(0); /* ARGH, we are hosed! */
+ }
+}
+
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) getcontext(CONTEXT(t)); /* XXX tune me: set md_IRIX.c */
+ }
+ *np = NGREG;
+ if (t->md.lwpid == -1)
+ memset(&t->md.context.uc_mcontext.gregs[0], 0, NGREG * sizeof(PRWord));
+ return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
+}
+
+int
+_pr_unixware_clock_gettime (struct timespec *tp)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ tp->tv_sec = tv.tv_sec;
+ tp->tv_nsec = tv.tv_usec * 1000;
+ return 0;
+}
+
+
+#endif /* USE_SVR4_THREADS */
diff --git a/nsprpub/pr/src/md/unix/uxpoll.c b/nsprpub/pr/src/md/unix/uxpoll.c
new file mode 100644
index 000000000..8a618ed80
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/uxpoll.c
@@ -0,0 +1,676 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if defined(_PR_PTHREADS)
+
+#error "This file should not be compiled"
+
+#else /* defined(_PR_PTHREADS) */
+
+#include "primpl.h"
+
+#include <sys/time.h>
+
+#include <fcntl.h>
+#ifdef _PR_USE_POLL
+#include <poll.h>
+#endif
+
+#if defined(_PR_USE_POLL)
+static PRInt32 NativeThreadPoll(
+ PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
+{
+ /*
+     * This function is mostly duplicated from ptio.c's PR_Poll().
+ */
+ PRInt32 ready = 0;
+ /*
+ * For restarting poll() if it is interrupted by a signal.
+ * We use these variables to figure out how much time has
+ * elapsed and how much of the timeout still remains.
+ */
+ PRIntn index, msecs;
+ struct pollfd *syspoll = NULL;
+ PRIntervalTime start, elapsed, remaining;
+
+ syspoll = (struct pollfd*)PR_MALLOC(npds * sizeof(struct pollfd));
+ if (NULL == syspoll)
+ {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return -1;
+ }
+ for (index = 0; index < npds; ++index)
+ {
+ PRFileDesc *bottom;
+ PRInt16 in_flags_read = 0, in_flags_write = 0;
+ PRInt16 out_flags_read = 0, out_flags_write = 0;
+
+ if ((NULL != pds[index].fd) && (0 != pds[index].in_flags))
+ {
+ if (pds[index].in_flags & PR_POLL_READ)
+ {
+ in_flags_read = (pds[index].fd->methods->poll)(
+ pds[index].fd,
+ pds[index].in_flags & ~PR_POLL_WRITE,
+ &out_flags_read);
+ }
+ if (pds[index].in_flags & PR_POLL_WRITE)
+ {
+ in_flags_write = (pds[index].fd->methods->poll)(
+ pds[index].fd,
+ pds[index].in_flags & ~PR_POLL_READ,
+ &out_flags_write);
+ }
+ if ((0 != (in_flags_read & out_flags_read))
+ || (0 != (in_flags_write & out_flags_write)))
+ {
+ /* this one is ready right now */
+ if (0 == ready)
+ {
+ /*
+ * We will return without calling the system
+ * poll function. So zero the out_flags
+ * fields of all the poll descriptors before
+ * this one.
+ */
+ int i;
+ for (i = 0; i < index; i++)
+ {
+ pds[i].out_flags = 0;
+ }
+ }
+ ready += 1;
+ pds[index].out_flags = out_flags_read | out_flags_write;
+ }
+ else
+ {
+ pds[index].out_flags = 0; /* pre-condition */
+ /* now locate the NSPR layer at the bottom of the stack */
+ bottom = PR_GetIdentitiesLayer(pds[index].fd, PR_NSPR_IO_LAYER);
+ PR_ASSERT(NULL != bottom); /* what to do about that? */
+ if ((NULL != bottom)
+ && (_PR_FILEDESC_OPEN == bottom->secret->state))
+ {
+ if (0 == ready)
+ {
+ syspoll[index].fd = bottom->secret->md.osfd;
+ syspoll[index].events = 0; /* pre-condition */
+ if (in_flags_read & PR_POLL_READ)
+ {
+ pds[index].out_flags |=
+ _PR_POLL_READ_SYS_READ;
+ syspoll[index].events |= POLLIN;
+ }
+ if (in_flags_read & PR_POLL_WRITE)
+ {
+ pds[index].out_flags |=
+ _PR_POLL_READ_SYS_WRITE;
+ syspoll[index].events |= POLLOUT;
+ }
+ if (in_flags_write & PR_POLL_READ)
+ {
+ pds[index].out_flags |=
+ _PR_POLL_WRITE_SYS_READ;
+ syspoll[index].events |= POLLIN;
+ }
+ if (in_flags_write & PR_POLL_WRITE)
+ {
+ pds[index].out_flags |=
+ _PR_POLL_WRITE_SYS_WRITE;
+ syspoll[index].events |= POLLOUT;
+ }
+ if (pds[index].in_flags & PR_POLL_EXCEPT)
+ syspoll[index].events |= POLLPRI;
+ }
+ }
+ else
+ {
+ if (0 == ready)
+ {
+ int i;
+ for (i = 0; i < index; i++)
+ {
+ pds[i].out_flags = 0;
+ }
+ }
+ ready += 1; /* this will cause an abrupt return */
+ pds[index].out_flags = PR_POLL_NVAL; /* bogii */
+ }
+ }
+ }
+ else
+ {
+ /* make poll() ignore this entry */
+ syspoll[index].fd = -1;
+ syspoll[index].events = 0;
+ pds[index].out_flags = 0;
+ }
+ }
+
+ if (0 == ready)
+ {
+ switch (timeout)
+ {
+ case PR_INTERVAL_NO_WAIT: msecs = 0; break;
+ case PR_INTERVAL_NO_TIMEOUT: msecs = -1; break;
+ default:
+ msecs = PR_IntervalToMilliseconds(timeout);
+ start = PR_IntervalNow();
+ }
+
+retry:
+ ready = _MD_POLL(syspoll, npds, msecs);
+ if (-1 == ready)
+ {
+ PRIntn oserror = errno;
+
+ if (EINTR == oserror)
+ {
+ if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
+ else if (timeout == PR_INTERVAL_NO_WAIT) ready = 0;
+ else
+ {
+ elapsed = (PRIntervalTime)(PR_IntervalNow() - start);
+ if (elapsed > timeout) ready = 0; /* timed out */
+ else
+ {
+ remaining = timeout - elapsed;
+ msecs = PR_IntervalToMilliseconds(remaining);
+ goto retry;
+ }
+ }
+ }
+ else _PR_MD_MAP_POLL_ERROR(oserror);
+ }
+ else if (ready > 0)
+ {
+ for (index = 0; index < npds; ++index)
+ {
+ PRInt16 out_flags = 0;
+ if ((NULL != pds[index].fd) && (0 != pds[index].in_flags))
+ {
+ if (0 != syspoll[index].revents)
+ {
+ /*
+ ** Set up the out_flags so that it contains the
+ ** bits that the highest layer thinks are nice
+ ** to have. Then the client of that layer will
+ ** call the appropriate I/O function and maybe
+ ** the protocol will make progress.
+ */
+ if (syspoll[index].revents & POLLIN)
+ {
+ if (pds[index].out_flags
+ & _PR_POLL_READ_SYS_READ)
+ {
+ out_flags |= PR_POLL_READ;
+ }
+ if (pds[index].out_flags
+ & _PR_POLL_WRITE_SYS_READ)
+ {
+ out_flags |= PR_POLL_WRITE;
+ }
+ }
+ if (syspoll[index].revents & POLLOUT)
+ {
+ if (pds[index].out_flags
+ & _PR_POLL_READ_SYS_WRITE)
+ {
+ out_flags |= PR_POLL_READ;
+ }
+ if (pds[index].out_flags
+ & _PR_POLL_WRITE_SYS_WRITE)
+ {
+ out_flags |= PR_POLL_WRITE;
+ }
+ }
+ if (syspoll[index].revents & POLLPRI)
+ out_flags |= PR_POLL_EXCEPT;
+ if (syspoll[index].revents & POLLERR)
+ out_flags |= PR_POLL_ERR;
+ if (syspoll[index].revents & POLLNVAL)
+ out_flags |= PR_POLL_NVAL;
+ if (syspoll[index].revents & POLLHUP)
+ out_flags |= PR_POLL_HUP;
+ }
+ }
+ pds[index].out_flags = out_flags;
+ }
+ }
+ }
+
+ PR_DELETE(syspoll);
+ return ready;
+
+} /* NativeThreadPoll */
+#endif /* defined(_PR_USE_POLL) */
+
+#if !defined(_PR_USE_POLL)
+static PRInt32 NativeThreadSelect(
+ PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
+{
+ /*
+ * This code is almost a duplicate of w32poll.c's _PR_MD_PR_POLL().
+ */
+ fd_set rd, wt, ex;
+ PRFileDesc *bottom;
+ PRPollDesc *pd, *epd;
+ PRInt32 maxfd = -1, ready, err;
+ PRIntervalTime remaining, elapsed, start;
+
+ struct timeval tv, *tvp = NULL;
+
+ FD_ZERO(&rd);
+ FD_ZERO(&wt);
+ FD_ZERO(&ex);
+
+ ready = 0;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++)
+ {
+ PRInt16 in_flags_read = 0, in_flags_write = 0;
+ PRInt16 out_flags_read = 0, out_flags_write = 0;
+
+ if ((NULL != pd->fd) && (0 != pd->in_flags))
+ {
+ if (pd->in_flags & PR_POLL_READ)
+ {
+ in_flags_read = (pd->fd->methods->poll)(
+ pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
+ }
+ if (pd->in_flags & PR_POLL_WRITE)
+ {
+ in_flags_write = (pd->fd->methods->poll)(
+ pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
+ }
+ if ((0 != (in_flags_read & out_flags_read))
+ || (0 != (in_flags_write & out_flags_write)))
+ {
+ /* this one's ready right now */
+ if (0 == ready)
+ {
+ /*
+ * We will have to return without calling the
+ * system poll/select function. So zero the
+ * out_flags fields of all the poll descriptors
+ * before this one.
+ */
+ PRPollDesc *prev;
+ for (prev = pds; prev < pd; prev++)
+ {
+ prev->out_flags = 0;
+ }
+ }
+ ready += 1;
+ pd->out_flags = out_flags_read | out_flags_write;
+ }
+ else
+ {
+ pd->out_flags = 0; /* pre-condition */
+
+ /* make sure this is an NSPR supported stack */
+ bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
+ PR_ASSERT(NULL != bottom); /* what to do about that? */
+ if ((NULL != bottom)
+ && (_PR_FILEDESC_OPEN == bottom->secret->state))
+ {
+ if (0 == ready)
+ {
+ PRInt32 osfd = bottom->secret->md.osfd;
+ if (osfd > maxfd) maxfd = osfd;
+ if (in_flags_read & PR_POLL_READ)
+ {
+ pd->out_flags |= _PR_POLL_READ_SYS_READ;
+ FD_SET(osfd, &rd);
+ }
+ if (in_flags_read & PR_POLL_WRITE)
+ {
+ pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
+ FD_SET(osfd, &wt);
+ }
+ if (in_flags_write & PR_POLL_READ)
+ {
+ pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
+ FD_SET(osfd, &rd);
+ }
+ if (in_flags_write & PR_POLL_WRITE)
+ {
+ pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
+ FD_SET(osfd, &wt);
+ }
+ if (pd->in_flags & PR_POLL_EXCEPT) FD_SET(osfd, &ex);
+ }
+ }
+ else
+ {
+ if (0 == ready)
+ {
+ PRPollDesc *prev;
+ for (prev = pds; prev < pd; prev++)
+ {
+ prev->out_flags = 0;
+ }
+ }
+ ready += 1; /* this will cause an abrupt return */
+ pd->out_flags = PR_POLL_NVAL; /* bogii */
+ }
+ }
+ }
+ else
+ {
+ pd->out_flags = 0;
+ }
+ }
+
+ if (0 != ready) return ready; /* no need to block */
+
+ remaining = timeout;
+ start = PR_IntervalNow();
+
+retry:
+ if (timeout != PR_INTERVAL_NO_TIMEOUT)
+ {
+ PRInt32 ticksPerSecond = PR_TicksPerSecond();
+ tv.tv_sec = remaining / ticksPerSecond;
+ tv.tv_usec = PR_IntervalToMicroseconds( remaining % ticksPerSecond );
+ tvp = &tv;
+ }
+
+ ready = _MD_SELECT(maxfd + 1, &rd, &wt, &ex, tvp);
+
+ if (ready == -1 && errno == EINTR)
+ {
+ if (timeout == PR_INTERVAL_NO_TIMEOUT) goto retry;
+ else
+ {
+ elapsed = (PRIntervalTime) (PR_IntervalNow() - start);
+ if (elapsed > timeout) ready = 0; /* timed out */
+ else
+ {
+ remaining = timeout - elapsed;
+ goto retry;
+ }
+ }
+ }
+
+ /*
+ ** Now to unravel the select sets back into the client's poll
+ ** descriptor list. Is this possibly an area for pissing away
+ ** a few cycles or what?
+ */
+ if (ready > 0)
+ {
+ ready = 0;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++)
+ {
+ PRInt16 out_flags = 0;
+ if ((NULL != pd->fd) && (0 != pd->in_flags))
+ {
+ PRInt32 osfd;
+ bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
+ PR_ASSERT(NULL != bottom);
+
+ osfd = bottom->secret->md.osfd;
+
+ if (FD_ISSET(osfd, &rd))
+ {
+ if (pd->out_flags & _PR_POLL_READ_SYS_READ)
+ out_flags |= PR_POLL_READ;
+ if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
+ out_flags |= PR_POLL_WRITE;
+ }
+ if (FD_ISSET(osfd, &wt))
+ {
+ if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
+ out_flags |= PR_POLL_READ;
+ if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
+ out_flags |= PR_POLL_WRITE;
+ }
+ if (FD_ISSET(osfd, &ex)) out_flags |= PR_POLL_EXCEPT;
+ }
+ pd->out_flags = out_flags;
+ if (out_flags) ready++;
+ }
+ PR_ASSERT(ready > 0);
+ }
+ else if (ready < 0)
+ {
+ err = _MD_ERRNO();
+ if (err == EBADF)
+ {
+ /* Find the bad fds */
+ ready = 0;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++)
+ {
+ pd->out_flags = 0;
+ if ((NULL != pd->fd) && (0 != pd->in_flags))
+ {
+ bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
+ if (fcntl(bottom->secret->md.osfd, F_GETFL, 0) == -1)
+ {
+ pd->out_flags = PR_POLL_NVAL;
+ ready++;
+ }
+ }
+ }
+ PR_ASSERT(ready > 0);
+ }
+ else _PR_MD_MAP_SELECT_ERROR(err);
+ }
+
+ return ready;
+} /* NativeThreadSelect */
+#endif /* !defined(_PR_USE_POLL) */
+
+static PRInt32 LocalThreads(
+ PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
+{
+ PRPollDesc *pd, *epd;
+ PRInt32 ready, pdcnt;
+ _PRUnixPollDesc *unixpds, *unixpd;
+
+ /*
+ * XXX
+ * PRPollDesc has a PRFileDesc field, fd, while the IOQ
+ * is a list of PRPollQueue structures, each of which contains
+ * a _PRUnixPollDesc. A _PRUnixPollDesc struct contains
+ * the OS file descriptor, osfd, and not a PRFileDesc.
+     * So, we have to allocate memory for _PRUnixPollDesc structures,
+ * copy the flags information from the pds list and have pq
+ * point to this list of _PRUnixPollDesc structures.
+ *
+ * It would be better if the memory allocation can be avoided.
+ */
+
+ unixpd = unixpds = (_PRUnixPollDesc*)
+ PR_MALLOC(npds * sizeof(_PRUnixPollDesc));
+ if (NULL == unixpds)
+ {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return -1;
+ }
+
+ ready = 0;
+ for (pdcnt = 0, pd = pds, epd = pd + npds; pd < epd; pd++)
+ {
+ PRFileDesc *bottom;
+ PRInt16 in_flags_read = 0, in_flags_write = 0;
+ PRInt16 out_flags_read = 0, out_flags_write = 0;
+
+ if ((NULL != pd->fd) && (0 != pd->in_flags))
+ {
+ if (pd->in_flags & PR_POLL_READ)
+ {
+ in_flags_read = (pd->fd->methods->poll)(
+ pd->fd, pd->in_flags & ~PR_POLL_WRITE, &out_flags_read);
+ }
+ if (pd->in_flags & PR_POLL_WRITE)
+ {
+ in_flags_write = (pd->fd->methods->poll)(
+ pd->fd, pd->in_flags & ~PR_POLL_READ, &out_flags_write);
+ }
+ if ((0 != (in_flags_read & out_flags_read))
+ || (0 != (in_flags_write & out_flags_write)))
+ {
+ /* this one's ready right now */
+ if (0 == ready)
+ {
+ /*
+ * We will have to return without calling the
+ * system poll/select function. So zero the
+ * out_flags fields of all the poll descriptors
+ * before this one.
+ */
+ PRPollDesc *prev;
+ for (prev = pds; prev < pd; prev++)
+ {
+ prev->out_flags = 0;
+ }
+ }
+ ready += 1;
+ pd->out_flags = out_flags_read | out_flags_write;
+ }
+ else
+ {
+ pd->out_flags = 0; /* pre-condition */
+ bottom = PR_GetIdentitiesLayer(pd->fd, PR_NSPR_IO_LAYER);
+ PR_ASSERT(NULL != bottom); /* what to do about that? */
+ if ((NULL != bottom)
+ && (_PR_FILEDESC_OPEN == bottom->secret->state))
+ {
+ if (0 == ready)
+ {
+ unixpd->osfd = bottom->secret->md.osfd;
+ unixpd->in_flags = 0;
+ if (in_flags_read & PR_POLL_READ)
+ {
+ unixpd->in_flags |= _PR_UNIX_POLL_READ;
+ pd->out_flags |= _PR_POLL_READ_SYS_READ;
+ }
+ if (in_flags_read & PR_POLL_WRITE)
+ {
+ unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
+ pd->out_flags |= _PR_POLL_READ_SYS_WRITE;
+ }
+ if (in_flags_write & PR_POLL_READ)
+ {
+ unixpd->in_flags |= _PR_UNIX_POLL_READ;
+ pd->out_flags |= _PR_POLL_WRITE_SYS_READ;
+ }
+ if (in_flags_write & PR_POLL_WRITE)
+ {
+ unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
+ pd->out_flags |= _PR_POLL_WRITE_SYS_WRITE;
+ }
+ if ((in_flags_read | in_flags_write) & PR_POLL_EXCEPT)
+ {
+ unixpd->in_flags |= _PR_UNIX_POLL_EXCEPT;
+ }
+ unixpd++; pdcnt++;
+ }
+ }
+ else
+ {
+ if (0 == ready)
+ {
+ PRPollDesc *prev;
+ for (prev = pds; prev < pd; prev++)
+ {
+ prev->out_flags = 0;
+ }
+ }
+ ready += 1; /* this will cause an abrupt return */
+ pd->out_flags = PR_POLL_NVAL; /* bogii */
+ }
+ }
+ }
+ }
+
+ if (0 != ready)
+ {
+ /* no need to block */
+ PR_DELETE(unixpds);
+ return ready;
+ }
+
+ ready = _PR_WaitForMultipleFDs(unixpds, pdcnt, timeout);
+
+ /*
+ * Copy the out_flags from the _PRUnixPollDesc structures to the
+ * user's PRPollDesc structures and free the allocated memory
+ */
+ unixpd = unixpds;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++)
+ {
+ PRInt16 out_flags = 0;
+ if ((NULL != pd->fd) && (0 != pd->in_flags))
+ {
+ /*
+ * take errors from the poll operation,
+ * the R/W bits from the request
+ */
+ if (0 != unixpd->out_flags)
+ {
+ if (unixpd->out_flags & _PR_UNIX_POLL_READ)
+ {
+ if (pd->out_flags & _PR_POLL_READ_SYS_READ)
+ out_flags |= PR_POLL_READ;
+ if (pd->out_flags & _PR_POLL_WRITE_SYS_READ)
+ out_flags |= PR_POLL_WRITE;
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_WRITE)
+ {
+ if (pd->out_flags & _PR_POLL_READ_SYS_WRITE)
+ out_flags |= PR_POLL_READ;
+ if (pd->out_flags & _PR_POLL_WRITE_SYS_WRITE)
+ out_flags |= PR_POLL_WRITE;
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT)
+ out_flags |= PR_POLL_EXCEPT;
+ if (unixpd->out_flags & _PR_UNIX_POLL_ERR)
+ out_flags |= PR_POLL_ERR;
+ if (unixpd->out_flags & _PR_UNIX_POLL_NVAL)
+ out_flags |= PR_POLL_NVAL;
+ if (unixpd->out_flags & _PR_UNIX_POLL_HUP)
+ out_flags |= PR_POLL_HUP;
+ }
+ unixpd++;
+ }
+ pd->out_flags = out_flags;
+ }
+
+ PR_DELETE(unixpds);
+
+ return ready;
+} /* LocalThreads */
+
+#if defined(_PR_USE_POLL)
+#define NativeThreads NativeThreadPoll
+#else
+#define NativeThreads NativeThreadSelect
+#endif
+
+PRInt32 _MD_pr_poll(PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
+{
+ PRInt32 rv = 0;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (_PR_PENDING_INTERRUPT(me))
+ {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ if (0 == npds) PR_Sleep(timeout);
+ else if (_PR_IS_NATIVE_THREAD(me))
+ rv = NativeThreads(pds, npds, timeout);
+ else rv = LocalThreads(pds, npds, timeout);
+
+ return rv;
+} /* _MD_pr_poll */
+
+#endif /* defined(_PR_PTHREADS) */
+
+/* uxpoll.c */
+
diff --git a/nsprpub/pr/src/md/unix/uxproces.c b/nsprpub/pr/src/md/unix/uxproces.c
new file mode 100644
index 000000000..a3b35e540
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/uxproces.c
@@ -0,0 +1,885 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/wait.h>
+#include <string.h>
+#if defined(AIX)
+#include <dlfcn.h> /* For dlopen, dlsym, dlclose */
+#endif
+
+#if defined(DARWIN)
+#if defined(HAVE_CRT_EXTERNS_H)
+#include <crt_externs.h>
+#endif
+#else
+PR_IMPORT_DATA(char **) environ;
+#endif
+
+/*
+ * HP-UX 9 doesn't have the SA_RESTART flag.
+ */
+#ifndef SA_RESTART
+#define SA_RESTART 0
+#endif
+
+/*
+ **********************************************************************
+ *
+ * The Unix process routines
+ *
+ **********************************************************************
+ */
+
+#define _PR_SIGNALED_EXITSTATUS 256
+
+typedef enum pr_PidState {
+ _PR_PID_DETACHED,
+ _PR_PID_REAPED,
+ _PR_PID_WAITING
+} pr_PidState;
+
+typedef struct pr_PidRecord {
+ pid_t pid;
+ int exitStatus;
+ pr_PidState state;
+ PRCondVar *reapedCV;
+ struct pr_PidRecord *next;
+} pr_PidRecord;
+
+/*
+ * Irix sprocs and LinuxThreads are actually processes that can share
+ * the virtual address space and file descriptors.
+ */
+#if (defined(IRIX) && !defined(_PR_PTHREADS)) \
+ || ((defined(LINUX) || defined(__GNU__) || defined(__GLIBC__)) \
+ && defined(_PR_PTHREADS))
+#define _PR_SHARE_CLONES
+#endif
+
+/*
+ * The macro _PR_NATIVE_THREADS indicates that we are
+ * using native threads only, so waitpid() blocks just the
+ * calling thread, not the process. In this case, the waitpid
+ * daemon thread can safely block in waitpid(). So we don't
+ * need to catch SIGCHLD, and the pipe to unblock PR_Poll() is
+ * also not necessary.
+ */
+
+#if defined(_PR_GLOBAL_THREADS_ONLY) \
+ || (defined(_PR_PTHREADS) \
+ && !defined(LINUX) && !defined(__GNU__) && !defined(__GLIBC__))
+#define _PR_NATIVE_THREADS
+#endif
+
+/*
+ * All the static variables used by the Unix process routines are
+ * collected in this structure.
+ */
+
+static struct {
+ PRCallOnceType once;
+ PRThread *thread;
+ PRLock *ml;
+#if defined(_PR_NATIVE_THREADS)
+ PRInt32 numProcs;
+ PRCondVar *cv;
+#else
+ int pipefd[2];
+#endif
+ pr_PidRecord **pidTable;
+
+#ifdef _PR_SHARE_CLONES
+ struct pr_CreateProcOp *opHead, *opTail;
+#endif
+
+#ifdef AIX
+ pid_t (*forkptr)(void); /* Newer versions of AIX (starting in 4.3.2)
+ * have f_fork, which is faster than the
+ * regular fork in a multithreaded process
+ * because it skips calling the fork handlers.
+ * So we look up the f_fork symbol to see if
+ * it's available and fall back on fork.
+ */
+#endif /* AIX */
+} pr_wp;
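+
+/*
+ * Illustrative sketch (not part of the original file): on AIX the f_fork
+ * lookup described above can be done with the standard dlfcn interface,
+ * roughly as follows (error handling trimmed; the actual initialization
+ * code may differ):
+ */
+#if 0
+{
+    void *handle = dlopen(NULL, RTLD_NOW | RTLD_GLOBAL);
+
+    pr_wp.forkptr = (pid_t (*)(void)) dlsym(handle, "f_fork");
+    if (NULL == pr_wp.forkptr) {
+        pr_wp.forkptr = fork;   /* f_fork not available; use regular fork */
+    }
+    dlclose(handle);
+}
+#endif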
+
+#ifdef _PR_SHARE_CLONES
+static int pr_waitpid_daemon_exit;
+
+void
+_MD_unix_terminate_waitpid_daemon(void)
+{
+ if (pr_wp.thread) {
+ pr_waitpid_daemon_exit = 1;
+ write(pr_wp.pipefd[1], "", 1);
+ PR_JoinThread(pr_wp.thread);
+ }
+}
+#endif
+
+static PRStatus _MD_InitProcesses(void);
+#if !defined(_PR_NATIVE_THREADS)
+static void pr_InstallSigchldHandler(void);
+#endif
+
+static PRProcess *
+ForkAndExec(
+ const char *path,
+ char *const *argv,
+ char *const *envp,
+ const PRProcessAttr *attr)
+{
+ PRProcess *process;
+ int nEnv, idx;
+ char *const *childEnvp;
+ char **newEnvp = NULL;
+ int flags;
+
+ process = PR_NEW(PRProcess);
+ if (!process) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return NULL;
+ }
+
+ childEnvp = envp;
+ if (attr && attr->fdInheritBuffer) {
+ PRBool found = PR_FALSE;
+
+ if (NULL == childEnvp) {
+#ifdef DARWIN
+#ifdef HAVE_CRT_EXTERNS_H
+ childEnvp = *(_NSGetEnviron());
+#else
+ /* _NSGetEnviron() is not available on iOS. */
+ PR_DELETE(process);
+ PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0);
+ return NULL;
+#endif
+#else
+ childEnvp = environ;
+#endif
+ }
+
+ for (nEnv = 0; childEnvp[nEnv]; nEnv++) {
+ }
+ newEnvp = (char **) PR_MALLOC((nEnv + 2) * sizeof(char *));
+ if (NULL == newEnvp) {
+ PR_DELETE(process);
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return NULL;
+ }
+ for (idx = 0; idx < nEnv; idx++) {
+ newEnvp[idx] = childEnvp[idx];
+ if (!found && !strncmp(newEnvp[idx], "NSPR_INHERIT_FDS=", 17)) {
+ newEnvp[idx] = attr->fdInheritBuffer;
+ found = PR_TRUE;
+ }
+ }
+ if (!found) {
+ newEnvp[idx++] = attr->fdInheritBuffer;
+ }
+ newEnvp[idx] = NULL;
+ childEnvp = newEnvp;
+ }
+
+#ifdef AIX
+ process->md.pid = (*pr_wp.forkptr)();
+#elif defined(NTO) || defined(SYMBIAN)
+ /*
+ * fork() & exec() does not work in a multithreaded process.
+ * Use spawn() instead.
+ */
+ {
+ int fd_map[3] = { 0, 1, 2 };
+
+ if (attr) {
+ if (attr->stdinFd && attr->stdinFd->secret->md.osfd != 0) {
+ fd_map[0] = dup(attr->stdinFd->secret->md.osfd);
+ flags = fcntl(fd_map[0], F_GETFL, 0);
+ if (flags & O_NONBLOCK)
+ fcntl(fd_map[0], F_SETFL, flags & ~O_NONBLOCK);
+ }
+ if (attr->stdoutFd && attr->stdoutFd->secret->md.osfd != 1) {
+ fd_map[1] = dup(attr->stdoutFd->secret->md.osfd);
+ flags = fcntl(fd_map[1], F_GETFL, 0);
+ if (flags & O_NONBLOCK)
+ fcntl(fd_map[1], F_SETFL, flags & ~O_NONBLOCK);
+ }
+ if (attr->stderrFd && attr->stderrFd->secret->md.osfd != 2) {
+ fd_map[2] = dup(attr->stderrFd->secret->md.osfd);
+ flags = fcntl(fd_map[2], F_GETFL, 0);
+ if (flags & O_NONBLOCK)
+ fcntl(fd_map[2], F_SETFL, flags & ~O_NONBLOCK);
+ }
+
+ PR_ASSERT(attr->currentDirectory == NULL); /* not implemented */
+ }
+
+#ifdef SYMBIAN
+ /* In Symbian OS, we use posix_spawn instead of fork() and exec() */
+ posix_spawn(&(process->md.pid), path, NULL, NULL, argv, childEnvp);
+#else
+ process->md.pid = spawn(path, 3, fd_map, NULL, argv, childEnvp);
+#endif
+
+ if (fd_map[0] != 0)
+ close(fd_map[0]);
+ if (fd_map[1] != 1)
+ close(fd_map[1]);
+ if (fd_map[2] != 2)
+ close(fd_map[2]);
+ }
+#else
+ process->md.pid = fork();
+#endif
+ if ((pid_t) -1 == process->md.pid) {
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, errno);
+ PR_DELETE(process);
+ if (newEnvp) {
+ PR_DELETE(newEnvp);
+ }
+ return NULL;
+ } else if (0 == process->md.pid) { /* the child process */
+ /*
+ * If the child process needs to exit, it must call _exit().
+ * Do not call exit(), because exit() will flush and close
+ * the standard I/O file descriptors, and hence corrupt
+ * the parent process's standard I/O data structures.
+ */
+
+#if !defined(NTO) && !defined(SYMBIAN)
+ if (attr) {
+ /* the osfd's to redirect stdin, stdout, and stderr to */
+ int in_osfd = -1, out_osfd = -1, err_osfd = -1;
+
+ if (attr->stdinFd
+ && attr->stdinFd->secret->md.osfd != 0) {
+ in_osfd = attr->stdinFd->secret->md.osfd;
+ if (dup2(in_osfd, 0) != 0) {
+ _exit(1); /* failed */
+ }
+ flags = fcntl(0, F_GETFL, 0);
+ if (flags & O_NONBLOCK) {
+ fcntl(0, F_SETFL, flags & ~O_NONBLOCK);
+ }
+ }
+ if (attr->stdoutFd
+ && attr->stdoutFd->secret->md.osfd != 1) {
+ out_osfd = attr->stdoutFd->secret->md.osfd;
+ if (dup2(out_osfd, 1) != 1) {
+ _exit(1); /* failed */
+ }
+ flags = fcntl(1, F_GETFL, 0);
+ if (flags & O_NONBLOCK) {
+ fcntl(1, F_SETFL, flags & ~O_NONBLOCK);
+ }
+ }
+ if (attr->stderrFd
+ && attr->stderrFd->secret->md.osfd != 2) {
+ err_osfd = attr->stderrFd->secret->md.osfd;
+ if (dup2(err_osfd, 2) != 2) {
+ _exit(1); /* failed */
+ }
+ flags = fcntl(2, F_GETFL, 0);
+ if (flags & O_NONBLOCK) {
+ fcntl(2, F_SETFL, flags & ~O_NONBLOCK);
+ }
+ }
+ if (in_osfd != -1) {
+ close(in_osfd);
+ }
+ if (out_osfd != -1 && out_osfd != in_osfd) {
+ close(out_osfd);
+ }
+ if (err_osfd != -1 && err_osfd != in_osfd
+ && err_osfd != out_osfd) {
+ close(err_osfd);
+ }
+ if (attr->currentDirectory) {
+ if (chdir(attr->currentDirectory) < 0) {
+ _exit(1); /* failed */
+ }
+ }
+ }
+
+ if (childEnvp) {
+ (void)execve(path, argv, childEnvp);
+ } else {
+ /* Inherit the environment of the parent. */
+ (void)execv(path, argv);
+ }
+ /* Whoops! It returned. That's a bad sign. */
+ _exit(1);
+#endif /* !NTO && !SYMBIAN */
+ }
+
+ if (newEnvp) {
+ PR_DELETE(newEnvp);
+ }
+
+#if defined(_PR_NATIVE_THREADS)
+ PR_Lock(pr_wp.ml);
+ if (0 == pr_wp.numProcs++) {
+ PR_NotifyCondVar(pr_wp.cv);
+ }
+ PR_Unlock(pr_wp.ml);
+#endif
+ return process;
+}
+
+#ifdef _PR_SHARE_CLONES
+
+struct pr_CreateProcOp {
+ const char *path;
+ char *const *argv;
+ char *const *envp;
+ const PRProcessAttr *attr;
+ PRProcess *process;
+ PRErrorCode prerror;
+ PRInt32 oserror;
+ PRBool done;
+ PRCondVar *doneCV;
+ struct pr_CreateProcOp *next;
+};
+
+PRProcess *
+_MD_CreateUnixProcess(
+ const char *path,
+ char *const *argv,
+ char *const *envp,
+ const PRProcessAttr *attr)
+{
+ struct pr_CreateProcOp *op;
+ PRProcess *proc;
+ int rv;
+
+ if (PR_CallOnce(&pr_wp.once, _MD_InitProcesses) == PR_FAILURE) {
+ return NULL;
+ }
+
+ op = PR_NEW(struct pr_CreateProcOp);
+ if (NULL == op) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return NULL;
+ }
+ op->path = path;
+ op->argv = argv;
+ op->envp = envp;
+ op->attr = attr;
+ op->done = PR_FALSE;
+ op->doneCV = PR_NewCondVar(pr_wp.ml);
+ if (NULL == op->doneCV) {
+ PR_DELETE(op);
+ return NULL;
+ }
+ PR_Lock(pr_wp.ml);
+
+ /* add to the tail of op queue */
+ op->next = NULL;
+ if (pr_wp.opTail) {
+ pr_wp.opTail->next = op;
+ pr_wp.opTail = op;
+ } else {
+ PR_ASSERT(NULL == pr_wp.opHead);
+ pr_wp.opHead = pr_wp.opTail = op;
+ }
+
+ /* wake up the daemon thread */
+ do {
+ rv = write(pr_wp.pipefd[1], "", 1);
+ } while (-1 == rv && EINTR == errno);
+
+ while (op->done == PR_FALSE) {
+ PR_WaitCondVar(op->doneCV, PR_INTERVAL_NO_TIMEOUT);
+ }
+ PR_Unlock(pr_wp.ml);
+ PR_DestroyCondVar(op->doneCV);
+ proc = op->process;
+ if (!proc) {
+ PR_SetError(op->prerror, op->oserror);
+ }
+ PR_DELETE(op);
+ return proc;
+}
+
+#else /* ! _PR_SHARE_CLONES */
+
+PRProcess *
+_MD_CreateUnixProcess(
+ const char *path,
+ char *const *argv,
+ char *const *envp,
+ const PRProcessAttr *attr)
+{
+ if (PR_CallOnce(&pr_wp.once, _MD_InitProcesses) == PR_FAILURE) {
+ return NULL;
+ }
+ return ForkAndExec(path, argv, envp, attr);
+} /* _MD_CreateUnixProcess */
+
+#endif /* _PR_SHARE_CLONES */
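+
+/*
+ * Illustrative sketch (not part of the original file): _MD_CreateUnixProcess
+ * backs the public PR_CreateProcess() call.  A hypothetical caller that runs
+ * "/bin/ls -l" and waits for it to finish:
+ */
+#if 0
+{
+    char *const argv[] = { "/bin/ls", "-l", NULL };
+    PRProcess *proc;
+    PRInt32 exitCode;
+
+    proc = PR_CreateProcess("/bin/ls", argv, NULL, NULL);
+    if (proc) {
+        PR_WaitProcess(proc, &exitCode);
+    }
+}
+#endif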
+
+/*
+ * The pid table is a hashtable.
+ *
+ * The number of buckets in the hashtable (NBUCKETS) must be a power of 2.
+ */
+#define NBUCKETS_LOG2 6
+#define NBUCKETS (1 << NBUCKETS_LOG2)
+#define PID_HASH_MASK ((pid_t) (NBUCKETS - 1))
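+
+/*
+** For example (illustrative only): with NBUCKETS_LOG2 == 6 there are
+** 64 buckets and PID_HASH_MASK == 63, so pid 12345 lands in bucket
+** 12345 & 63 == 57.  Masking is a valid substitute for a modulus only
+** because NBUCKETS is a power of 2.
+*/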
+
+static pr_PidRecord *
+FindPidTable(pid_t pid)
+{
+ pr_PidRecord *pRec;
+ int keyHash = (int) (pid & PID_HASH_MASK);
+
+ pRec = pr_wp.pidTable[keyHash];
+ while (pRec) {
+ if (pRec->pid == pid) {
+ break;
+ }
+ pRec = pRec->next;
+ }
+ return pRec;
+}
+
+static void
+InsertPidTable(pr_PidRecord *pRec)
+{
+ int keyHash = (int) (pRec->pid & PID_HASH_MASK);
+
+ pRec->next = pr_wp.pidTable[keyHash];
+ pr_wp.pidTable[keyHash] = pRec;
+}
+
+static void
+DeletePidTable(pr_PidRecord *pRec)
+{
+ int keyHash = (int) (pRec->pid & PID_HASH_MASK);
+
+ if (pr_wp.pidTable[keyHash] == pRec) {
+ pr_wp.pidTable[keyHash] = pRec->next;
+ } else {
+ pr_PidRecord *pred, *cur; /* predecessor and current */
+
+ pred = pr_wp.pidTable[keyHash];
+ cur = pred->next;
+ while (cur) {
+ if (cur == pRec) {
+ pred->next = cur->next;
+ break;
+ }
+ pred = cur;
+ cur = cur->next;
+ }
+ PR_ASSERT(cur != NULL);
+ }
+}
+
+static int
+ExtractExitStatus(int rawExitStatus)
+{
+ /*
+ * We did not specify the WCONTINUED and WUNTRACED options
+ * for waitpid, so these two events should not be reported.
+ */
+ PR_ASSERT(!WIFSTOPPED(rawExitStatus));
+#ifdef WIFCONTINUED
+ PR_ASSERT(!WIFCONTINUED(rawExitStatus));
+#endif
+ if (WIFEXITED(rawExitStatus)) {
+ return WEXITSTATUS(rawExitStatus);
+ } else {
+ PR_ASSERT(WIFSIGNALED(rawExitStatus));
+ return _PR_SIGNALED_EXITSTATUS;
+ }
+}
+
+static void
+ProcessReapedChildInternal(pid_t pid, int status)
+{
+ pr_PidRecord *pRec;
+
+ pRec = FindPidTable(pid);
+ if (NULL == pRec) {
+ pRec = PR_NEW(pr_PidRecord);
+ pRec->pid = pid;
+ pRec->state = _PR_PID_REAPED;
+ pRec->exitStatus = ExtractExitStatus(status);
+ pRec->reapedCV = NULL;
+ InsertPidTable(pRec);
+ } else {
+ PR_ASSERT(pRec->state != _PR_PID_REAPED);
+ if (_PR_PID_DETACHED == pRec->state) {
+ PR_ASSERT(NULL == pRec->reapedCV);
+ DeletePidTable(pRec);
+ PR_DELETE(pRec);
+ } else {
+ PR_ASSERT(_PR_PID_WAITING == pRec->state);
+ PR_ASSERT(NULL != pRec->reapedCV);
+ pRec->exitStatus = ExtractExitStatus(status);
+ pRec->state = _PR_PID_REAPED;
+ PR_NotifyCondVar(pRec->reapedCV);
+ }
+ }
+}
+
+#if defined(_PR_NATIVE_THREADS)
+
+/*
+ * If all the threads are native threads, the daemon thread is
+ * simpler. We don't need to catch the SIGCHLD signal. We can
+ * just have the daemon thread block in waitpid().
+ */
+
+static void WaitPidDaemonThread(void *unused)
+{
+ pid_t pid;
+ int status;
+
+ while (1) {
+ PR_Lock(pr_wp.ml);
+ while (0 == pr_wp.numProcs) {
+ PR_WaitCondVar(pr_wp.cv, PR_INTERVAL_NO_TIMEOUT);
+ }
+ PR_Unlock(pr_wp.ml);
+
+ while (1) {
+ do {
+ pid = waitpid((pid_t) -1, &status, 0);
+ } while ((pid_t) -1 == pid && EINTR == errno);
+
+ /*
+ * waitpid() cannot return 0 because we did not invoke it
+ * with the WNOHANG option.
+ */
+ PR_ASSERT(0 != pid);
+
+ /*
+ * The only possible error code is ECHILD. But if we do
+ * our accounting correctly, we should only call waitpid()
+ * when there is a child process to wait for.
+ */
+ PR_ASSERT((pid_t) -1 != pid);
+ if ((pid_t) -1 == pid) {
+ break;
+ }
+
+ PR_Lock(pr_wp.ml);
+ ProcessReapedChildInternal(pid, status);
+ pr_wp.numProcs--;
+ while (0 == pr_wp.numProcs) {
+ PR_WaitCondVar(pr_wp.cv, PR_INTERVAL_NO_TIMEOUT);
+ }
+ PR_Unlock(pr_wp.ml);
+ }
+ }
+}
+
+#else /* _PR_NATIVE_THREADS */
+
+static void WaitPidDaemonThread(void *unused)
+{
+ PRPollDesc pd;
+ PRFileDesc *fd;
+ int rv;
+ char buf[128];
+ pid_t pid;
+ int status;
+#ifdef _PR_SHARE_CLONES
+ struct pr_CreateProcOp *op;
+#endif
+
+#ifdef _PR_SHARE_CLONES
+ pr_InstallSigchldHandler();
+#endif
+
+ fd = PR_ImportFile(pr_wp.pipefd[0]);
+ PR_ASSERT(NULL != fd);
+ pd.fd = fd;
+ pd.in_flags = PR_POLL_READ;
+
+ while (1) {
+ rv = PR_Poll(&pd, 1, PR_INTERVAL_NO_TIMEOUT);
+ PR_ASSERT(1 == rv);
+
+#ifdef _PR_SHARE_CLONES
+ if (pr_waitpid_daemon_exit) {
+ return;
+ }
+ PR_Lock(pr_wp.ml);
+#endif
+
+ do {
+ rv = read(pr_wp.pipefd[0], buf, sizeof(buf));
+ } while (sizeof(buf) == rv || (-1 == rv && EINTR == errno));
+
+#ifdef _PR_SHARE_CLONES
+ PR_Unlock(pr_wp.ml);
+ while ((op = pr_wp.opHead) != NULL) {
+ op->process = ForkAndExec(op->path, op->argv,
+ op->envp, op->attr);
+ if (NULL == op->process) {
+ op->prerror = PR_GetError();
+ op->oserror = PR_GetOSError();
+ }
+ PR_Lock(pr_wp.ml);
+ pr_wp.opHead = op->next;
+ if (NULL == pr_wp.opHead) {
+ pr_wp.opTail = NULL;
+ }
+ op->done = PR_TRUE;
+ PR_NotifyCondVar(op->doneCV);
+ PR_Unlock(pr_wp.ml);
+ }
+#endif
+
+ while (1) {
+ do {
+ pid = waitpid((pid_t) -1, &status, WNOHANG);
+ } while ((pid_t) -1 == pid && EINTR == errno);
+ if (0 == pid) break;
+ if ((pid_t) -1 == pid) {
+ /* must be because we have no child processes */
+ PR_ASSERT(ECHILD == errno);
+ break;
+ }
+
+ PR_Lock(pr_wp.ml);
+ ProcessReapedChildInternal(pid, status);
+ PR_Unlock(pr_wp.ml);
+ }
+ }
+}
+
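+/*
+** Classic self-pipe arrangement: the SIGCHLD handler does nothing but
+** write a byte to the non-blocking pipe (write() is async-signal-safe),
+** and the daemon thread's PR_Poll() on pr_wp.pipefd[0] wakes up, drains
+** the pipe, and reaps children with waitpid(..., WNOHANG) outside of
+** signal context.
+*/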
+static void pr_SigchldHandler(int sig)
+{
+ int errnoCopy;
+ int rv;
+
+ errnoCopy = errno;
+
+ do {
+ rv = write(pr_wp.pipefd[1], "", 1);
+ } while (-1 == rv && EINTR == errno);
+
+#ifdef DEBUG
+ if (-1 == rv && EAGAIN != errno && EWOULDBLOCK != errno) {
+ char *msg = "cannot write to pipe\n";
+        write(2, msg, strlen(msg));
+ _exit(1);
+ }
+#endif
+
+ errno = errnoCopy;
+}
+
+static void pr_InstallSigchldHandler()
+{
+#if defined(HPUX) && defined(_PR_DCETHREADS)
+#error "HP-UX DCE threads have their own SIGCHLD handler"
+#endif
+
+ struct sigaction act, oact;
+ int rv;
+
+ act.sa_handler = pr_SigchldHandler;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_NOCLDSTOP | SA_RESTART;
+ rv = sigaction(SIGCHLD, &act, &oact);
+ PR_ASSERT(0 == rv);
+ /* Make sure we are not overriding someone else's SIGCHLD handler */
+#ifndef _PR_SHARE_CLONES
+ PR_ASSERT(oact.sa_handler == SIG_DFL);
+#endif
+}
+
+#endif /* !defined(_PR_NATIVE_THREADS) */
+
+static PRStatus _MD_InitProcesses(void)
+{
+#if !defined(_PR_NATIVE_THREADS)
+ int rv;
+ int flags;
+#endif
+
+#ifdef AIX
+ {
+ void *handle = dlopen(NULL, RTLD_NOW | RTLD_GLOBAL);
+ pr_wp.forkptr = (pid_t (*)(void)) dlsym(handle, "f_fork");
+ if (!pr_wp.forkptr) {
+ pr_wp.forkptr = fork;
+ }
+ dlclose(handle);
+ }
+#endif /* AIX */
+
+ pr_wp.ml = PR_NewLock();
+ PR_ASSERT(NULL != pr_wp.ml);
+
+#if defined(_PR_NATIVE_THREADS)
+ pr_wp.numProcs = 0;
+ pr_wp.cv = PR_NewCondVar(pr_wp.ml);
+ PR_ASSERT(NULL != pr_wp.cv);
+#else
+ rv = pipe(pr_wp.pipefd);
+ PR_ASSERT(0 == rv);
+ flags = fcntl(pr_wp.pipefd[0], F_GETFL, 0);
+ fcntl(pr_wp.pipefd[0], F_SETFL, flags | O_NONBLOCK);
+ flags = fcntl(pr_wp.pipefd[1], F_GETFL, 0);
+ fcntl(pr_wp.pipefd[1], F_SETFL, flags | O_NONBLOCK);
+
+#ifndef _PR_SHARE_CLONES
+ pr_InstallSigchldHandler();
+#endif
+#endif /* !_PR_NATIVE_THREADS */
+
+ pr_wp.thread = PR_CreateThread(PR_SYSTEM_THREAD,
+ WaitPidDaemonThread, NULL, PR_PRIORITY_NORMAL,
+#ifdef _PR_SHARE_CLONES
+ PR_GLOBAL_THREAD,
+#else
+ PR_LOCAL_THREAD,
+#endif
+ PR_JOINABLE_THREAD, 0);
+ PR_ASSERT(NULL != pr_wp.thread);
+
+ pr_wp.pidTable = (pr_PidRecord**)PR_CALLOC(NBUCKETS * sizeof(pr_PidRecord *));
+ PR_ASSERT(NULL != pr_wp.pidTable);
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_DetachUnixProcess(PRProcess *process)
+{
+ PRStatus retVal = PR_SUCCESS;
+ pr_PidRecord *pRec;
+
+ PR_Lock(pr_wp.ml);
+ pRec = FindPidTable(process->md.pid);
+ if (NULL == pRec) {
+ pRec = PR_NEW(pr_PidRecord);
+ if (NULL == pRec) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ retVal = PR_FAILURE;
+ goto done;
+ }
+ pRec->pid = process->md.pid;
+ pRec->state = _PR_PID_DETACHED;
+ pRec->reapedCV = NULL;
+ InsertPidTable(pRec);
+ } else {
+ PR_ASSERT(_PR_PID_REAPED == pRec->state);
+ if (_PR_PID_REAPED != pRec->state) {
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ retVal = PR_FAILURE;
+ } else {
+ DeletePidTable(pRec);
+ PR_ASSERT(NULL == pRec->reapedCV);
+ PR_DELETE(pRec);
+ }
+ }
+ PR_DELETE(process);
+
+done:
+ PR_Unlock(pr_wp.ml);
+ return retVal;
+}
+
+PRStatus _MD_WaitUnixProcess(
+ PRProcess *process,
+ PRInt32 *exitCode)
+{
+ pr_PidRecord *pRec;
+ PRStatus retVal = PR_SUCCESS;
+ PRBool interrupted = PR_FALSE;
+
+ PR_Lock(pr_wp.ml);
+ pRec = FindPidTable(process->md.pid);
+ if (NULL == pRec) {
+ pRec = PR_NEW(pr_PidRecord);
+ if (NULL == pRec) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ retVal = PR_FAILURE;
+ goto done;
+ }
+ pRec->pid = process->md.pid;
+ pRec->state = _PR_PID_WAITING;
+ pRec->reapedCV = PR_NewCondVar(pr_wp.ml);
+ if (NULL == pRec->reapedCV) {
+ PR_DELETE(pRec);
+ retVal = PR_FAILURE;
+ goto done;
+ }
+ InsertPidTable(pRec);
+ while (!interrupted && _PR_PID_REAPED != pRec->state) {
+ if (PR_WaitCondVar(pRec->reapedCV,
+ PR_INTERVAL_NO_TIMEOUT) == PR_FAILURE
+ && PR_GetError() == PR_PENDING_INTERRUPT_ERROR) {
+ interrupted = PR_TRUE;
+ }
+ }
+ if (_PR_PID_REAPED == pRec->state) {
+ if (exitCode) {
+ *exitCode = pRec->exitStatus;
+ }
+ } else {
+ PR_ASSERT(interrupted);
+ retVal = PR_FAILURE;
+ }
+ DeletePidTable(pRec);
+ PR_DestroyCondVar(pRec->reapedCV);
+ PR_DELETE(pRec);
+ } else {
+ PR_ASSERT(_PR_PID_REAPED == pRec->state);
+ PR_ASSERT(NULL == pRec->reapedCV);
+ DeletePidTable(pRec);
+ if (exitCode) {
+ *exitCode = pRec->exitStatus;
+ }
+ PR_DELETE(pRec);
+ }
+ PR_DELETE(process);
+
+done:
+ PR_Unlock(pr_wp.ml);
+ return retVal;
+} /* _MD_WaitUnixProcess */
+
+PRStatus _MD_KillUnixProcess(PRProcess *process)
+{
+ PRErrorCode prerror;
+ PRInt32 oserror;
+
+#ifdef SYMBIAN
+    /* In Symbian OS, we cannot kill another process with Open C */
+    PR_SetError(PR_OPERATION_NOT_SUPPORTED_ERROR, 0);
+ return PR_FAILURE;
+#else
+ if (kill(process->md.pid, SIGKILL) == 0) {
+ return PR_SUCCESS;
+ }
+ oserror = errno;
+ switch (oserror) {
+ case EPERM:
+ prerror = PR_NO_ACCESS_RIGHTS_ERROR;
+ break;
+ case ESRCH:
+ prerror = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ default:
+ prerror = PR_UNKNOWN_ERROR;
+ break;
+ }
+ PR_SetError(prerror, oserror);
+ return PR_FAILURE;
+#endif
+} /* _MD_KillUnixProcess */
diff --git a/nsprpub/pr/src/md/unix/uxrng.c b/nsprpub/pr/src/md/unix/uxrng.c
new file mode 100644
index 000000000..da2f7e9fe
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/uxrng.c
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#include "primpl.h"
+
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/time.h>
+
+
+#if defined(SOLARIS)
+
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ hrtime_t t;
+ t = gethrtime();
+ if (t) {
+ return _pr_CopyLowBits(buf, maxbytes, &t, sizeof(t));
+ }
+ return 0;
+}
+
+#elif defined(HPUX)
+
+#ifdef __ia64
+#include <ia64/sys/inline.h>
+
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ PRUint64 t;
+
+#ifdef __GNUC__
+ __asm__ __volatile__("mov %0 = ar.itc" : "=r" (t));
+#else
+ t = _Asm_mov_from_ar(_AREG44);
+#endif
+ return _pr_CopyLowBits(buf, maxbytes, &t, sizeof(t));
+}
+#else
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ extern int ret_cr16();
+ int cr16val;
+
+ cr16val = ret_cr16();
+ return(_pr_CopyLowBits(buf, maxbytes, &cr16val, sizeof(cr16val)));
+}
+#endif
+
+#elif defined(OSF1)
+
+#include <c_asm.h>
+
+/*
+ * Use the "get the cycle counter" instruction on the alpha.
+ * The low 32 bits completely turn over in less than a minute.
+ * The high 32 bits are some non-counter gunk that changes sometimes.
+ */
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ unsigned long t;
+
+#ifdef __GNUC__
+ __asm__("rpcc %0" : "=r" (t));
+#else
+ t = asm("rpcc %v0");
+#endif
+ return _pr_CopyLowBits(buf, maxbytes, &t, sizeof(t));
+}
+
+#elif defined(AIX)
+
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ return 0;
+}
+
+#elif (defined(LINUX) || defined(FREEBSD) || defined(__FreeBSD_kernel__) \
+ || defined(NETBSD) || defined(__NetBSD_kernel__) || defined(OPENBSD) \
+ || defined(SYMBIAN) || defined(__GNU__))
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+static int fdDevURandom;
+static PRCallOnceType coOpenDevURandom;
+
+static PRStatus OpenDevURandom( void )
+{
+ fdDevURandom = open( "/dev/urandom", O_RDONLY );
+ return((-1 == fdDevURandom)? PR_FAILURE : PR_SUCCESS );
+} /* end OpenDevURandom() */
+
+static size_t GetDevURandom( void *buf, size_t size )
+{
+ int bytesIn;
+ int rc;
+
+ rc = PR_CallOnce( &coOpenDevURandom, OpenDevURandom );
+ if ( PR_FAILURE == rc ) {
+ _PR_MD_MAP_OPEN_ERROR( errno );
+ return(0);
+ }
+
+ bytesIn = read( fdDevURandom, buf, size );
+ if ( -1 == bytesIn ) {
+ _PR_MD_MAP_READ_ERROR( errno );
+ return(0);
+ }
+
+ return( bytesIn );
+} /* end GetDevURandom() */
+
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ return(GetDevURandom( buf, maxbytes ));
+}
+
+#elif defined(IRIX)
+#include <fcntl.h>
+#undef PRIVATE
+#include <sys/mman.h>
+#include <sys/syssgi.h>
+#include <sys/immu.h>
+#include <sys/systeminfo.h>
+#include <sys/utsname.h>
+
+static size_t GetHighResClock(void *buf, size_t maxbuf)
+{
+ unsigned phys_addr, raddr, cycleval;
+ static volatile unsigned *iotimer_addr = NULL;
+ static int tries = 0;
+ static int cntr_size;
+ int mfd;
+ unsigned s0[2];
+
+#ifndef SGI_CYCLECNTR_SIZE
+#define SGI_CYCLECNTR_SIZE 165 /* Size user needs to use to read CC */
+#endif
+
+ if (iotimer_addr == NULL) {
+ if (tries++ > 1) {
+ /* Don't keep trying if it didn't work */
+ return 0;
+ }
+
+ /*
+ ** For SGI machines we can use the cycle counter, if it has one,
+ ** to generate some truly random numbers
+ */
+ phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
+ if (phys_addr) {
+ int pgsz = getpagesize();
+ int pgoffmask = pgsz - 1;
+
+ raddr = phys_addr & ~pgoffmask;
+ mfd = open("/dev/mmem", O_RDONLY);
+ if (mfd < 0) {
+ return 0;
+ }
+ iotimer_addr = (unsigned *)
+ mmap(0, pgoffmask, PROT_READ, MAP_PRIVATE, mfd, (int)raddr);
+ if (iotimer_addr == (unsigned*)-1) {
+ close(mfd);
+ iotimer_addr = NULL;
+ return 0;
+ }
+ iotimer_addr = (unsigned*)
+ ((__psint_t)iotimer_addr | (phys_addr & pgoffmask));
+ /*
+ * The file 'mfd' is purposefully not closed.
+ */
+ cntr_size = syssgi(SGI_CYCLECNTR_SIZE);
+ if (cntr_size < 0) {
+ struct utsname utsinfo;
+
+ /*
+ * We must be executing on a 6.0 or earlier system, since the
+ * SGI_CYCLECNTR_SIZE call is not supported.
+ *
+ * The only pre-6.1 platforms with 64-bit counters are
+ * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
+ */
+ uname(&utsinfo);
+ if (!strncmp(utsinfo.machine, "IP19", 4) ||
+ !strncmp(utsinfo.machine, "IP21", 4))
+ cntr_size = 64;
+ else
+ cntr_size = 32;
+ }
+ cntr_size /= 8; /* Convert from bits to bytes */
+ }
+ }
+
+ s0[0] = *iotimer_addr;
+ if (cntr_size > 4)
+ s0[1] = *(iotimer_addr + 1);
+ memcpy(buf, (char *)&s0[0], cntr_size);
+ return _pr_CopyLowBits(buf, maxbuf, &s0, cntr_size);
+}
+
+#elif defined(SCO) || defined(UNIXWARE) || defined(BSDI) || defined(NTO) \
+ || defined(QNX) || defined(DARWIN) || defined(RISCOS)
+#include <sys/times.h>
+
+static size_t
+GetHighResClock(void *buf, size_t maxbytes)
+{
+ int ticks;
+ struct tms buffer;
+
+ ticks=times(&buffer);
+ return _pr_CopyLowBits(buf, maxbytes, &ticks, sizeof(ticks));
+}
+#else
+#error! Platform undefined
+#endif /* defined(SOLARIS) */
+
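+/*
+** Fill buf with noise: GetHighResClock() contributes whatever the
+** platform clock source yields, and gettimeofday() microseconds and
+** seconds top up any remaining bytes.  The return value is the number
+** of bytes actually written, which may be less than size.
+*/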
+extern PRSize _PR_MD_GetRandomNoise( void *buf, PRSize size )
+{
+ struct timeval tv;
+ int n = 0;
+ int s;
+
+ n += GetHighResClock(buf, size);
+ size -= n;
+
+ GETTIMEOFDAY(&tv);
+
+ if ( size > 0 ) {
+ s = _pr_CopyLowBits((char*)buf+n, size, &tv.tv_usec, sizeof(tv.tv_usec));
+ size -= s;
+ n += s;
+ }
+ if ( size > 0 ) {
+        s = _pr_CopyLowBits((char*)buf+n, size, &tv.tv_sec, sizeof(tv.tv_sec));
+ size -= s;
+ n += s;
+ }
+
+ return n;
+} /* end _PR_MD_GetRandomNoise() */
diff --git a/nsprpub/pr/src/md/unix/uxshm.c b/nsprpub/pr/src/md/unix/uxshm.c
new file mode 100644
index 000000000..dec4e3a7a
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/uxshm.c
@@ -0,0 +1,643 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+** uxshm.c -- Unix implementations of NSPR Named Shared Memory
+**
+**
+** lth. Jul-1999.
+**
+*/
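+
+/*
+** Illustrative caller-level sketch of the cross-platform API these
+** functions back (names come from prshm.h; the segment name, size and
+** mode below are arbitrary examples):
+**
+**   PRSharedMemory *shm = PR_OpenSharedMemory("mySeg", 4096,
+**                                             PR_SHM_CREATE, 0600);
+**   if (NULL != shm) {
+**       void *addr = PR_AttachSharedMemory(shm, 0);
+**       if (NULL != addr) {
+**           memset(addr, 0, 4096);
+**           PR_DetachSharedMemory(shm, addr);
+**       }
+**       PR_CloseSharedMemory(shm);
+**       PR_DeleteSharedMemory("mySeg");
+**   }
+*/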
+#include <string.h>
+#include <prshm.h>
+#include <prerr.h>
+#include <prmem.h>
+#include "primpl.h"
+#include <fcntl.h>
+
+extern PRLogModuleInfo *_pr_shm_lm;
+
+
+#define NSPR_IPC_SHM_KEY 'b'
+/*
+** Implementation for System V
+*/
+#if defined PR_HAVE_SYSV_NAMED_SHARED_MEMORY
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define _MD_OPEN_SHARED_MEMORY _MD_OpenSharedMemory
+#define _MD_ATTACH_SHARED_MEMORY _MD_AttachSharedMemory
+#define _MD_DETACH_SHARED_MEMORY _MD_DetachSharedMemory
+#define _MD_CLOSE_SHARED_MEMORY _MD_CloseSharedMemory
+#define _MD_DELETE_SHARED_MEMORY _MD_DeleteSharedMemory
+
+extern PRSharedMemory * _MD_OpenSharedMemory(
+ const char *name,
+ PRSize size,
+ PRIntn flags,
+ PRIntn mode
+)
+{
+ PRStatus rc = PR_SUCCESS;
+ key_t key;
+ PRSharedMemory *shm;
+ char ipcname[PR_IPC_NAME_SIZE];
+
+ rc = _PR_MakeNativeIPCName( name, ipcname, PR_IPC_NAME_SIZE, _PRIPCShm );
+ if ( PR_FAILURE == rc )
+ {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): _PR_MakeNativeIPCName() failed: %s", name ));
+ return( NULL );
+ }
+
+ shm = PR_NEWZAP( PRSharedMemory );
+ if ( NULL == shm )
+ {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0 );
+ PR_LOG(_pr_shm_lm, PR_LOG_DEBUG, ( "PR_OpenSharedMemory: New PRSharedMemory out of memory"));
+ return( NULL );
+ }
+
+ shm->ipcname = (char*)PR_MALLOC( strlen( ipcname ) + 1 );
+ if ( NULL == shm->ipcname )
+ {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0 );
+ PR_LOG(_pr_shm_lm, PR_LOG_DEBUG, ( "PR_OpenSharedMemory: New shm->ipcname out of memory"));
+ PR_DELETE( shm );
+ return( NULL );
+ }
+
+ /* copy args to struct */
+ strcpy( shm->ipcname, ipcname );
+ shm->size = size;
+ shm->mode = mode;
+ shm->flags = flags;
+ shm->ident = _PR_SHM_IDENT;
+
+ /* create the file first */
+ if ( flags & PR_SHM_CREATE ) {
+ int osfd = open( shm->ipcname, (O_RDWR | O_CREAT), shm->mode );
+ if ( -1 == osfd ) {
+ _PR_MD_MAP_OPEN_ERROR( errno );
+ PR_FREEIF( shm->ipcname );
+ PR_DELETE( shm );
+ return( NULL );
+ }
+ if ( close(osfd) == -1 ) {
+ _PR_MD_MAP_CLOSE_ERROR( errno );
+ PR_FREEIF( shm->ipcname );
+ PR_DELETE( shm );
+ return( NULL );
+ }
+ }
+
+ /* hash the shm.name to an ID */
+ key = ftok( shm->ipcname, NSPR_IPC_SHM_KEY );
+ if ( -1 == key )
+ {
+ rc = PR_FAILURE;
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): ftok() failed on name: %s", shm->ipcname));
+ PR_FREEIF( shm->ipcname );
+ PR_DELETE( shm );
+ return( NULL );
+ }
+
+ /* get the shared memory */
+ if ( flags & PR_SHM_CREATE ) {
+ shm->id = shmget( key, shm->size, ( shm->mode | IPC_CREAT|IPC_EXCL));
+ if ( shm->id >= 0 ) {
+ return( shm );
+ }
+ if ((errno == EEXIST) && (flags & PR_SHM_EXCL)) {
+ PR_SetError( PR_FILE_EXISTS_ERROR, errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): shmget() exclusive failed, errno: %d", errno));
+ PR_FREEIF(shm->ipcname);
+ PR_DELETE(shm);
+ return(NULL);
+ }
+ }
+
+ shm->id = shmget( key, shm->size, shm->mode );
+ if ( -1 == shm->id ) {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): shmget() failed, errno: %d", errno));
+ PR_FREEIF(shm->ipcname);
+ PR_DELETE(shm);
+ return(NULL);
+ }
+
+ return( shm );
+} /* end _MD_OpenSharedMemory() */
+
+extern void * _MD_AttachSharedMemory( PRSharedMemory *shm, PRIntn flags )
+{
+ void *addr;
+ PRUint32 aFlags = shm->mode;
+
+ PR_ASSERT( shm->ident == _PR_SHM_IDENT );
+
+ aFlags |= (flags & PR_SHM_READONLY )? SHM_RDONLY : 0;
+
+ addr = shmat( shm->id, NULL, aFlags );
+ if ( (void*)-1 == addr )
+ {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_AttachSharedMemory(): shmat() failed on name: %s, OsError: %d",
+ shm->ipcname, PR_GetOSError() ));
+ addr = NULL;
+ }
+
+ return addr;
+}
+
+extern PRStatus _MD_DetachSharedMemory( PRSharedMemory *shm, void *addr )
+{
+ PRStatus rc = PR_SUCCESS;
+ PRIntn urc;
+
+ PR_ASSERT( shm->ident == _PR_SHM_IDENT );
+
+ urc = shmdt( addr );
+ if ( -1 == urc )
+ {
+ rc = PR_FAILURE;
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DetachSharedMemory(): shmdt() failed on name: %s", shm->ipcname ));
+ }
+
+ return rc;
+}
+
+extern PRStatus _MD_CloseSharedMemory( PRSharedMemory *shm )
+{
+ PR_ASSERT( shm->ident == _PR_SHM_IDENT );
+
+ PR_FREEIF(shm->ipcname);
+ PR_DELETE(shm);
+
+ return PR_SUCCESS;
+}
+
+extern PRStatus _MD_DeleteSharedMemory( const char *name )
+{
+ PRStatus rc = PR_SUCCESS;
+ key_t key;
+ int id;
+ PRIntn urc;
+ char ipcname[PR_IPC_NAME_SIZE];
+
+ rc = _PR_MakeNativeIPCName( name, ipcname, PR_IPC_NAME_SIZE, _PRIPCShm );
+ if ( PR_FAILURE == rc )
+ {
+ PR_SetError( PR_UNKNOWN_ERROR , errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): _PR_MakeNativeIPCName() failed: %s", name ));
+ return(PR_FAILURE);
+ }
+
+ /* create the file first */
+ {
+ int osfd = open( ipcname, (O_RDWR | O_CREAT), 0666 );
+ if ( -1 == osfd ) {
+ _PR_MD_MAP_OPEN_ERROR( errno );
+ return( PR_FAILURE );
+ }
+ if ( close(osfd) == -1 ) {
+ _PR_MD_MAP_CLOSE_ERROR( errno );
+ return( PR_FAILURE );
+ }
+ }
+
+ /* hash the shm.name to an ID */
+ key = ftok( ipcname, NSPR_IPC_SHM_KEY );
+ if ( -1 == key )
+ {
+ rc = PR_FAILURE;
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): ftok() failed on name: %s", ipcname));
+ }
+
+#ifdef SYMBIAN
+    /* In Symbian OS the system-imposed minimum size is 1 byte, not zero */
+ id = shmget( key, 1, 0 );
+#else
+ id = shmget( key, 0, 0 );
+#endif
+ if ( -1 == id ) {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): shmget() failed, errno: %d", errno));
+ return(PR_FAILURE);
+ }
+
+ urc = shmctl( id, IPC_RMID, NULL );
+ if ( -1 == urc )
+ {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): shmctl() failed on name: %s", ipcname ));
+ return(PR_FAILURE);
+ }
+
+ urc = unlink( ipcname );
+ if ( -1 == urc ) {
+ _PR_MD_MAP_UNLINK_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): unlink() failed: %s", ipcname ));
+ return(PR_FAILURE);
+ }
+
+ return rc;
+} /* end _MD_DeleteSharedMemory() */
+
+/*
+** Implementation for Posix
+*/
+#elif defined PR_HAVE_POSIX_NAMED_SHARED_MEMORY
+#include <sys/mman.h>
+
+#define _MD_OPEN_SHARED_MEMORY _MD_OpenSharedMemory
+#define _MD_ATTACH_SHARED_MEMORY _MD_AttachSharedMemory
+#define _MD_DETACH_SHARED_MEMORY _MD_DetachSharedMemory
+#define _MD_CLOSE_SHARED_MEMORY _MD_CloseSharedMemory
+#define _MD_DELETE_SHARED_MEMORY _MD_DeleteSharedMemory
+
+struct _MDSharedMemory {
+ int handle;
+};
+
+extern PRSharedMemory * _MD_OpenSharedMemory(
+ const char *name,
+ PRSize size,
+ PRIntn flags,
+ PRIntn mode
+)
+{
+ PRStatus rc = PR_SUCCESS;
+ PRInt32 end;
+ PRSharedMemory *shm;
+ char ipcname[PR_IPC_NAME_SIZE];
+
+ rc = _PR_MakeNativeIPCName( name, ipcname, PR_IPC_NAME_SIZE, _PRIPCShm );
+ if ( PR_FAILURE == rc )
+ {
+ PR_SetError( PR_UNKNOWN_ERROR , errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): _PR_MakeNativeIPCName() failed: %s", name ));
+ return( NULL );
+ }
+
+ shm = PR_NEWZAP( PRSharedMemory );
+ if ( NULL == shm )
+ {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0 );
+ PR_LOG(_pr_shm_lm, PR_LOG_DEBUG, ( "PR_OpenSharedMemory: New PRSharedMemory out of memory"));
+ return( NULL );
+ }
+
+ shm->ipcname = PR_MALLOC( strlen( ipcname ) + 1 );
+    if ( NULL == shm->ipcname )
+    {
+        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0 );
+        PR_LOG(_pr_shm_lm, PR_LOG_DEBUG, ( "PR_OpenSharedMemory: New shm->ipcname out of memory"));
+        PR_DELETE( shm );
+        return( NULL );
+    }
+
+ /* copy args to struct */
+ strcpy( shm->ipcname, ipcname );
+ shm->size = size;
+ shm->mode = mode;
+ shm->flags = flags;
+ shm->ident = _PR_SHM_IDENT;
+
+ /*
+ ** Create the shared memory
+ */
+ if ( flags & PR_SHM_CREATE ) {
+ int oflag = (O_CREAT | O_RDWR);
+
+ if ( flags & PR_SHM_EXCL )
+ oflag |= O_EXCL;
+ shm->id = shm_open( shm->ipcname, oflag, shm->mode );
+ } else {
+ shm->id = shm_open( shm->ipcname, O_RDWR, shm->mode );
+ }
+
+ if ( -1 == shm->id ) {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG(_pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): shm_open failed: %s, OSError: %d",
+ shm->ipcname, PR_GetOSError()));
+ PR_DELETE( shm->ipcname );
+ PR_DELETE( shm );
+ return(NULL);
+ }
+
+ end = ftruncate( shm->id, shm->size );
+ if ( -1 == end ) {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG(_pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): ftruncate failed, OSError: %d",
+ PR_GetOSError()));
+ PR_DELETE( shm->ipcname );
+ PR_DELETE( shm );
+ return(NULL);
+ }
+
+ return(shm);
+} /* end _MD_OpenSharedMemory() */
+
+extern void * _MD_AttachSharedMemory( PRSharedMemory *shm, PRIntn flags )
+{
+ void *addr;
+ PRIntn prot = (PROT_READ | PROT_WRITE);
+
+ PR_ASSERT( shm->ident == _PR_SHM_IDENT );
+
+ if ( PR_SHM_READONLY == flags)
+ prot ^= PROT_WRITE;
+
+ addr = mmap( (void*)0, shm->size, prot, MAP_SHARED, shm->id, 0 );
+ if ((void*)-1 == addr )
+ {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_AttachSharedMemory(): mmap failed: %s, errno: %d",
+ shm->ipcname, PR_GetOSError()));
+ addr = NULL;
+ } else {
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_AttachSharedMemory(): name: %s, attached at: %p", shm->ipcname, addr));
+ }
+
+ return addr;
+}
+
+extern PRStatus _MD_DetachSharedMemory( PRSharedMemory *shm, void *addr )
+{
+ PRStatus rc = PR_SUCCESS;
+ PRIntn urc;
+
+ PR_ASSERT( shm->ident == _PR_SHM_IDENT );
+
+ urc = munmap( addr, shm->size );
+ if ( -1 == urc )
+ {
+ rc = PR_FAILURE;
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DetachSharedMemory(): munmap failed: %s, errno: %d",
+ shm->ipcname, PR_GetOSError()));
+ }
+ return rc;
+}
+
+extern PRStatus _MD_CloseSharedMemory( PRSharedMemory *shm )
+{
+ int urc;
+
+ PR_ASSERT( shm->ident == _PR_SHM_IDENT );
+
+ urc = close( shm->id );
+ if ( -1 == urc ) {
+ _PR_MD_MAP_CLOSE_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_CloseSharedMemory(): close() failed, error: %d", PR_GetOSError()));
+ return(PR_FAILURE);
+ }
+ PR_DELETE( shm->ipcname );
+ PR_DELETE( shm );
+ return PR_SUCCESS;
+}
+
+extern PRStatus _MD_DeleteSharedMemory( const char *name )
+{
+ PRStatus rc = PR_SUCCESS;
+ PRUintn urc;
+ char ipcname[PR_IPC_NAME_SIZE];
+
+ rc = _PR_MakeNativeIPCName( name, ipcname, PR_IPC_NAME_SIZE, _PRIPCShm );
+ if ( PR_FAILURE == rc )
+ {
+ PR_SetError( PR_UNKNOWN_ERROR , errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_OpenSharedMemory(): _PR_MakeNativeIPCName() failed: %s", name ));
+ return rc;
+ }
+
+ urc = shm_unlink( ipcname );
+ if ( -1 == urc ) {
+ rc = PR_FAILURE;
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): shm_unlink failed: %s, errno: %d",
+ ipcname, PR_GetOSError()));
+ } else {
+ PR_LOG( _pr_shm_lm, PR_LOG_DEBUG,
+ ("_MD_DeleteSharedMemory(): %s, success", ipcname));
+ }
+
+ return rc;
+} /* end _MD_DeleteSharedMemory() */
+#endif
+
+
+
+/*
+** Unix implementation for anonymous memory (file) mapping
+*/
+extern PRLogModuleInfo *_pr_shma_lm;
+
+#include <unistd.h>
+
+extern PRFileMap* _md_OpenAnonFileMap(
+ const char *dirName,
+ PRSize size,
+ PRFileMapProtect prot
+)
+{
+ PRFileMap *fm = NULL;
+ PRFileDesc *fd;
+ int osfd;
+ PRIntn urc;
+ PRIntn mode = 0600;
+ char *genName;
+ pid_t pid = getpid(); /* for generating filename */
+ PRThread *tid = PR_GetCurrentThread(); /* for generating filename */
+ int incr; /* for generating filename */
+ const int maxTries = 20; /* maximum # attempts at a unique filename */
+ PRInt64 size64; /* 64-bit version of 'size' */
+
+ /*
+ ** generate a filename from input and runtime environment
+ ** open the file, unlink the file.
+ ** make maxTries number of attempts at uniqueness in the filename
+ */
+ for ( incr = 0; incr < maxTries ; incr++ ) {
+#if defined(SYMBIAN)
+#define NSPR_AFM_FILENAME "%s\\NSPR-AFM-%d-%p.%d"
+#else
+#define NSPR_AFM_FILENAME "%s/.NSPR-AFM-%d-%p.%d"
+#endif
+ genName = PR_smprintf( NSPR_AFM_FILENAME,
+ dirName, (int) pid, tid, incr );
+ if ( NULL == genName ) {
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): PR_snprintf(): failed, generating filename"));
+ goto Finished;
+ }
+
+ /* create the file */
+ osfd = open( genName, (O_CREAT | O_EXCL | O_RDWR), mode );
+ if ( -1 == osfd ) {
+ if ( EEXIST == errno ) {
+ PR_smprintf_free( genName );
+ continue; /* name exists, try again */
+ } else {
+ _PR_MD_MAP_OPEN_ERROR( errno );
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): open(): failed, filename: %s, errno: %d",
+ genName, PR_GetOSError()));
+ PR_smprintf_free( genName );
+ goto Finished;
+ }
+ }
+ break; /* name generation and open successful, break; */
+ } /* end for() */
+
+ if ( incr == maxTries ) {
+ PR_ASSERT( -1 == osfd );
+ PR_ASSERT( EEXIST == errno );
+ _PR_MD_MAP_OPEN_ERROR( errno );
+ goto Finished;
+ }
+
+ urc = unlink( genName );
+#if defined(SYMBIAN) && defined(__WINS__)
+ /* If it is being used by the system or another process, Symbian OS
+     * Emulator (WINS) considers this an error. */
+ if ( -1 == urc && EACCES != errno ) {
+#else
+ if ( -1 == urc ) {
+#endif
+ _PR_MD_MAP_UNLINK_ERROR( errno );
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): failed on unlink(), errno: %d", errno));
+ PR_smprintf_free( genName );
+ close( osfd );
+ goto Finished;
+ }
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): unlink(): %s", genName ));
+
+ PR_smprintf_free( genName );
+
+ fd = PR_ImportFile( osfd );
+ if ( NULL == fd ) {
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): PR_ImportFile(): failed"));
+ goto Finished;
+ }
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): fd: %p", fd ));
+
+ urc = ftruncate( fd->secret->md.osfd, size );
+ if ( -1 == urc ) {
+ _PR_MD_MAP_DEFAULT_ERROR( errno );
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): failed on ftruncate(), errno: %d", errno));
+ PR_Close( fd );
+ goto Finished;
+ }
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): ftruncate(): size: %d", size ));
+
+ LL_UI2L(size64, size); /* PRSize (size_t) is unsigned */
+ fm = PR_CreateFileMap( fd, size64, prot );
+ if ( NULL == fm ) {
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("PR_OpenAnonFileMap(): failed"));
+ PR_Close( fd );
+ goto Finished;
+ }
+ fm->md.isAnonFM = PR_TRUE; /* set fd close */
+
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_OpenAnonFileMap(): PR_CreateFileMap(): fm: %p", fm ));
+
+Finished:
+ return(fm);
+} /* end md_OpenAnonFileMap() */
+
+/*
+** _md_ExportFileMapAsString()
+**
+** Write the file map into buf as the string "osfd:prot" so that
+** _md_ImportFileMapFromString() can rebuild an equivalent PRFileMap,
+** for example in a child process that inherits the descriptor.
+*/
+extern PRStatus _md_ExportFileMapAsString(
+ PRFileMap *fm,
+ PRSize bufSize,
+ char *buf
+)
+{
+ PRIntn written;
+ PRIntn prot = (PRIntn)fm->prot;
+
+ written = PR_snprintf( buf, bufSize, "%ld:%d",
+ fm->fd->secret->md.osfd, prot );
+
+ return((written == -1)? PR_FAILURE : PR_SUCCESS);
+} /* end _md_ExportFileMapAsString() */
+
+
+extern PRFileMap * _md_ImportFileMapFromString(
+ const char *fmstring
+)
+{
+ PRStatus rc;
+ PRInt32 osfd;
+ PRIntn prot; /* really: a PRFileMapProtect */
+ PRFileDesc *fd;
+ PRFileMap *fm = NULL; /* default return value */
+ PRFileInfo64 info;
+
+ PR_sscanf( fmstring, "%ld:%d", &osfd, &prot );
+
+ /* import the os file descriptor */
+ fd = PR_ImportFile( osfd );
+ if ( NULL == fd ) {
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_ImportFileMapFromString(): PR_ImportFile() failed"));
+ goto Finished;
+ }
+
+ rc = PR_GetOpenFileInfo64( fd, &info );
+ if ( PR_FAILURE == rc ) {
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_ImportFileMapFromString(): PR_GetOpenFileInfo64() failed"));
+ goto Finished;
+ }
+
+ fm = PR_CreateFileMap( fd, info.size, (PRFileMapProtect)prot );
+ if ( NULL == fm ) {
+ PR_LOG( _pr_shma_lm, PR_LOG_DEBUG,
+ ("_md_ImportFileMapFromString(): PR_CreateFileMap() failed"));
+ }
+
+Finished:
+ return(fm);
+} /* end _md_ImportFileMapFromString() */
diff --git a/nsprpub/pr/src/md/unix/uxwrap.c b/nsprpub/pr/src/md/unix/uxwrap.c
new file mode 100644
index 000000000..3f8e149ef
--- /dev/null
+++ b/nsprpub/pr/src/md/unix/uxwrap.c
@@ -0,0 +1,513 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ *------------------------------------------------------------------------
+ * File: uxwrap.c
+ *
+ * Our wrapped versions of the Unix select() and poll() system calls.
+ *
+ *------------------------------------------------------------------------
+ */
+
+#include "primpl.h"
+
+#if defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY) || defined(QNX)
+/* Do not wrap select() and poll(). */
+#else /* defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY) */
+/* The include files for select() */
+#ifdef IRIX
+#include <unistd.h>
+#include <bstring.h>
+#endif
+
+#include <string.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
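+/*
+** ZAP_SET zeroes the first _width bits of an fd_set (rounded up to a
+** whole number of ints) so the caller's result sets can be rebuilt
+** from the poll results below.
+*/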
+#define ZAP_SET(_to, _width) \
+ PR_BEGIN_MACRO \
+ memset(_to, 0, \
+ ((_width + 8*sizeof(int)-1) / (8*sizeof(int))) \
+ * sizeof(int) \
+ ); \
+ PR_END_MACRO
+
+/* see comments in ns/cmd/xfe/mozilla.c (look for "PR_XGetXtHackFD") */
+static int _pr_xt_hack_fd = -1;
+
+int PR_XGetXtHackFD(void)
+{
+ int fds[2];
+
+ if (_pr_xt_hack_fd == -1) {
+ if (!pipe(fds)) {
+ _pr_xt_hack_fd = fds[0];
+ }
+ }
+ return _pr_xt_hack_fd;
+}
+
+static int (*_pr_xt_hack_okayToReleaseXLock)(void) = 0;
+
+void PR_SetXtHackOkayToReleaseXLockFn(int (*fn)(void))
+{
+ _pr_xt_hack_okayToReleaseXLock = fn;
+}
+
+
+/*
+ *-----------------------------------------------------------------------
+ * select() --
+ *
+ * Wrap up the select system call so that we can deschedule
+ * a thread that tries to wait for i/o.
+ *
+ *-----------------------------------------------------------------------
+ */
+
+#if defined(HPUX9)
+int select(size_t width, int *rl, int *wl, int *el, const struct timeval *tv)
+#elif defined(AIX_RENAME_SELECT)
+int wrap_select(unsigned long width, void *rl, void *wl, void *el,
+ struct timeval *tv)
+#elif defined(_PR_SELECT_CONST_TIMEVAL)
+int select(int width, fd_set *rd, fd_set *wr, fd_set *ex,
+ const struct timeval *tv)
+#else
+int select(int width, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *tv)
+#endif
+{
+ int osfd;
+ _PRUnixPollDesc *unixpds, *unixpd, *eunixpd;
+ PRInt32 pdcnt;
+ PRIntervalTime timeout;
+ int retVal;
+#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
+ fd_set *rd = (fd_set*) rl;
+ fd_set *wr = (fd_set*) wl;
+ fd_set *ex = (fd_set*) el;
+#endif
+
+#if 0
+ /*
+ * Easy special case: zero timeout. Simply call the native
+ * select() with no fear of blocking.
+ */
+ if (tv != NULL && tv->tv_sec == 0 && tv->tv_usec == 0) {
+#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
+ return _MD_SELECT(width, rl, wl, el, tv);
+#else
+ return _MD_SELECT(width, rd, wr, ex, tv);
+#endif
+ }
+#endif
+
+ if (!_pr_initialized) {
+ _PR_ImplicitInitialization();
+ }
+
+#ifndef _PR_LOCAL_THREADS_ONLY
+ if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
+ return _MD_SELECT(width, rd, wr, ex, tv);
+ }
+#endif
+
+ if (width < 0 || width > FD_SETSIZE) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* Compute timeout */
+ if (tv) {
+ /*
+ * These acceptable ranges for t_sec and t_usec are taken
+ * from the select() man pages.
+ */
+ if (tv->tv_sec < 0 || tv->tv_sec > 100000000
+ || tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* Convert microseconds to ticks */
+ timeout = PR_MicrosecondsToInterval(1000000*tv->tv_sec + tv->tv_usec);
+ } else {
+ /* tv being a NULL pointer means blocking indefinitely */
+ timeout = PR_INTERVAL_NO_TIMEOUT;
+ }
+
+ /* Check for no descriptors case (just doing a timeout) */
+ if ((!rd && !wr && !ex) || !width) {
+ PR_Sleep(timeout);
+ return 0;
+ }
+
+ /*
+ * Set up for PR_Poll(). The PRPollDesc array is allocated
+ * dynamically. If this turns out to have high performance
+ * penalty, one can change to use a large PRPollDesc array
+ * on the stack, and allocate dynamically only when it turns
+ * out to be not large enough.
+ *
+ * I allocate an array of size 'width', which is the maximum
+ * number of fds we may need to poll.
+ */
+ unixpds = (_PRUnixPollDesc *) PR_CALLOC(width * sizeof(_PRUnixPollDesc));
+ if (!unixpds) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ pdcnt = 0;
+ unixpd = unixpds;
+ for (osfd = 0; osfd < width; osfd++) {
+ int in_flags = 0;
+ if (rd && FD_ISSET(osfd, rd)) {
+ in_flags |= _PR_UNIX_POLL_READ;
+ }
+ if (wr && FD_ISSET(osfd, wr)) {
+ in_flags |= _PR_UNIX_POLL_WRITE;
+ }
+ if (ex && FD_ISSET(osfd, ex)) {
+ in_flags |= _PR_UNIX_POLL_EXCEPT;
+ }
+ if (in_flags) {
+ unixpd->osfd = osfd;
+ unixpd->in_flags = in_flags;
+ unixpd->out_flags = 0;
+ unixpd++;
+ pdcnt++;
+ }
+ }
+
+ /*
+ * see comments in mozilla/cmd/xfe/mozilla.c (look for
+ * "PR_XGetXtHackFD")
+ */
+ {
+ int needToLockXAgain;
+
+ needToLockXAgain = 0;
+ if (rd && (_pr_xt_hack_fd != -1)
+ && FD_ISSET(_pr_xt_hack_fd, rd) && PR_XIsLocked()
+ && (!_pr_xt_hack_okayToReleaseXLock
+ || _pr_xt_hack_okayToReleaseXLock())) {
+ PR_XUnlock();
+ needToLockXAgain = 1;
+ }
+
+ /* This is the potentially blocking step */
+ retVal = _PR_WaitForMultipleFDs(unixpds, pdcnt, timeout);
+
+ if (needToLockXAgain) {
+ PR_XLock();
+ }
+ }
+
+ if (retVal > 0) {
+ /* Compute select results */
+ if (rd) ZAP_SET(rd, width);
+ if (wr) ZAP_SET(wr, width);
+ if (ex) ZAP_SET(ex, width);
+
+ /*
+ * The return value can be either the number of ready file
+ * descriptors or the number of set bits in the three fd_set's.
+ */
+ retVal = 0; /* we're going to recompute */
+ eunixpd = unixpds + pdcnt;
+ for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
+ if (unixpd->out_flags) {
+ int nbits = 0; /* The number of set bits on for this fd */
+
+ if (unixpd->out_flags & _PR_UNIX_POLL_NVAL) {
+ errno = EBADF;
+ PR_LOG(_pr_io_lm, PR_LOG_ERROR,
+ ("select returns EBADF for %d", unixpd->osfd));
+ retVal = -1;
+ break;
+ }
+ /*
+ * If a socket has a pending error, it is considered
+ * both readable and writable. (See W. Richard Stevens,
+ * Unix Network Programming, Vol. 1, 2nd Ed., Section 6.3,
+ * pp. 153-154.) We also consider a socket readable if
+ * it has a hangup condition.
+ */
+ if (rd && (unixpd->in_flags & _PR_UNIX_POLL_READ)
+ && (unixpd->out_flags & (_PR_UNIX_POLL_READ
+ | _PR_UNIX_POLL_ERR | _PR_UNIX_POLL_HUP))) {
+ FD_SET(unixpd->osfd, rd);
+ nbits++;
+ }
+ if (wr && (unixpd->in_flags & _PR_UNIX_POLL_WRITE)
+ && (unixpd->out_flags & (_PR_UNIX_POLL_WRITE
+ | _PR_UNIX_POLL_ERR))) {
+ FD_SET(unixpd->osfd, wr);
+ nbits++;
+ }
+            if (ex && (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT)
+                && (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT)) {
+ FD_SET(unixpd->osfd, ex);
+ nbits++;
+ }
+ PR_ASSERT(nbits > 0);
+#if defined(HPUX) || defined(SOLARIS) || defined(OSF1) || defined(AIX)
+ retVal += nbits;
+#else /* IRIX */
+ retVal += 1;
+#endif
+ }
+ }
+ }
+
+ PR_ASSERT(tv || retVal != 0);
+ PR_LOG(_pr_io_lm, PR_LOG_MIN, ("select returns %d", retVal));
+ PR_DELETE(unixpds);
+
+ return retVal;
+}
+
+/*
+ * Redefine poll, when supported on platforms, for local threads
+ */
+
+/*
+ * I am commenting out the poll() wrapper for Linux for now
+ * because it is difficult to define _MD_POLL that works on all
+ * Linux varieties. People reported that glibc 2.0.7 on Debian
+ * 2.0 Linux machines doesn't have the __syscall_poll symbol
+ * defined. (WTC 30 Nov. 1998)
+ */
+#if defined(_PR_POLL_AVAILABLE) && !defined(LINUX)
+
+/*
+ *-----------------------------------------------------------------------
+ * poll() --
+ *
+ * RETURN VALUES:
+ * -1: fails, errno indicates the error.
+ * 0: timed out, the revents bitmasks are not set.
+ * positive value: the number of file descriptors for which poll()
+ * has set the revents bitmask.
+ *
+ *-----------------------------------------------------------------------
+ */
+
+#include <poll.h>
+
+#if defined(AIX_RENAME_SELECT)
+int wrap_poll(void *listptr, unsigned long nfds, long timeout)
+#elif (defined(AIX) && !defined(AIX_RENAME_SELECT))
+int poll(void *listptr, unsigned long nfds, long timeout)
+#elif defined(OSF1) || (defined(HPUX) && !defined(HPUX9))
+int poll(struct pollfd filedes[], unsigned int nfds, int timeout)
+#elif defined(HPUX9)
+int poll(struct pollfd filedes[], int nfds, int timeout)
+#elif defined(NETBSD)
+int poll(struct pollfd *filedes, nfds_t nfds, int timeout)
+#elif defined(OPENBSD)
+int poll(struct pollfd filedes[], nfds_t nfds, int timeout)
+#elif defined(FREEBSD)
+int poll(struct pollfd *filedes, unsigned nfds, int timeout)
+#else
+int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
+#endif
+{
+#ifdef AIX
+ struct pollfd *filedes = (struct pollfd *) listptr;
+#endif
+ struct pollfd *pfd, *epfd;
+ _PRUnixPollDesc *unixpds, *unixpd, *eunixpd;
+ PRIntervalTime ticks;
+ PRInt32 pdcnt;
+ int ready;
+
+ /*
+ * Easy special case: zero timeout. Simply call the native
+ * poll() with no fear of blocking.
+ */
+ if (timeout == 0) {
+#if defined(AIX)
+ return _MD_POLL(listptr, nfds, timeout);
+#else
+ return _MD_POLL(filedes, nfds, timeout);
+#endif
+ }
+
+ if (!_pr_initialized) {
+ _PR_ImplicitInitialization();
+ }
+
+#ifndef _PR_LOCAL_THREADS_ONLY
+ if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
+ return _MD_POLL(filedes, nfds, timeout);
+ }
+#endif
+
+ /* We do not support the pollmsg structures on AIX */
+#ifdef AIX
+ PR_ASSERT((nfds & 0xff00) == 0);
+#endif
+
+ if (timeout < 0 && timeout != -1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+    /* Convert timeout from milliseconds to ticks */
+ if (timeout == -1) {
+ ticks = PR_INTERVAL_NO_TIMEOUT;
+ } else {
+ ticks = PR_MillisecondsToInterval(timeout);
+ }
+
+ /* Check for no descriptor case (just do a timeout) */
+ if (nfds == 0) {
+ PR_Sleep(ticks);
+ return 0;
+ }
+
+ unixpds = (_PRUnixPollDesc *)
+ PR_MALLOC(nfds * sizeof(_PRUnixPollDesc));
+ if (NULL == unixpds) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ pdcnt = 0;
+ epfd = filedes + nfds;
+ unixpd = unixpds;
+ for (pfd = filedes; pfd < epfd; pfd++) {
+ /*
+ * poll() ignores negative fd's.
+ */
+ if (pfd->fd >= 0) {
+ unixpd->osfd = pfd->fd;
+#ifdef _PR_USE_POLL
+ unixpd->in_flags = pfd->events;
+#else
+ /*
+ * Map the poll events to one of the three that can be
+ * represented by the select fd_sets:
+ * POLLIN, POLLRDNORM ===> readable
+ * POLLOUT, POLLWRNORM ===> writable
+ * POLLPRI, POLLRDBAND ===> exception
+ * POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
+ * are ignored.
+ *
+ * The output events POLLERR and POLLHUP are never turned on.
+ * POLLNVAL may be turned on.
+ */
+ unixpd->in_flags = 0;
+ if (pfd->events & (POLLIN
+#ifdef POLLRDNORM
+ | POLLRDNORM
+#endif
+ )) {
+ unixpd->in_flags |= _PR_UNIX_POLL_READ;
+ }
+ if (pfd->events & (POLLOUT
+#ifdef POLLWRNORM
+ | POLLWRNORM
+#endif
+ )) {
+ unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
+ }
+ if (pfd->events & (POLLPRI
+#ifdef POLLRDBAND
+ | POLLRDBAND
+#endif
+ )) {
+                unixpd->in_flags |= _PR_UNIX_POLL_EXCEPT;
+ }
+#endif /* _PR_USE_POLL */
+ unixpd->out_flags = 0;
+ unixpd++;
+ pdcnt++;
+ }
+ }
+
+ ready = _PR_WaitForMultipleFDs(unixpds, pdcnt, ticks);
+ if (-1 == ready) {
+ if (PR_GetError() == PR_PENDING_INTERRUPT_ERROR) {
+ errno = EINTR; /* XXX we aren't interrupted by a signal, but... */
+ } else {
+ errno = PR_GetOSError();
+ }
+ }
+ if (ready <= 0) {
+ goto done;
+ }
+
+ /*
+ * Copy the out_flags from the _PRUnixPollDesc structures to the
+ * user's pollfd structures and free the allocated memory
+ */
+ unixpd = unixpds;
+ for (pfd = filedes; pfd < epfd; pfd++) {
+ pfd->revents = 0;
+ if (pfd->fd >= 0) {
+#ifdef _PR_USE_POLL
+ pfd->revents = unixpd->out_flags;
+#else
+ if (0 != unixpd->out_flags) {
+ if (unixpd->out_flags & _PR_UNIX_POLL_READ) {
+ if (pfd->events & POLLIN) {
+ pfd->revents |= POLLIN;
+ }
+#ifdef POLLRDNORM
+ if (pfd->events & POLLRDNORM) {
+ pfd->revents |= POLLRDNORM;
+ }
+#endif
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_WRITE) {
+ if (pfd->events & POLLOUT) {
+ pfd->revents |= POLLOUT;
+ }
+#ifdef POLLWRNORM
+ if (pfd->events & POLLWRNORM) {
+ pfd->revents |= POLLWRNORM;
+ }
+#endif
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT) {
+ if (pfd->events & POLLPRI) {
+ pfd->revents |= POLLPRI;
+ }
+#ifdef POLLRDBAND
+ if (pfd->events & POLLRDBAND) {
+ pfd->revents |= POLLRDBAND;
+ }
+#endif
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_ERR) {
+ pfd->revents |= POLLERR;
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_NVAL) {
+ pfd->revents |= POLLNVAL;
+ }
+ if (unixpd->out_flags & _PR_UNIX_POLL_HUP) {
+ pfd->revents |= POLLHUP;
+ }
+ }
+#endif /* _PR_USE_POLL */
+ unixpd++;
+ }
+ }
+
+done:
+ PR_DELETE(unixpds);
+ return ready;
+}
+
+#endif /* !defined(LINUX) */
+
+#endif /* defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY) */
+
+/* uxwrap.c */
+