-rw-r--r--  build/gecko_templates.mozbuild                                 5
-rw-r--r--  ipc/chromium/src/third_party/libevent/evutil_rand.c            9
-rw-r--r--  ipc/chromium/src/third_party/libevent/include/event2/util.h   18
-rw-r--r--  memory/mozjemalloc/jemalloc.c                                336
-rw-r--r--  python/psutil/psutil/_psutil_bsd.c                            85
-rw-r--r--  python/psutil/psutil/_psutil_bsd.h                             4
6 files changed, 375 insertions, 82 deletions
diff --git a/build/gecko_templates.mozbuild b/build/gecko_templates.mozbuild
index e2bc999b7..a15fdd77f 100644
--- a/build/gecko_templates.mozbuild
+++ b/build/gecko_templates.mozbuild
@@ -63,6 +63,11 @@ def GeckoBinary(linkage='dependent', msvcrt='dynamic', mozglue=None):
LDFLAGS += ['-rdynamic']
if CONFIG['MOZ_MEMORY']:
USE_LIBS += ['memory']
+ if CONFIG['OS_ARCH'] == 'FreeBSD':
+ # Make sure this function is linked in, so that it is
+ # executed at executable load (it has the 'constructor'
+ # flag).
+ LDFLAGS += ['-u', 'jemalloc_FreeBSD_init']
elif mozglue == 'library':
LIBRARY_DEFINES['MOZ_HAS_MOZGLUE'] = True
if not CONFIG['MOZ_GLUE_IN_PROGRAM']:
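The '-u' flag above works because a symbol named on the linker command line is treated as undefined, which forces the linker to pull in the object file that defines it even when nothing else references it. A standalone sketch of the mechanism (illustrative names, not from the patch):

#include <stdio.h>

__attribute__((constructor))
void
forced_init(void)
{
        fputs("runs at executable load, before main()\n", stderr);
}

int
main(void)
{
        return 0;
}

If forced_init lived in a static library with no other reference to it, linking with 'cc main.o -u forced_init -lfoo' would still pull in its object file, so the constructor runs at load time, which is what the build change guarantees for jemalloc_FreeBSD_init.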
diff --git a/ipc/chromium/src/third_party/libevent/evutil_rand.c b/ipc/chromium/src/third_party/libevent/evutil_rand.c
index 7c92bae23..3f5c05b34 100644
--- a/ipc/chromium/src/third_party/libevent/evutil_rand.c
+++ b/ipc/chromium/src/third_party/libevent/evutil_rand.c
@@ -138,12 +138,3 @@ evutil_secure_rng_get_bytes(void *buf, size_t n)
{
ev_arc4random_buf(buf, n);
}
-
-#if !defined(__OpenBSD__) && !defined(ANDROID) && !defined(__sun__)
-void
-evutil_secure_rng_add_bytes(const char *buf, size_t n)
-{
- arc4random_addrandom((unsigned char*)buf,
- n>(size_t)INT_MAX ? INT_MAX : (int)n);
-}
-#endif
diff --git a/ipc/chromium/src/third_party/libevent/include/event2/util.h b/ipc/chromium/src/third_party/libevent/include/event2/util.h
index 78516c156..0f9212af1 100644
--- a/ipc/chromium/src/third_party/libevent/include/event2/util.h
+++ b/ipc/chromium/src/third_party/libevent/include/event2/util.h
@@ -672,24 +672,6 @@ void evutil_secure_rng_get_bytes(void *buf, size_t n);
*/
int evutil_secure_rng_init(void);
-#if !defined(__OpenBSD__) && !defined(ANDROID) && !defined(__sun__)
-/** Seed the random number generator with extra random bytes.
-
- You should almost never need to call this function; it should be
- sufficient to invoke evutil_secure_rng_init(), or let Libevent take
- care of calling evutil_secure_rng_init() on its own.
-
- If you call this function as a _replacement_ for the regular
- entropy sources, then you need to be sure that your input
- contains a fairly large amount of strong entropy. Doing so is
- notoriously hard: most people who try get it wrong. Watch out!
-
- @param dat a buffer full of a strong source of random numbers
- @param datlen the number of bytes to read from datlen
- */
-void evutil_secure_rng_add_bytes(const char *dat, size_t datlen);
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/memory/mozjemalloc/jemalloc.c b/memory/mozjemalloc/jemalloc.c
index 0eb5241c7..bbd68365b 100644
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -174,6 +174,9 @@
#endif
#include <sys/types.h>
+#ifdef MOZ_MEMORY_BSD
+#include <sys/sysctl.h>
+#endif
#include <errno.h>
#include <stdlib.h>
@@ -486,10 +489,10 @@ typedef pthread_mutex_t malloc_spinlock_t;
#endif
/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
+static volatile bool malloc_initialized = false;
-#if defined(MOZ_MEMORY_WINDOWS)
-/* No init lock for Windows. */
+#if defined(MOZ_MEMORY_WINDOWS) || defined(__FreeBSD__)
+/* No init lock for Windows or FreeBSD. */
#elif defined(MOZ_MEMORY_DARWIN)
static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
#elif defined(MOZ_MEMORY_LINUX)
@@ -1385,6 +1388,11 @@ void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
* cases.
*/
+#ifdef __FreeBSD__
+// If true, allocator calls must be diverted to the bootstrap allocator.
+static __thread bool in_mutex_init = false;
+#endif
+
static bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
@@ -1402,6 +1410,19 @@ malloc_mutex_init(malloc_mutex_t *mutex)
return (true);
}
pthread_mutexattr_destroy(&attr);
+#elif defined(__FreeBSD__)
+ in_mutex_init = true;
+
+ *mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
+
+ // Make sure the necessary mutex memory is allocated right now, with
+ // 'in_mutex_init' set to true so that the allocations are diverted to
+ // the bootstrap allocator. Also force multi-thread initialization in
+ // libthr (checked and performed in 'pthread_mutex_lock').
+ pthread_mutex_lock(mutex);
+ pthread_mutex_unlock(mutex);
+
+ in_mutex_init = false;
#else
if (pthread_mutex_init(mutex, NULL) != 0)
return (true);
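Taken in isolation, the FreeBSD branch above boils down to the following pattern; a minimal sketch assuming FreeBSD's libthr (which provides PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP), with the actual allocation diversion living elsewhere:

#include <pthread.h>
#include <stdbool.h>

static __thread bool in_mutex_init = false;

static bool
lock_init(pthread_mutex_t *m)
{
        in_mutex_init = true;   /* allocator calls are diverted from here on */

        *m = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;

        /* Touch the lock so libthr allocates its internal state now, while
         * the flag is set, and runs its one-time multi-thread setup. */
        pthread_mutex_lock(m);
        pthread_mutex_unlock(m);

        in_mutex_init = false;
        return false;           /* success, as in malloc_mutex_init */
}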
@@ -1455,6 +1476,8 @@ malloc_spin_init(malloc_spinlock_t *lock)
return (true);
}
pthread_mutexattr_destroy(&attr);
+#elif defined(__FreeBSD__)
+ malloc_lock_init(lock);
#else
if (pthread_mutex_init(lock, NULL) != 0)
return (true);
@@ -4739,25 +4762,7 @@ huge_dalloc(void *ptr)
* Platform-specific methods to determine the number of CPUs in a system.
* This will be used to determine the desired number of arenas.
*/
-#ifdef MOZ_MEMORY_BSD
-static inline unsigned
-malloc_ncpus(void)
-{
- unsigned ret;
- int mib[2];
- size_t len;
-
- mib[0] = CTL_HW;
- mib[1] = HW_NCPU;
- len = sizeof(ret);
- if (sysctl(mib, 2, &ret, &len, (void *) 0, 0) == -1) {
- /* Error. */
- return (1);
- }
-
- return (ret);
-}
-#elif (defined(MOZ_MEMORY_LINUX))
+#if (defined(MOZ_MEMORY_LINUX))
#include <fcntl.h>
static inline unsigned
@@ -4829,8 +4834,7 @@ malloc_ncpus(void)
else
return (n);
}
-#elif (defined(MOZ_MEMORY_SOLARIS))
-
+#elif (defined(MOZ_MEMORY_SOLARIS) || defined(MOZ_MEMORY_BSD))
static inline unsigned
malloc_ncpus(void)
{
@@ -4975,18 +4979,13 @@ malloc_print_stats(void)
}
}
-/*
- * FreeBSD's pthreads implementation calls malloc(3), so the malloc
- * implementation has to take pains to avoid infinite recursion during
- * initialization.
- */
+
#if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN))
#define malloc_init() false
#else
static inline bool
malloc_init(void)
{
-
if (malloc_initialized == false)
return (malloc_init_hard());
@@ -4994,6 +4993,238 @@ malloc_init(void)
}
#endif
+
+#ifdef __FreeBSD__
+// There are several problematic interactions between FreeBSD's libthr and this
+// jemalloc.
+//
+// 1. This malloc calls pthread_mutex_init at init, but in libthr this triggers
+// an allocation, causing an infinite recursion.
+// 2. Actually, this malloc assumes that lock initialization never triggers a
+// memory allocation, even after initialization (see 'arena_new').
+// 3. First use of a lock routine ('pthread_mutex_lock') in libthr triggers
+// initialization of the process as a multi-threaded process. Unfortunately,
+// libthr calls regular malloc as part of this bootstrap process.
+//
+// If it were not for problem 3, we could resolve this easily by using
+// constant mutex initializers, since libthr then uses its own internal
+// allocator instead of regular malloc (this appears to have been the case for
+// years now). However, problem 3 requires this malloc to provide some memory
+// at places where it is not able to, so we need a way to divert the standard
+// allocator functions to some simple bootstrap allocator. And once we have
+// done this, using constant mutex initializers loses most of its appeal,
+// because allocations for problems 1 & 2 can be fulfilled by the simple
+// allocator as well, without the drawback of depending on libthr's specific
+// behavior.
+//
+// Since the init lock guards the 'malloc_initialized' flag, it is not
+// possible to reliably check whether jemalloc is initialized once multiple
+// threads exist, with the tools available at that point (pthread cannot be
+// used yet, but mutual exclusion is required). One solution would be to code
+// simple user-space locks for this (e.g., spinlocks using GCC's builtins).
+// But an even "simpler" solution is in fact to just remove the lock, on the
+// grounds that some memory allocation must happen before multi-threading is
+// enabled, so jemalloc is in fact always initialized before that point. And
+// if there is none, we provoke one.
+//
+// At some point, I implemented a solution using __constructor__, similar to
+// 'jemalloc_darwin_init', and tweaked the build so that it is included in
+// executables (in platform/build/gecko_templates.mozbuild). But this was not
+// enough: clearly, some other library could be initialized before jemalloc,
+// calling malloc in its constructor. I could have tried to work around this
+// with constructor priorities, but that seemed fragile as well. So in the
+// end, I kept the calls to 'malloc_init' in the interface's functions, and
+// had to introduce 'malloc_initializing' to know when (part of the) calls
+// should be diverted. I finally kept the constructor as well, just to
+// absolutely guarantee that jemalloc is initialized during executable load,
+// that is to say, before multi-threading happens, in case initialization in
+// libthr or glib is removed at some point. Unlike Darwin's, though, it calls
+// 'malloc_init' rather than 'malloc_init_hard' (because jemalloc has normally
+// already been initialized by this point).
+//
+// During lock initialization, malloc is temporarily diverted to the bootstrap
+// allocator to avoid harmful recursion. This is achieved using a flag
+// indicating whether lock initialization is under way (in order to work also
+// after malloc_init_hard() has completed). The flag *must* be per-thread,
+// because creation of new arenas, which causes creation of new locks, can
+// happen at unpredictable moments after multi-threading has been enabled (and
+// malloc has been initialized), which means concurrent allocation requests can
+// occur, and must not all be diverted. With this flag in place, and an
+// additional change to ensure that libthr's multi-thread init is indeed done
+// during mutex init (through 'pthread_mutex_lock'), there was no need to keep
+// the 'malloc_initializing' flag (see previous paragraph).
+//
+// The most likely way this whole architecture could break would be jemalloc
+// starting to initialize new locks after malloc_init_hard() has finished, but
+// not under an existing lock (a new arena's lock is currently initialized
+// under the arenas lock), because the bootstrap allocator functions are not
+// thread-safe per se. If this happens, then a very simple spinlock
+// implementation on top of GCC's atomics will be in order. But I don't think
+// this is very likely to happen.
+
+// Diverts key (de)allocation functions while jemalloc's mutexes are being
+// initialized (in malloc_init_hard(), but also arena_new() and
+// malloc_rtree_new(), as of this writing).
+#define BA_DIVERT(code) \
+ do { \
+ if (in_mutex_init) { \
+ code; \
+ } \
+ } while (0)
+
+
+// Bootstrap allocator
+//
+// It is not FreeBSD-specific, and could be used by any POSIX-compliant
+// platform if needed.
+//
+// Allocates one page at a time (relies on 'pagesize' as defined above in this
+// file), and returns memory from it. Does not accept allocations larger than a
+// single page (minus alignment). Will waste space at end of pages. Never frees
+// memory.
+//
+// All these constraints are not a problem, since this allocator is meant to
+// serve only some requests at initialization (no more than a few kB).
+
+// Number of bytes actually allocated (in whole pages)
+static size_t ba_allocated_bn = 0;
+
+// Number of requested bytes
+static size_t ba_requested_bn = 0;
+
+// Current address we are allocating from, or NULL if a new page has to be
+// allocated.
+static void *ba_cur_free = NULL;
+
+
+// Maps a fresh page for the bootstrap allocator, aborting on failure.
+static void ba_alloc_new_page(void)
+{
+ ba_cur_free = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ if (ba_cur_free == MAP_FAILED)
+ abort();
+
+ ba_allocated_bn += pagesize;
+}
+
+// Returns the offset to add to 'point' to make it a multiple of 'alignment'
+static size_t
+ba_offset_to_aligned(uintptr_t point, size_t alignment) {
+ if (alignment != 0) {
+ size_t rest = point % alignment;
+
+ if (rest != 0)
+ return alignment - rest;
+ }
+
+ return 0;
+}
+
+static void * ba_memalign(size_t alignment, size_t size)
+{
+ // We don't require 'alignment' to be a power of 2, nor to match the page
+ // size. The code below supports everything, provided that 'alignment'
+ // divides the page size.
+
+ // Impose a cache-line-size minimum alignment, so that there is no cache
+ // thrashing between fundamental structures.
+ if (alignment < CACHELINE)
+ alignment = CACHELINE;
+
+ if (size > pagesize ||
+ alignment > pagesize ||
+ size + alignment > pagesize ||
+ pagesize % alignment != 0)
+ abort();
+
+ // Address to be returned
+ uintptr_t cur_free;
+
+ // Allocate a new page if no current page (startup or previous one was
+ // exhausted) or there is not enough remaining space in it.
+
+ if (ba_cur_free == NULL) {
+ // No current page
+ ba_alloc_new_page();
+ cur_free = (uintptr_t)ba_cur_free;
+ } else {
+ cur_free = (uintptr_t)ba_cur_free;
+
+ uintptr_t off = cur_free % pagesize;
+ uintptr_t al_off = ba_offset_to_aligned(off, alignment);
+
+ if (off + al_off + size > pagesize) {
+ // Not enough room. Need a new page.
+ ba_alloc_new_page();
+ cur_free = (uintptr_t)ba_cur_free;
+ } else
+ // Account for alignment
+ cur_free += al_off;
+ }
+
+ // Compute the next free address; when the page is exactly exhausted,
+ // fall back to NULL so that a fresh page is mapped on the next call.
+ uintptr_t next_free = cur_free + size;
+ if (next_free % pagesize == 0 && size != 0)
+ next_free = 0;
+
+ // Set it
+ ba_cur_free = (void *)next_free;
+
+ // Stats
+ ba_requested_bn += size;
+
+ // Done
+ return (void *)cur_free;
+}
+
+static void * ba_malloc(size_t size)
+{
+ // 8-byte alignment by default; ba_memalign imposes an even greater
+ // (cache-line) alignment anyway.
+ return ba_memalign(8, size);
+}
+
+static void * ba_calloc(size_t number, size_t size)
+{
+ size_t const bn = number * size;
+
+ // Check for multiplication overflow; wrapped products (including those
+ // wrapping all the way to 0) must not be treated as small requests.
+ if (size != 0 && bn / size != number)
+ abort();
+
+ void * const res = ba_malloc(bn);
+ memset(res, 0, bn);
+ return res;
+}
+
+static void ba_free(void * ptr) {
+#ifdef MALLOC_DEBUG
+ malloc_printf("Bootstrap allocator: Request to free at %p\n", ptr);
+#endif
+
+ // Do nothing
+ return;
+}
+
+#ifdef MALLOC_STATS
+static void ba_print_stats(void) {
+ malloc_printf("Bootstrap allocator: %zu bytes requested, "
+ "%zu allocated\n",
+ ba_requested_bn, ba_allocated_bn);
+}
+#endif
+
+
+__attribute__((constructor))
+void
+jemalloc_FreeBSD_init(void)
+{
+ if (malloc_init())
+ abort();
+}
+#endif // #ifdef __FreeBSD__
+
+
#if !defined(MOZ_MEMORY_WINDOWS)
static
#endif
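The alignment arithmetic in ba_memalign is easiest to convince yourself of with concrete numbers. A small self-contained test of the same computation (not part of the patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t
offset_to_aligned(uintptr_t point, size_t alignment)
{
        if (alignment != 0) {
                size_t rest = point % alignment;
                if (rest != 0)
                        return alignment - rest;
        }
        return 0;
}

int
main(void)
{
        assert(offset_to_aligned(100, 64) == 28);       /* 100 + 28 == 128 */
        assert(offset_to_aligned(128, 64) == 0);        /* already aligned */
        return 0;
}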
@@ -5011,7 +5242,7 @@ malloc_init_hard(void)
malloc_zone_t* default_zone;
#endif
-#ifndef MOZ_MEMORY_WINDOWS
+#if !(defined(MOZ_MEMORY_WINDOWS) || defined(__FreeBSD__))
malloc_mutex_lock(&init_lock);
#endif
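The long comment in the previous hunk names 'a very simple spinlock implementation on top of GCC's atomics' as the contingency plan should locks ever be created outside an existing lock. A minimal sketch of such a lock using the legacy __sync builtins (hypothetical, not in the patch):

typedef struct {
        volatile int locked;
} ba_spinlock_t;

static void
ba_spin_lock(ba_spinlock_t *l)
{
        while (__sync_lock_test_and_set(&l->locked, 1)) {
                /* Spin with plain reads until the lock looks free, then
                 * retry the atomic test-and-set. */
                while (l->locked)
                        ;
        }
}

static void
ba_spin_unlock(ba_spinlock_t *l)
{
        __sync_lock_release(&l->locked);
}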
@@ -5409,7 +5640,7 @@ MALLOC_OUT:
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
if (arenas == NULL) {
-#ifndef MOZ_MEMORY_WINDOWS
+#if !(defined(MOZ_MEMORY_WINDOWS) || defined(__FreeBSD__))
malloc_mutex_unlock(&init_lock);
#endif
return (true);
@@ -5426,7 +5657,7 @@ MALLOC_OUT:
*/
arenas_extend(0);
if (arenas[0] == NULL) {
-#ifndef MOZ_MEMORY_WINDOWS
+#if !(defined(MOZ_MEMORY_WINDOWS) || defined(__FreeBSD__))
malloc_mutex_unlock(&init_lock);
#endif
return (true);
@@ -5499,9 +5730,15 @@ MALLOC_OUT:
}
#endif
-#ifndef MOZ_MEMORY_WINDOWS
+#if defined(__FreeBSD__) && defined(MALLOC_STATS)
+ malloc_printf("Bootstrap allocator: malloc_init_hard stats:\n");
+ ba_print_stats();
+#endif
+
+#if !(defined(MOZ_MEMORY_WINDOWS) || defined(__FreeBSD__))
malloc_mutex_unlock(&init_lock);
#endif
+
return (false);
}
@@ -5542,13 +5779,22 @@ malloc_shutdown()
#define DARWIN_ONLY(A)
#endif
+#ifdef __FreeBSD__
+#define FREEBSD_ONLY(code) code
+#else
+#define FREEBSD_ONLY(code)
+#endif
+
+
MOZ_MEMORY_API void *
malloc_impl(size_t size)
{
- void *ret;
-
DARWIN_ONLY(return (szone->malloc)(szone, size));
+ FREEBSD_ONLY(BA_DIVERT(return ba_malloc(size)));
+
+ void *ret;
+
if (malloc_init()) {
ret = NULL;
goto RETURN;
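Given the FREEBSD_ONLY and BA_DIVERT definitions, the diverted entry points reduce to a very small pattern. A compilable reduction with the allocator internals stubbed out and the FREEBSD_ONLY wrapper dropped for portability (illustrative, not the patch itself):

#include <stdbool.h>
#include <stdlib.h>

static __thread bool in_mutex_init = false;

#define BA_DIVERT(code)                 \
        do {                            \
                if (in_mutex_init) {    \
                        code;           \
                }                       \
        } while (0)

static void *ba_malloc(size_t size) { return calloc(1, size); } /* stub */

void *
malloc_impl_demo(size_t size)
{
        /* Only a thread currently initializing a mutex is diverted; all
         * other callers fall through to the normal allocator. */
        BA_DIVERT(return ba_malloc(size));
        return malloc(size);
}

int
main(void)
{
        free(malloc_impl_demo(16));
        return 0;
}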
@@ -5624,10 +5870,12 @@ MOZ_MEMORY_API
void *
MEMALIGN(size_t alignment, size_t size)
{
- void *ret;
-
DARWIN_ONLY(return (szone->memalign)(szone, alignment, size));
+ FREEBSD_ONLY(BA_DIVERT(return ba_memalign(alignment, size)));
+
+ void *ret;
+
assert(((alignment - 1) & alignment) == 0);
if (malloc_init()) {
@@ -5722,11 +5970,13 @@ valloc_impl(size_t size)
MOZ_MEMORY_API void *
calloc_impl(size_t num, size_t size)
{
+ DARWIN_ONLY(return (szone->calloc)(szone, num, size));
+
+ FREEBSD_ONLY(BA_DIVERT(return ba_calloc(num, size)));
+
void *ret;
size_t num_size;
- DARWIN_ONLY(return (szone->calloc)(szone, num, size));
-
if (malloc_init()) {
num_size = 0;
ret = NULL;
@@ -5841,10 +6091,12 @@ RETURN:
MOZ_MEMORY_API void
free_impl(void *ptr)
{
- size_t offset;
-
DARWIN_ONLY((szone->free)(szone, ptr); return);
+ FREEBSD_ONLY(BA_DIVERT(return ba_free(ptr)));
+
+ size_t offset;
+
/*
* A version of idalloc that checks for NULL pointer but only for
* huge allocations assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
diff --git a/python/psutil/psutil/_psutil_bsd.c b/python/psutil/psutil/_psutil_bsd.c
index 7b6e56173..db4bc2d7a 100644
--- a/python/psutil/psutil/_psutil_bsd.c
+++ b/python/psutil/psutil/_psutil_bsd.c
@@ -38,7 +38,7 @@
#include <netinet/tcp_fsm.h> // for TCP connection states
#include <arpa/inet.h> // for inet_ntop()
-#if __FreeBSD_version < 900000
+#if !defined(__FreeBSD_version)
#include <utmp.h> // system users
#else
#include <utmpx.h>
@@ -601,11 +601,7 @@ psutil_virtual_mem(PyObject *self, PyObject *args)
struct vmtotal vm;
int mib[] = {CTL_VM, VM_METER};
long pagesize = getpagesize();
-#if __FreeBSD_version > 702101
long buffers;
-#else
- int buffers;
-#endif
size_t buffers_size = sizeof(buffers);
if (sysctlbyname("vm.stats.vm.v_page_count", &total, &size, NULL, 0))
@@ -724,13 +720,13 @@ psutil_cpu_times(PyObject *self, PyObject *args)
/*
* XXX
- * These functions are available on FreeBSD 8 only.
+ * These functions are only available on FreeBSD.
* In the upper python layer we do various tricks to avoid crashing
* and/or to provide alternatives where possible.
*/
-#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+#if defined(__FreeBSD_version)
/*
* Return files opened by process as a list of (path, fd) tuples.
* TODO: this is broken as it may report empty paths. 'procstat'
@@ -943,11 +939,19 @@ psutil_sockaddr_matches(int family, int port, void *pcb_addr,
psutil_sockaddr_addrlen(family)) == 0);
}
+#if __FreeBSD_version >= 1200026
+static struct xtcpcb *
+psutil_search_tcplist(char *buf, struct kinfo_file *kif)
+{
+ struct xtcpcb *tp;
+ struct xinpcb *inp;
+#else
static struct tcpcb *
psutil_search_tcplist(char *buf, struct kinfo_file *kif)
{
struct tcpcb *tp;
struct inpcb *inp;
+#endif
struct xinpgen *xig, *oxig;
struct xsocket *so;
@@ -955,9 +959,15 @@ psutil_search_tcplist(char *buf, struct kinfo_file *kif)
for (xig = (struct xinpgen *)((char *)xig + xig->xig_len);
xig->xig_len > sizeof(struct xinpgen);
xig = (struct xinpgen *)((char *)xig + xig->xig_len)) {
+#if __FreeBSD_version >= 1200026
+ tp = (struct xtcpcb *)xig;
+ inp = &tp->xt_inp;
+ so = &inp->xi_socket;
+#else
tp = &((struct xtcpcb *)xig)->xt_tp;
inp = &((struct xtcpcb *)xig)->xt_inp;
so = &((struct xtcpcb *)xig)->xt_socket;
+#endif
if (so->so_type != kif->kf_sock_type ||
so->xso_family != kif->kf_sock_domain ||
@@ -967,20 +977,36 @@ psutil_search_tcplist(char *buf, struct kinfo_file *kif)
if (kif->kf_sock_domain == AF_INET) {
if (!psutil_sockaddr_matches(
AF_INET, inp->inp_lport, &inp->inp_laddr,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_local))
+#else
+ &kif->kf_un.kf_sock.kf_sa_local))
+#endif
continue;
if (!psutil_sockaddr_matches(
AF_INET, inp->inp_fport, &inp->inp_faddr,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_peer))
+#else
+ &kif->kf_un.kf_sock.kf_sa_peer))
+#endif
continue;
} else {
if (!psutil_sockaddr_matches(
AF_INET6, inp->inp_lport, &inp->in6p_laddr,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_local))
+#else
+ &kif->kf_un.kf_sock.kf_sa_local))
+#endif
continue;
if (!psutil_sockaddr_matches(
AF_INET6, inp->inp_fport, &inp->in6p_faddr,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_peer))
+#else
+ &kif->kf_un.kf_sock.kf_sa_peer))
+#endif
continue;
}
@@ -1005,7 +1031,11 @@ psutil_proc_connections(PyObject *self, PyObject *args)
struct kinfo_file *freep = NULL;
struct kinfo_file *kif;
char *tcplist = NULL;
+#if __FreeBSD_version >= 1200026
+ struct xtcpcb *tcp;
+#else
struct tcpcb *tcp;
+#endif
PyObject *retList = PyList_New(0);
PyObject *tuple = NULL;
@@ -1074,19 +1104,35 @@ psutil_proc_connections(PyObject *self, PyObject *args)
inet_ntop(
kif->kf_sock_domain,
psutil_sockaddr_addr(kif->kf_sock_domain,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_local),
+#else
+ &kif->kf_un.kf_sock.kf_sa_local),
+#endif
lip,
sizeof(lip));
inet_ntop(
kif->kf_sock_domain,
psutil_sockaddr_addr(kif->kf_sock_domain,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_peer),
+#else
+ &kif->kf_un.kf_sock.kf_sa_peer),
+#endif
rip,
sizeof(rip));
lport = htons(psutil_sockaddr_port(kif->kf_sock_domain,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_local));
+#else
+ &kif->kf_un.kf_sock.kf_sa_local));
+#endif
rport = htons(psutil_sockaddr_port(kif->kf_sock_domain,
+#if __FreeBSD_version < 1200031
&kif->kf_sa_peer));
+#else
+ &kif->kf_un.kf_sock.kf_sa_peer));
+#endif
// construct python tuple/list
laddr = Py_BuildValue("(si)", lip, lport);
@@ -1115,7 +1161,11 @@ psutil_proc_connections(PyObject *self, PyObject *args)
else if (kif->kf_sock_domain == AF_UNIX) {
struct sockaddr_un *sun;
+#if __FreeBSD_version < 1200031
sun = (struct sockaddr_un *)&kif->kf_sa_local;
+#else
+ sun = (struct sockaddr_un *)&kif->kf_un.kf_sock.kf_sa_local;
+#endif
snprintf(
path, sizeof(path), "%.*s",
(int)(sun->sun_len - (sizeof(*sun) - sizeof(sun->sun_path))),
@@ -1626,7 +1676,7 @@ psutil_users(PyObject *self, PyObject *args)
if (ret_list == NULL)
return NULL;
-#if __FreeBSD_version < 900000
+#if !defined(__FreeBSD_version)
struct utmp ut;
FILE *fp;
@@ -1736,7 +1786,7 @@ psutil_get_pid_from_sock(int sock_hash)
struct xfile *xf;
int hash, n;
for (xf = psutil_xfiles, n = 0; n < psutil_nxfiles; ++n, ++xf) {
- if (xf->xf_data == NULL)
+ if (xf->xf_data == 0)
continue;
hash = (int)((uintptr_t)xf->xf_data % HASHSIZE);
if (sock_hash == hash)
@@ -1754,7 +1804,11 @@ int psutil_gather_inet(int proto, PyObject *py_retlist)
struct xinpgen *xig, *exig;
struct xinpcb *xip;
struct xtcpcb *xtp;
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200026
+ struct xinpcb *inp;
+#else
struct inpcb *inp;
+#endif
struct xsocket *so;
const char *varname = NULL;
size_t len, bufsize;
@@ -1820,8 +1874,13 @@ int psutil_gather_inet(int proto, PyObject *py_retlist)
goto error;
}
inp = &xtp->xt_inp;
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200026
+ so = &inp->xi_socket;
+ status = xtp->t_state;
+#else
so = &xtp->xt_socket;
status = xtp->xt_tp.t_state;
+#endif
break;
case IPPROTO_UDP:
xip = (struct xinpcb *)xig;
@@ -1830,7 +1889,11 @@ int psutil_gather_inet(int proto, PyObject *py_retlist)
"struct xinpcb size mismatch");
goto error;
}
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200026
+ inp = xip;
+#else
inp = &xip->xi_inp;
+#endif
so = &xip->xi_socket;
status = PSUTIL_CONN_NONE;
break;
@@ -2166,7 +2229,7 @@ PsutilMethods[] =
"Return process CPU affinity."},
{"proc_cpu_affinity_set", psutil_proc_cpu_affinity_set, METH_VARARGS,
"Set process CPU affinity."},
-#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+#if defined(__FreeBSD_version)
{"proc_open_files", psutil_proc_open_files, METH_VARARGS,
"Return files opened by process as a list of (path, fd) tuples"},
{"proc_cwd", psutil_proc_cwd, METH_VARARGS,
@@ -2191,7 +2254,7 @@ PsutilMethods[] =
"Return swap mem stats"},
{"cpu_times", psutil_cpu_times, METH_VARARGS,
"Return system cpu times as a tuple (user, system, nice, idle, irc)"},
-#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+#if defined(__FreeBSD_version)
{"per_cpu_times", psutil_per_cpu_times, METH_VARARGS,
"Return system per-cpu times as a list of tuples"},
#endif
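FreeBSD 1200031 moved the socket addresses on struct kinfo_file into the kf_un.kf_sock union, which is why the same #if recurs at every access site above. The version split could also be contained in one place; a hypothetical FreeBSD-only helper (not how this patch or upstream psutil does it):

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/user.h>   /* struct kinfo_file */

static struct sockaddr_storage *
kif_sa_local(struct kinfo_file *kif)
{
#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031
        return &kif->kf_un.kf_sock.kf_sa_local;
#else
        return &kif->kf_sa_local;
#endif
}

Every caller would then read kif_sa_local(kif) regardless of the release being built against.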
diff --git a/python/psutil/psutil/_psutil_bsd.h b/python/psutil/psutil/_psutil_bsd.h
index 803957dac..dc4c77685 100644
--- a/python/psutil/psutil/_psutil_bsd.h
+++ b/python/psutil/psutil/_psutil_bsd.h
@@ -29,7 +29,7 @@ static PyObject* psutil_proc_uids(PyObject* self, PyObject* args);
static PyObject* psutil_proc_cpu_affinity_get(PyObject* self, PyObject* args);
static PyObject* psutil_proc_cpu_affinity_set(PyObject* self, PyObject* args);
-#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+#if defined(__FreeBSD_version)
static PyObject* psutil_proc_open_files(PyObject* self, PyObject* args);
static PyObject* psutil_proc_cwd(PyObject* self, PyObject* args);
#endif
@@ -48,6 +48,6 @@ static PyObject* psutil_swap_mem(PyObject* self, PyObject* args);
static PyObject* psutil_users(PyObject* self, PyObject* args);
static PyObject* psutil_virtual_mem(PyObject* self, PyObject* args);
-#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000
+#if defined(__FreeBSD_version)
static PyObject* psutil_per_cpu_times(PyObject* self, PyObject* args);
#endif