Diffstat (limited to 'modules/fdlibm/patches/19_remove_unneeded_round_to_integer_helpers.patch')
-rw-r--r--  modules/fdlibm/patches/19_remove_unneeded_round_to_integer_helpers.patch  |  130
1 file changed, 130 insertions(+), 0 deletions(-)
diff --git a/modules/fdlibm/patches/19_remove_unneeded_round_to_integer_helpers.patch b/modules/fdlibm/patches/19_remove_unneeded_round_to_integer_helpers.patch
new file mode 100644
index 000000000..6d1baa23a
--- /dev/null
+++ b/modules/fdlibm/patches/19_remove_unneeded_round_to_integer_helpers.patch
@@ -0,0 +1,130 @@
+diff --git a/modules/fdlibm/src/math_private.h b/modules/fdlibm/src/math_private.h
+--- a/modules/fdlibm/src/math_private.h
++++ b/modules/fdlibm/src/math_private.h
+@@ -586,126 +586,16 @@ CMPLXL(long double x, long double y)
+ REALPART(z) = x;
+ IMAGPART(z) = y;
+ return (z.f);
+ }
+ #endif
+
+ #endif /* _COMPLEX_H */
+
+-/*
+- * The rnint() family rounds to the nearest integer for a restricted
+- * range of args (up to about 2**MANT_DIG). We assume that the current
+- * rounding mode is FE_TONEAREST so that this can be done efficiently.
+- * Extra precision causes more problems in practice, and we only centralize
+- * this here to reduce those problems, and have not solved the efficiency
+- * problems. The exp2() family uses a more delicate version of this that
+- * requires extracting bits from the intermediate value, so it is not
+- * centralized here and should copy any solution of the efficiency problems.
+- */
+-
+-static inline double
+-rnint(__double_t x)
+-{
+- /*
+- * This casts to double to kill any extra precision. This depends
+- * on the cast being applied to a double_t to avoid compiler bugs
+- * (this is a cleaner version of STRICT_ASSIGN()). This is
+- * inefficient if there actually is extra precision, but is hard
+- * to improve on. We use double_t in the API to minimise conversions
+- * for just calling here. Note that we cannot easily change the
+- * magic number to the one that works directly with double_t, since
+- * the rounding precision is variable at runtime on x86 so the
+- * magic number would need to be variable. Assuming that the
+- * rounding precision is always the default is too fragile. This
+- * and many other complications will move when the default is
+- * changed to FP_PE.
+- */
+- return ((double)(x + 0x1.8p52) - 0x1.8p52);
+-}
+-
+-static inline float
+-rnintf(__float_t x)
+-{
+- /*
+- * As for rnint(), except we could just call that to handle the
+- * extra precision case, usually without losing efficiency.
+- */
+- return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
+-}
+-
+-#ifdef LDBL_MANT_DIG
+-/*
+- * The complications for extra precision are smaller for rnintl() since it
+- * can safely assume that the rounding precision has been increased from
+- * its default to FP_PE on x86. We don't exploit that here to get small
+- * optimizations from limiting the range to double. We just need it for
+- * the magic number to work with long doubles. ld128 callers should use
+- * rnint() instead of this if possible. ld80 callers should prefer
+- * rnintl() since for amd64 this avoids swapping the register set, while
+- * for i386 it makes no difference (assuming FP_PE), and for other arches
+- * it makes little difference.
+- */
+-static inline long double
+-rnintl(long double x)
+-{
+- return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
+- __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2);
+-}
+-#endif /* LDBL_MANT_DIG */
+-
+-/*
+- * irint() and i64rint() give the same result as casting to their integer
+- * return type provided their arg is a floating point integer. They can
+- * sometimes be more efficient because no rounding is required.
+- */
+-#if (defined(amd64) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
+-#define irint(x) \
+- (sizeof(x) == sizeof(float) && \
+- sizeof(__float_t) == sizeof(long double) ? irintf(x) : \
+- sizeof(x) == sizeof(double) && \
+- sizeof(__double_t) == sizeof(long double) ? irintd(x) : \
+- sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
+-#else
+-#define irint(x) ((int)(x))
+-#endif
+-
+-#define i64rint(x) ((int64_t)(x)) /* only needed for ld128 so not opt. */
+-
+-#if defined(__i386__) && defined(__GNUCLIKE_ASM)
+-static __inline int
+-irintf(float x)
+-{
+- int n;
+-
+- __asm("fistl %0" : "=m" (n) : "t" (x));
+- return (n);
+-}
+-
+-static __inline int
+-irintd(double x)
+-{
+- int n;
+-
+- __asm("fistl %0" : "=m" (n) : "t" (x));
+- return (n);
+-}
+-#endif
+-
+-#if (defined(__amd64__) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
+-static __inline int
+-irintl(long double x)
+-{
+- int n;
+-
+- __asm("fistl %0" : "=m" (n) : "t" (x));
+- return (n);
+-}
+-#endif
+-
+ #ifdef DEBUG
+ #if defined(__amd64__) || defined(__i386__)
+ #define breakpoint() asm("int $3")
+ #else
+ #include <signal.h>
+
+ #define breakpoint() raise(SIGTRAP)
+ #endif
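
The magic-number trick behind the deleted rnint() helpers is worth a
standalone illustration. The following is a minimal sketch, not code from the
patch: it assumes the default FE_TONEAREST rounding mode and an IEEE-754
double with a 53-bit mantissa, and the name rnint_sketch() is hypothetical.

#include <assert.h>
#include <fenv.h>
#include <stdio.h>

/*
 * Sketch of the removed rnint(): adding 0x1.8p52 (1.5 * 2**52) pushes
 * the sum into a binade where double has no fraction bits, so the
 * FPU's round-to-nearest-even discards the fractional part of x; the
 * subtraction then recovers the rounded value. Only valid for |x| up
 * to about 2**52, and only under FE_TONEAREST.
 */
static inline double
rnint_sketch(double x)
{
        return ((double)(x + 0x1.8p52) - 0x1.8p52);
}

int
main(void)
{
        assert(fegetround() == FE_TONEAREST);   /* precondition of the trick */
        /* Prints "2 -1 4"; note that 2.5 ties to even, unlike round(). */
        printf("%g %g %g\n", rnint_sketch(2.5), rnint_sketch(-1.3),
            rnint_sketch(3.7));
        return (0);
}

The cast through (double) mirrors what the removed comment describes: it
kills any extra precision a compiler might otherwise keep in an 80-bit x87
register, where the 53-bit magic number would no longer absorb the fraction.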
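The removed irintf()/irintd()/irintl() helpers all wrapped the same x87
instruction. Here is a minimal sketch of the idea under the same assumptions
the removed code made (i386 with GNU-style inline asm); irint_sketch() is a
hypothetical name, and the fallback branch is the plain cast used by the
non-asm #else path of the removed irint() macro.

#include <stdio.h>

static inline int
irint_sketch(double x)
{
#if defined(__i386__) && defined(__GNUC__)
        int n;

        /*
         * fistl stores st(0) as a 32-bit integer using the current
         * rounding mode, so no separate rounding step is needed.
         */
        __asm("fistl %0" : "=m" (n) : "t" (x));
        return (n);
#else
        return ((int)x);        /* truncating cast; same result for integral x */
#endif
}

int
main(void)
{
        double x = 42.0;        /* arg is already a floating point integer */

        /* Both paths agree when x is integral, as the removed comment notes. */
        printf("%d %d\n", irint_sketch(x), (int)x);
        return (0);
}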