diff --git a/modules/fdlibm/src/math_private.h b/modules/fdlibm/src/math_private.h
--- a/modules/fdlibm/src/math_private.h
+++ b/modules/fdlibm/src/math_private.h
@@ -586,126 +586,16 @@ CMPLXL(long double x, long double y)
REALPART(z) = x;
IMAGPART(z) = y;
return (z.f);
}
#endif
#endif /* _COMPLEX_H */
-/*
- * The rnint() family rounds to the nearest integer for a restricted range
- * of args (up to about 2**MANT_DIG). We assume that the current
- * rounding mode is FE_TONEAREST so that this can be done efficiently.
- * Extra precision causes more problems in practice, and we only centralize
- * this here to reduce those problems, and have not solved the efficiency
- * problems. The exp2() family uses a more delicate version of this that
- * requires extracting bits from the intermediate value, so it is not
- * centralized here and should copy any solution of the efficiency problems.
- */
-
-static inline double
-rnint(__double_t x)
-{
- /*
- * This casts to double to kill any extra precision. This depends
- * on the cast being applied to a double_t to avoid compiler bugs
- * (this is a cleaner version of STRICT_ASSIGN()). This is
- * inefficient if there actually is extra precision, but is hard
- * to improve on. We use double_t in the API to minimise conversions
- * for just calling here. Note that we cannot easily change the
- * magic number to the one that works directly with double_t, since
- * the rounding precision is variable at runtime on x86 so the
- * magic number would need to be variable. Assuming that the
- * rounding precision is always the default is too fragile. This
- * and many other complications will move when the default is
- * changed to FP_PE.
- */
- return ((double)(x + 0x1.8p52) - 0x1.8p52);
-}
-
-static inline float
-rnintf(__float_t x)
-{
- /*
- * As for rnint(), except we could just call that to handle the
- * extra precision case, usually without losing efficiency.
- */
- return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
-}
-
-#ifdef LDBL_MANT_DIG
-/*
- * The complications for extra precision are smaller for rnintl() since it
- * can safely assume that the rounding precision has been increased from
- * its default to FP_PE on x86. We don't exploit that here to get small
- * optimizations from limiting the range to double. We just need it for
- * the magic number to work with long doubles. ld128 callers should use
- * rnint() instead of this if possible. ld80 callers should prefer
- * rnintl() since for amd64 this avoids swapping the register set, while
- * for i386 it makes no difference (assuming FP_PE), and for other arches
- * it makes little difference.
- */
-static inline long double
-rnintl(long double x)
-{
- return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
- __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2);
-}
-#endif /* LDBL_MANT_DIG */
-
-/*
- * irint() and i64rint() give the same result as casting to their integer
- * return type provided their arg is a floating point integer. They can
- * sometimes be more efficient because no rounding is required.
- */
-#if (defined(amd64) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
-#define irint(x) \
- (sizeof(x) == sizeof(float) && \
- sizeof(__float_t) == sizeof(long double) ? irintf(x) : \
- sizeof(x) == sizeof(double) && \
- sizeof(__double_t) == sizeof(long double) ? irintd(x) : \
- sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
-#else
-#define irint(x) ((int)(x))
-#endif
-
-#define i64rint(x) ((int64_t)(x)) /* only needed for ld128 so not opt. */
-
-#if defined(__i386__) && defined(__GNUCLIKE_ASM)
-static __inline int
-irintf(float x)
-{
- int n;
-
- __asm("fistl %0" : "=m" (n) : "t" (x));
- return (n);
-}
-
-static __inline int
-irintd(double x)
-{
- int n;
-
- __asm("fistl %0" : "=m" (n) : "t" (x));
- return (n);
-}
-#endif
-
-#if (defined(__amd64__) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
-static __inline int
-irintl(long double x)
-{
- int n;
-
- __asm("fistl %0" : "=m" (n) : "t" (x));
- return (n);
-}
-#endif
-
#ifdef DEBUG
#if defined(__amd64__) || defined(__i386__)
#define breakpoint() asm("int $3")
#else
#include <signal.h>
#define breakpoint() raise(SIGTRAP)
#endif
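
For reference, the rnint() helpers removed above rely on the classic magic-constant
rounding trick: under the default FE_TONEAREST rounding mode, adding and then
subtracting 1.5 * 2**52 leaves the nearest integer to the argument, provided its
magnitude stays well below 2**MANT_DIG. A minimal standalone sketch of the same
technique (not part of the patch; the function name and test values are illustrative):

#include <stdio.h>

/*
 * Sketch of the magic-constant trick used by the removed rnint():
 * under FE_TONEAREST, x + 0x1.8p52 has no fractional bits left, so
 * subtracting 0x1.8p52 again yields the nearest integer to x, as long
 * as |x| is well below 2**52.  The cast to double discards any extra
 * precision an x87-style evaluation might otherwise carry.
 */
static double
round_to_nearest(double x)
{
	return ((double)(x + 0x1.8p52) - 0x1.8p52);
}

int
main(void)
{
	/* Ties round to even, matching the default IEEE 754 mode. */
	printf("%g %g %g\n",
	    round_to_nearest(2.5),	/* 2 */
	    round_to_nearest(3.5),	/* 4 */
	    round_to_nearest(-1.25));	/* -1 */
	return (0);
}

The removed irint()/irintd()/irintl() wrappers serve a related purpose for integer
conversion: on x87 the fistl instruction converts using the current rounding mode,
avoiding the mode switch that a truncating (int) cast implies, which is why they can
be faster when the argument is already an integral value. The portable C99 way to get
that round-to-current-mode behaviour is lrint() from <math.h>.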