| author | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
|---|---|---|
| committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
| commit | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch) | |
| tree | 10027f336435511475e392454359edea8e25895d | /memory/jemalloc/src/test |
| parent | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff) | |
Add m-esr52 at 52.6.0
Diffstat (limited to 'memory/jemalloc/src/test')
87 files changed, 14668 insertions, 0 deletions
diff --git a/memory/jemalloc/src/test/include/test/SFMT-alti.h b/memory/jemalloc/src/test/include/test/SFMT-alti.h new file mode 100644 index 000000000..0005df6b4 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-alti.h @@ -0,0 +1,186 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT-alti.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * pseudorandom number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software. + * see LICENSE.txt + */ + +#ifndef SFMT_ALTI_H +#define SFMT_ALTI_H + +/** + * This function represents the recursion formula in AltiVec and BIG ENDIAN. 
+ * @param a a 128-bit part of the interal state array + * @param b a 128-bit part of the interal state array + * @param c a 128-bit part of the interal state array + * @param d a 128-bit part of the interal state array + * @return output + */ +JEMALLOC_ALWAYS_INLINE +vector unsigned int vec_recursion(vector unsigned int a, + vector unsigned int b, + vector unsigned int c, + vector unsigned int d) { + + const vector unsigned int sl1 = ALTI_SL1; + const vector unsigned int sr1 = ALTI_SR1; +#ifdef ONLY64 + const vector unsigned int mask = ALTI_MSK64; + const vector unsigned char perm_sl = ALTI_SL2_PERM64; + const vector unsigned char perm_sr = ALTI_SR2_PERM64; +#else + const vector unsigned int mask = ALTI_MSK; + const vector unsigned char perm_sl = ALTI_SL2_PERM; + const vector unsigned char perm_sr = ALTI_SR2_PERM; +#endif + vector unsigned int v, w, x, y, z; + x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); + v = a; + y = vec_sr(b, sr1); + z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); + w = vec_sl(d, sl1); + z = vec_xor(z, w); + y = vec_and(y, mask); + v = vec_xor(v, x); + z = vec_xor(z, y); + z = vec_xor(z, v); + return z; +} + +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { + int i; + vector unsigned int r, r1, r2; + + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pesudorandom numbers to be generated. + */ +JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + vector unsigned int r, r1, r2; + + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j].s = array[j + size - N].s; + } + for (; i < size; i++) { + r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + ctx->sfmt[j++].s = r; + r1 = r2; + r2 = r; + } +} + +#ifndef ONLY64 +#if defined(__APPLE__) +#define ALTI_SWAP (vector unsigned char) \ + (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) +#else +#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} +#endif +/** + * This function swaps high and low 32-bit of 64-bit integers in user + * specified array. + * + * @param array an 128-bit array to be swaped. + * @param size size of 128-bit array. 
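For readers without the SIMD references at hand: vec_recursion() above (and mm_recursion() in SFMT-sse2.h further down) vectorizes SFMT's generic 128-bit recursion r = a ^ (a << (SL2*8)) ^ ((b >> SR1) & MSK) ^ (c >> (SR2*8)) ^ (d << SL1), where the shifts of a and c act on the whole 128-bit word (hence the byte-permute tables) and those of b and d act per 32-bit lane. Below is a portable scalar sketch, not part of the committed diff, following the SFMT 1.3.3 reference code; the SL*/SR*/MSK* constants and the little-endian w128_t layout are assumed from the SFMT-params*.h headers.

#include <stdint.h>

/* Assumed little-endian 128-bit word, as in SFMT's w128_t. */
typedef struct { uint32_t u[4]; } w128_t;

/* Shift a 128-bit value left by (shift * 8) bits (the SL2 path).
 * Valid for shift in 1..7, which covers every parameter set here. */
static void lshift128(w128_t *out, const w128_t *in, int shift) {
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t oh = (th << (shift * 8)) | (tl >> (64 - shift * 8));
    uint64_t ol = tl << (shift * 8);
    out->u[0] = (uint32_t)ol; out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh; out->u[3] = (uint32_t)(oh >> 32);
}

/* Shift a 128-bit value right by (shift * 8) bits (the SR2 path). */
static void rshift128(w128_t *out, const w128_t *in, int shift) {
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t ol = (tl >> (shift * 8)) | (th << (64 - shift * 8));
    uint64_t oh = th >> (shift * 8);
    out->u[0] = (uint32_t)ol; out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh; out->u[3] = (uint32_t)(oh >> 32);
}

/* Scalar form of vec_recursion() (AltiVec) / mm_recursion() (SSE2). */
static void do_recursion(w128_t *r, const w128_t *a, const w128_t *b,
    const w128_t *c, const w128_t *d) {
    static const uint32_t msk[4] = {MSK1, MSK2, MSK3, MSK4};
    w128_t x, y;
    int i;

    lshift128(&x, a, SL2);
    rshift128(&y, c, SR2);
    for (i = 0; i < 4; i++) {
        r->u[i] = a->u[i] ^ x.u[i] ^ ((b->u[i] >> SR1) & msk[i])
            ^ y.u[i] ^ (d->u[i] << SL1);
    }
}

On lane order: the AltiVec mask initializer is {MSK1, MSK2, MSK3, MSK4}, while the SSE2 path builds the same value with _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); _mm_set_epi32() takes its arguments from the highest lane down, so MSK1 lands in lane 0 either way.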
+ */ +JEMALLOC_INLINE void swap(w128_t *array, int size) { + int i; + const vector unsigned char perm = ALTI_SWAP; + + for (i = 0; i < size; i++) { + array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); + } +} +#endif + +#endif diff --git a/memory/jemalloc/src/test/include/test/SFMT-params.h b/memory/jemalloc/src/test/include/test/SFMT-params.h new file mode 100644 index 000000000..ade662220 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params.h @@ -0,0 +1,132 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS_H +#define SFMT_PARAMS_H + +#if !defined(MEXP) +#ifdef __GNUC__ + #warning "MEXP is not defined. I assume MEXP is 19937." +#endif + #define MEXP 19937 +#endif +/*----------------- + BASIC DEFINITIONS + -----------------*/ +/** Mersenne Exponent. The period of the sequence + * is a multiple of 2^MEXP-1. + * #define MEXP 19937 */ +/** SFMT generator has an internal state array of 128-bit integers, + * and N is its size. */ +#define N (MEXP / 128 + 1) +/** N32 is the size of internal state array when regarded as an array + * of 32-bit integers.*/ +#define N32 (N * 4) +/** N64 is the size of internal state array when regarded as an array + * of 64-bit integers.*/ +#define N64 (N * 2) + +/*---------------------- + the parameters of SFMT + following definitions are in paramsXXXX.h file. + ----------------------*/ +/** the pick up position of the array. +#define POS1 122 +*/ + +/** the parameter of shift left as four 32-bit registers. +#define SL1 18 + */ + +/** the parameter of shift left as one 128-bit register. + * The 128-bit integer is shifted by (SL2 * 8) bits. +#define SL2 1 +*/ + +/** the parameter of shift right as four 32-bit registers. 
+#define SR1 11 +*/ + +/** the parameter of shift right as one 128-bit register. + * The 128-bit integer is shifted by (SL2 * 8) bits. +#define SR2 1 +*/ + +/** A bitmask, used in the recursion. These parameters are introduced + * to break symmetry of SIMD. +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +*/ + +/** These definitions are part of a 128-bit period certification vector. +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xc98e126aU +*/ + +#if MEXP == 607 + #include "test/SFMT-params607.h" +#elif MEXP == 1279 + #include "test/SFMT-params1279.h" +#elif MEXP == 2281 + #include "test/SFMT-params2281.h" +#elif MEXP == 4253 + #include "test/SFMT-params4253.h" +#elif MEXP == 11213 + #include "test/SFMT-params11213.h" +#elif MEXP == 19937 + #include "test/SFMT-params19937.h" +#elif MEXP == 44497 + #include "test/SFMT-params44497.h" +#elif MEXP == 86243 + #include "test/SFMT-params86243.h" +#elif MEXP == 132049 + #include "test/SFMT-params132049.h" +#elif MEXP == 216091 + #include "test/SFMT-params216091.h" +#else +#ifdef __GNUC__ + #error "MEXP is not valid." + #undef MEXP +#else + #undef MEXP +#endif + +#endif + +#endif /* SFMT_PARAMS_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params11213.h b/memory/jemalloc/src/test/include/test/SFMT-params11213.h new file mode 100644 index 000000000..2994bd21d --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params11213.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
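A note on the dispatch at the end of SFMT-params.h above: the Mersenne exponent is fixed at compile time, so a translation unit picks its SFMT variant by defining MEXP before that header is reached; left undefined, it falls back to 19937 (with a GCC warning). For MEXP 607, for instance, N = 607/128 + 1 = 5, i.e. five 128-bit words of internal state. A minimal sketch of the selection, using a hypothetical test source file (the jemalloc harness itself pins MEXP to 19937 in jemalloc_test.h):

/* sketch.c (hypothetical): select the SFMT-607 parameter set. */
#define MEXP 607               /* must precede the include below */
#include "test/SFMT-params.h"  /* dispatches to test/SFMT-params607.h */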
+ */ +#ifndef SFMT_PARAMS11213_H +#define SFMT_PARAMS11213_H + +#define POS1 68 +#define SL1 14 +#define SL2 3 +#define SR1 7 +#define SR2 3 +#define MSK1 0xeffff7fbU +#define MSK2 0xffffffefU +#define MSK3 0xdfdfbfffU +#define MSK4 0x7fffdbfdU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xe8148000U +#define PARITY4 0xd0c7afa3U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" + +#endif /* SFMT_PARAMS11213_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params1279.h b/memory/jemalloc/src/test/include/test/SFMT-params1279.h new file mode 100644 index 000000000..d7959f980 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params1279.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS1279_H +#define SFMT_PARAMS1279_H + +#define POS1 7 +#define SL1 14 +#define SL2 3 +#define SR1 5 +#define SR2 1 +#define MSK1 0xf7fefffdU +#define MSK2 0x7fefcfffU +#define MSK3 0xaff3ef3fU +#define MSK4 0xb5ffff7fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x20000000U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" + +#endif /* SFMT_PARAMS1279_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params132049.h b/memory/jemalloc/src/test/include/test/SFMT-params132049.h new file mode 100644 index 000000000..a1dcec392 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params132049.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS132049_H +#define SFMT_PARAMS132049_H + +#define POS1 110 +#define SL1 19 +#define SL2 1 +#define SR1 21 +#define SR2 1 +#define MSK1 0xffffbb5fU +#define MSK2 0xfb6ebf95U +#define MSK3 0xfffefffaU +#define MSK4 0xcff77fffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xcb520000U +#define PARITY4 0xc7e91c7dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" + +#endif /* SFMT_PARAMS132049_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params19937.h b/memory/jemalloc/src/test/include/test/SFMT-params19937.h new file mode 100644 index 000000000..fb92b4c9b --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params19937.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS19937_H +#define SFMT_PARAMS19937_H + +#define POS1 122 +#define SL1 18 +#define SL2 1 +#define SR1 11 +#define SR2 1 +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x13c9e684U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" + +#endif /* SFMT_PARAMS19937_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params216091.h b/memory/jemalloc/src/test/include/test/SFMT-params216091.h new file mode 100644 index 000000000..125ce2820 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params216091.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS216091_H +#define SFMT_PARAMS216091_H + +#define POS1 627 +#define SL1 11 +#define SL2 3 +#define SR1 10 +#define SR2 1 +#define MSK1 0xbff7bff7U +#define MSK2 0xbfffffffU +#define MSK3 0xbffffa7fU +#define MSK4 0xffddfbfbU +#define PARITY1 0xf8000001U +#define PARITY2 0x89e80709U +#define PARITY3 0x3bd2b64bU +#define PARITY4 0x0c64b1e4U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" + +#endif /* SFMT_PARAMS216091_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params2281.h b/memory/jemalloc/src/test/include/test/SFMT-params2281.h new file mode 100644 index 000000000..0ef85c407 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params2281.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS2281_H +#define SFMT_PARAMS2281_H + +#define POS1 12 +#define SL1 19 +#define SL2 1 +#define SR1 5 +#define SR2 1 +#define MSK1 0xbff7ffbfU +#define MSK2 0xfdfffffeU +#define MSK3 0xf7ffef7fU +#define MSK4 0xf2f7cbbfU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x41dfa600U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" + +#endif /* SFMT_PARAMS2281_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params4253.h b/memory/jemalloc/src/test/include/test/SFMT-params4253.h new file mode 100644 index 000000000..9f07bc67e --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params4253.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef SFMT_PARAMS4253_H +#define SFMT_PARAMS4253_H + +#define POS1 17 +#define SL1 20 +#define SL2 1 +#define SR1 7 +#define SR2 1 +#define MSK1 0x9f7bffffU +#define MSK2 0x9fffff5fU +#define MSK3 0x3efffffbU +#define MSK4 0xfffff7bbU +#define PARITY1 0xa8000001U +#define PARITY2 0xaf5390a3U +#define PARITY3 0xb740b3f8U +#define PARITY4 0x6c11486dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" + +#endif /* SFMT_PARAMS4253_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params44497.h b/memory/jemalloc/src/test/include/test/SFMT-params44497.h new file mode 100644 index 000000000..85598fed5 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params44497.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS44497_H +#define SFMT_PARAMS44497_H + +#define POS1 330 +#define SL1 5 +#define SL2 3 +#define SR1 9 +#define SR2 3 +#define MSK1 0xeffffffbU +#define MSK2 0xdfbebfffU +#define MSK3 0xbfbf7befU +#define MSK4 0x9ffd7bffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xa3ac4000U +#define PARITY4 0xecc1327aU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" + +#endif /* SFMT_PARAMS44497_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params607.h b/memory/jemalloc/src/test/include/test/SFMT-params607.h new file mode 100644 index 000000000..bc76485f8 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params607.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS607_H +#define SFMT_PARAMS607_H + +#define POS1 2 +#define SL1 15 +#define SL2 3 +#define SR1 13 +#define SR2 3 +#define MSK1 0xfdff37ffU +#define MSK2 0xef7f3f7dU +#define MSK3 0xff777b7dU +#define MSK4 0x7ff7fb2fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x5986f054U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" + +#endif /* SFMT_PARAMS607_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-params86243.h b/memory/jemalloc/src/test/include/test/SFMT-params86243.h new file mode 100644 index 000000000..5e4d783c5 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-params86243.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS86243_H +#define SFMT_PARAMS86243_H + +#define POS1 366 +#define SL1 6 +#define SL2 7 +#define SR1 19 +#define SR2 1 +#define MSK1 0xfdbffbffU +#define MSK2 0xbff7ff3fU +#define MSK3 0xfd77efffU +#define MSK4 0xbf9ff3ffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xe9528d85U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} + #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" + +#endif /* SFMT_PARAMS86243_H */ diff --git a/memory/jemalloc/src/test/include/test/SFMT-sse2.h b/memory/jemalloc/src/test/include/test/SFMT-sse2.h new file mode 100644 index 000000000..0314a163d --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT-sse2.h @@ -0,0 +1,157 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT-sse2.h + * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * @note We assume LITTLE ENDIAN in this file + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software, see LICENSE.txt + */ + +#ifndef SFMT_SSE2_H +#define SFMT_SSE2_H + +/** + * This function represents the recursion formula. + * @param a a 128-bit part of the interal state array + * @param b a 128-bit part of the interal state array + * @param c a 128-bit part of the interal state array + * @param d a 128-bit part of the interal state array + * @param mask 128-bit mask + * @return output + */ +JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, + __m128i c, __m128i d, __m128i mask) { + __m128i v, x, y, z; + + x = _mm_load_si128(a); + y = _mm_srli_epi32(*b, SR1); + z = _mm_srli_si128(c, SR2); + v = _mm_slli_epi32(d, SL1); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, v); + x = _mm_slli_si128(x, SL2); + y = _mm_and_si128(y, mask); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, y); + return z; +} + +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { + int i; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pesudorandom numbers to be generated. 
+ */ +JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + r = _mm_load_si128(&array[j + size - N].si); + _mm_store_si128(&ctx->sfmt[j].si, r); + } + for (; i < size; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + _mm_store_si128(&ctx->sfmt[j++].si, r); + r1 = r2; + r2 = r; + } +} + +#endif diff --git a/memory/jemalloc/src/test/include/test/SFMT.h b/memory/jemalloc/src/test/include/test/SFMT.h new file mode 100644 index 000000000..09c1607dd --- /dev/null +++ b/memory/jemalloc/src/test/include/test/SFMT.h @@ -0,0 +1,171 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom + * number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * The new BSD License is applied to this software. + * see LICENSE.txt + * + * @note We assume that your system has inttypes.h. If your system + * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, + * and you have to define PRIu64 and PRIx64 in this file as follows: + * @verbatim + typedef unsigned int uint32_t + typedef unsigned long long uint64_t + #define PRIu64 "llu" + #define PRIx64 "llx" +@endverbatim + * uint32_t must be exactly 32-bit unsigned integer type (no more, no + * less), and uint64_t must be exactly 64-bit unsigned integer type. + * PRIu64 and PRIx64 are used for printf function to print 64-bit + * unsigned int and 64-bit unsigned int in hexadecimal format. + */ + +#ifndef SFMT_H +#define SFMT_H + +typedef struct sfmt_s sfmt_t; + +uint32_t gen_rand32(sfmt_t *ctx); +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); +uint64_t gen_rand64(sfmt_t *ctx); +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); +void fill_array32(sfmt_t *ctx, uint32_t *array, int size); +void fill_array64(sfmt_t *ctx, uint64_t *array, int size); +sfmt_t *init_gen_rand(uint32_t seed); +sfmt_t *init_by_array(uint32_t *init_key, int key_length); +void fini_gen_rand(sfmt_t *ctx); +const char *get_idstring(void); +int get_min_array_size32(void); +int get_min_array_size64(void); + +#ifndef JEMALLOC_ENABLE_INLINE +double to_real1(uint32_t v); +double genrand_real1(sfmt_t *ctx); +double to_real2(uint32_t v); +double genrand_real2(sfmt_t *ctx); +double to_real3(uint32_t v); +double genrand_real3(sfmt_t *ctx); +double to_res53(uint64_t v); +double to_res53_mix(uint32_t x, uint32_t y); +double genrand_res53(sfmt_t *ctx); +double genrand_res53_mix(sfmt_t *ctx); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) +/* These real versions are due to Isaku Wada */ +/** generates a random number on [0,1]-real-interval */ +JEMALLOC_INLINE double to_real1(uint32_t v) +{ + return v * (1.0/4294967295.0); + /* divided by 2^32-1 */ +} + +/** generates a random number on [0,1]-real-interval */ +JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) +{ + return to_real1(gen_rand32(ctx)); +} + +/** generates a random number on [0,1)-real-interval */ +JEMALLOC_INLINE double to_real2(uint32_t v) +{ + return v * (1.0/4294967296.0); + /* divided by 2^32 */ +} + +/** generates a random number on [0,1)-real-interval */ +JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) +{ + return to_real2(gen_rand32(ctx)); +} + +/** generates a random number on (0,1)-real-interval */ +JEMALLOC_INLINE double to_real3(uint32_t v) +{ + return (((double)v) + 0.5)*(1.0/4294967296.0); + /* divided by 2^32 */ +} + +/** generates a random number on (0,1)-real-interval */ +JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) +{ + return to_real3(gen_rand32(ctx)); +} +/** These real versions are due to Isaku Wada */ + +/** generates a random number on [0,1) with 53-bit resolution*/ +JEMALLOC_INLINE double to_res53(uint64_t v) +{ + return v * (1.0/18446744073709551616.0L); +} + +/** generates a random number on [0,1) with 53-bit resolution from two + * 32 bit integers */ +JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) +{ + return to_res53(x | ((uint64_t)y << 32)); +} + +/** generates a random number on [0,1) with 53-bit resolution + */ +JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) +{ + return to_res53(gen_rand64(ctx)); +} + +/** generates a random number on [0,1) with 53-bit resolution + using 32bit integer. 
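SFMT.h above is the entire interface test code consumes; the AltiVec and SSE2 headers sit behind gen_rand_all()/gen_rand_array(). The "53-bit resolution" comments follow from the constant in to_res53(): it scales a 64-bit draw by 1.0/18446744073709551616.0, i.e. 2^-64, and since a double carries a 53-bit significand only the top 53 bits of the draw survive. A usage sketch of the public API declared above, not part of the committed diff (the [0, limit) semantics of gen_rand32_range() is an assumption from its name):

#include <stdint.h>
#include "test/SFMT.h"

/* Sketch: seed, draw from, and tear down one generator instance. */
static void sfmt_example(void) {
    sfmt_t *ctx = init_gen_rand(42);          /* 32-bit seed */
    uint32_t r  = gen_rand32(ctx);            /* uniform 32-bit draw */
    uint32_t k  = gen_rand32_range(ctx, 100); /* assumed uniform on [0, 100) */
    double   u  = genrand_real2(ctx);         /* uniform on [0, 1) */
    (void)r; (void)k; (void)u;
    fini_gen_rand(ctx);                       /* free the opaque state */
}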
+ */ +JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) +{ + uint32_t x, y; + + x = gen_rand32(ctx); + y = gen_rand32(ctx); + return to_res53_mix(x, y); +} +#endif +#endif diff --git a/memory/jemalloc/src/test/include/test/btalloc.h b/memory/jemalloc/src/test/include/test/btalloc.h new file mode 100644 index 000000000..c3f9d4df7 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/btalloc.h @@ -0,0 +1,31 @@ +/* btalloc() provides a mechanism for allocating via permuted backtraces. */ +void *btalloc(size_t size, unsigned bits); + +#define btalloc_n_proto(n) \ +void *btalloc_##n(size_t size, unsigned bits); +btalloc_n_proto(0) +btalloc_n_proto(1) + +#define btalloc_n_gen(n) \ +void * \ +btalloc_##n(size_t size, unsigned bits) \ +{ \ + void *p; \ + \ + if (bits == 0) \ + p = mallocx(size, 0); \ + else { \ + switch (bits & 0x1U) { \ + case 0: \ + p = (btalloc_0(size, bits >> 1)); \ + break; \ + case 1: \ + p = (btalloc_1(size, bits >> 1)); \ + break; \ + default: not_reached(); \ + } \ + } \ + /* Intentionally sabotage tail call optimization. */ \ + assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ + return (p); \ +} diff --git a/memory/jemalloc/src/test/include/test/jemalloc_test.h.in b/memory/jemalloc/src/test/include/test/jemalloc_test.h.in new file mode 100644 index 000000000..1f36e4695 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/jemalloc_test.h.in @@ -0,0 +1,163 @@ +#include <limits.h> +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#include <stdlib.h> +#include <stdarg.h> +#include <stdbool.h> +#include <errno.h> +#include <math.h> +#include <string.h> +#ifdef _WIN32 +# include "msvc_compat/strings.h" +#endif + +#ifdef _WIN32 +# include <windows.h> +# include "msvc_compat/windows_extra.h" +#else +# include <pthread.h> +#endif + +#include "test/jemalloc_test_defs.h" + +#ifdef JEMALLOC_OSSPIN +# include <libkern/OSAtomic.h> +#endif + +#if defined(HAVE_ALTIVEC) && !defined(__APPLE__) +# include <altivec.h> +#endif +#ifdef HAVE_SSE2 +# include <emmintrin.h> +#endif + +/******************************************************************************/ +/* + * For unit tests, expose all public and private interfaces. + */ +#ifdef JEMALLOC_UNIT_TEST +# define JEMALLOC_JET +# define JEMALLOC_MANGLE +# include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* + * For integration tests, expose the public jemalloc interfaces, but only + * expose the minimum necessary internal utility code (to avoid re-implementing + * essentially identical code within the test infrastructure). 
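The btalloc machinery above turns an integer into a call path: each low-order bit of bits selects btalloc_0 or btalloc_1 for one more stack frame, so distinct bits values allocate from distinct backtraces. A sketch of what a caller sees:

    void *p, *q;

    p = btalloc(64, 5);    /* 0b101: btalloc_1 -> btalloc_0 -> btalloc_1 -> mallocx */
    q = btalloc(64, 6);    /* 0b110: btalloc_0 -> btalloc_1 -> btalloc_1 -> mallocx */
    /* Same size, but profiling records two different backtraces. */
    dallocx(p, 0);
    dallocx(q, 0);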
+ */ +#elif defined(JEMALLOC_INTEGRATION_TEST) +# define JEMALLOC_MANGLE +# include "jemalloc/jemalloc@install_suffix@.h" +# include "jemalloc/internal/jemalloc_internal_defs.h" +# include "jemalloc/internal/jemalloc_internal_macros.h" + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; + +# define JEMALLOC_N(n) @private_namespace@##n +# include "jemalloc/internal/private_namespace.h" + +# define JEMALLOC_H_TYPES +# define JEMALLOC_H_STRUCTS +# define JEMALLOC_H_EXTERNS +# define JEMALLOC_H_INLINES +# include "jemalloc/internal/nstime.h" +# include "jemalloc/internal/util.h" +# include "jemalloc/internal/qr.h" +# include "jemalloc/internal/ql.h" +# undef JEMALLOC_H_TYPES +# undef JEMALLOC_H_STRUCTS +# undef JEMALLOC_H_EXTERNS +# undef JEMALLOC_H_INLINES + +/******************************************************************************/ +/* + * For stress tests, expose the public jemalloc interfaces with name mangling + * so that they can be tested as e.g. malloc() and free(). Also expose the + * public jemalloc interfaces with jet_ prefixes, so that stress tests can use + * a separate allocator for their internal data structures. + */ +#elif defined(JEMALLOC_STRESS_TEST) +# include "jemalloc/jemalloc@install_suffix@.h" + +# include "jemalloc/jemalloc_protos_jet.h" + +# define JEMALLOC_JET +# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/public_unnamespace.h" +# undef JEMALLOC_JET + +# include "jemalloc/jemalloc_rename.h" +# define JEMALLOC_MANGLE +# ifdef JEMALLOC_STRESS_TESTLIB +# include "jemalloc/jemalloc_mangle_jet.h" +# else +# include "jemalloc/jemalloc_mangle.h" +# endif + +/******************************************************************************/ +/* + * This header does dangerous things, the effects of which only test code + * should be subject to. + */ +#else +# error "This header cannot be included outside a testing context" +#endif + +/******************************************************************************/ +/* + * Common test utilities. + */ +#include "test/btalloc.h" +#include "test/math.h" +#include "test/mtx.h" +#include "test/mq.h" +#include "test/test.h" +#include "test/timer.h" +#include "test/thd.h" +#define MEXP 19937 +#include "test/SFMT.h" + +/******************************************************************************/ +/* + * Define always-enabled assertion macros, so that test assertions execute even + * if assertions are disabled in the library code. 
+ */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented + +#define assert(e) do { \ + if (!(e)) { \ + malloc_printf( \ + "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + malloc_printf( \ + "<jemalloc>: %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define not_implemented() do { \ + malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (!(e)) \ + not_implemented(); \ +} while (0) diff --git a/memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in b/memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in new file mode 100644 index 000000000..5cc8532a3 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/jemalloc_test_defs.h.in @@ -0,0 +1,9 @@ +#include "jemalloc/internal/jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +/* + * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its + * dependencies are notoriously unportable in practice. + */ +#undef HAVE_SSE2 +#undef HAVE_ALTIVEC diff --git a/memory/jemalloc/src/test/include/test/math.h b/memory/jemalloc/src/test/include/test/math.h new file mode 100644 index 000000000..b057b29a1 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/math.h @@ -0,0 +1,311 @@ +#ifndef JEMALLOC_ENABLE_INLINE +double ln_gamma(double x); +double i_gamma(double x, double p, double ln_gamma_p); +double pt_norm(double p); +double pt_chi2(double p, double df, double ln_gamma_df_2); +double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) +/* + * Compute the natural log of Gamma(x), accurate to 10 decimal places. + * + * This implementation is based on: + * + * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function + * [S14]. Communications of the ACM 9(9):684. + */ +JEMALLOC_INLINE double +ln_gamma(double x) +{ + double f, z; + + assert(x > 0.0); + + if (x < 7.0) { + f = 1.0; + z = x; + while (z < 7.0) { + f *= z; + z += 1.0; + } + x = z; + f = -log(f); + } else + f = 0.0; + + z = 1.0 / (x * x); + + return (f + (x-0.5) * log(x) - x + 0.918938533204673 + + (((-0.000595238095238 * z + 0.000793650793651) * z - + 0.002777777777778) * z + 0.083333333333333) / x); +} + +/* + * Compute the incomplete Gamma ratio for [0..x], where p is the shape + * parameter, and ln_gamma_p is ln_gamma(p). + * + * This implementation is based on: + * + * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. + * Applied Statistics 19:285-287. + */ +JEMALLOC_INLINE double +i_gamma(double x, double p, double ln_gamma_p) +{ + double acu, factor, oflo, gin, term, rn, a, b, an, dif; + double pn[6]; + unsigned i; + + assert(p > 0.0); + assert(x >= 0.0); + + if (x == 0.0) + return (0.0); + + acu = 1.0e-10; + oflo = 1.0e30; + gin = 0.0; + factor = exp(p * log(x) - x - ln_gamma_p); + + if (x <= 1.0 || x < p) { + /* Calculation by series expansion. */ + gin = 1.0; + term = 1.0; + rn = p; + + while (true) { + rn += 1.0; + term *= x / rn; + gin += term; + if (term <= acu) { + gin *= factor / p; + return (gin); + } + } + } else { + /* Calculation by continued fraction. 
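Two quick identities pin down the math helpers so far (a sketch; standard <math.h> only): Gamma(5) = 4! = 24, and shape p = 1 reduces the regularized incomplete Gamma to the exponential CDF.

    #include <math.h>

    /* ln_gamma() is documented accurate to 10 decimal places. */
    assert(fabs(ln_gamma(5.0) - log(24.0)) < 1e-9);
    /* With p = 1 (and ln_gamma(1) = 0), i_gamma(x, 1, 0) = 1 - exp(-x). */
    assert(fabs(i_gamma(2.0, 1.0, ln_gamma(1.0)) - (1.0 - exp(-2.0))) < 1e-8);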
*/ + a = 1.0 - p; + b = a + x + 1.0; + term = 0.0; + pn[0] = 1.0; + pn[1] = x; + pn[2] = x + 1.0; + pn[3] = x * b; + gin = pn[2] / pn[3]; + + while (true) { + a += 1.0; + b += 2.0; + term += 1.0; + an = a * term; + for (i = 0; i < 2; i++) + pn[i+4] = b * pn[i+2] - an * pn[i]; + if (pn[5] != 0.0) { + rn = pn[4] / pn[5]; + dif = fabs(gin - rn); + if (dif <= acu && dif <= acu * rn) { + gin = 1.0 - factor * gin; + return (gin); + } + gin = rn; + } + for (i = 0; i < 4; i++) + pn[i] = pn[i+2]; + + if (fabs(pn[4]) >= oflo) { + for (i = 0; i < 4; i++) + pn[i] /= oflo; + } + } + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the normal distribution, + * compute the limit on the definite integral from [-inf..z] that satisfies p, + * accurate to 16 decimal places. + * + * This implementation is based on: + * + * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal + * distribution. Applied Statistics 37(3):477-484. + */ +JEMALLOC_INLINE double +pt_norm(double p) +{ + double q, r, ret; + + assert(p > 0.0 && p < 1.0); + + q = p - 0.5; + if (fabs(q) <= 0.425) { + /* p close to 1/2. */ + r = 0.180625 - q * q; + return (q * (((((((2.5090809287301226727e3 * r + + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * + r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) + * r + 3.3871328727963666080e0) / + (((((((5.2264952788528545610e3 * r + + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * + r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) + * r + 1.0)); + } else { + if (q < 0.0) + r = p; + else + r = 1.0 - p; + assert(r > 0.0); + + r = sqrt(-log(r)); + if (r <= 5.0) { + /* p neither close to 1/2 nor 0 or 1. */ + r -= 1.6; + ret = ((((((((7.74545014278341407640e-4 * r + + 2.27238449892691845833e-2) * r + + 2.41780725177450611770e-1) * r + + 1.27045825245236838258e0) * r + + 3.64784832476320460504e0) * r + + 5.76949722146069140550e0) * r + + 4.63033784615654529590e0) * r + + 1.42343711074968357734e0) / + (((((((1.05075007164441684324e-9 * r + + 5.47593808499534494600e-4) * r + + 1.51986665636164571966e-2) + * r + 1.48103976427480074590e-1) * r + + 6.89767334985100004550e-1) * r + + 1.67638483018380384940e0) * r + + 2.05319162663775882187e0) * r + 1.0)); + } else { + /* p near 0 or 1. */ + r -= 5.0; + ret = ((((((((2.01033439929228813265e-7 * r + + 2.71155556874348757815e-5) * r + + 1.24266094738807843860e-3) * r + + 2.65321895265761230930e-2) * r + + 2.96560571828504891230e-1) * r + + 1.78482653991729133580e0) * r + + 5.46378491116411436990e0) * r + + 6.65790464350110377720e0) / + (((((((2.04426310338993978564e-15 * r + + 1.42151175831644588870e-7) * r + + 1.84631831751005468180e-5) * r + + 7.86869131145613259100e-4) * r + + 1.48753612908506148525e-2) * r + + 1.36929880922735805310e-1) * r + + 5.99832206555887937690e-1) + * r + 1.0)); + } + if (q < 0.0) + ret = -ret; + return (ret); + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution + * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute + * the upper limit on the definite integral from [0..z] that satisfies p, + * accurate to 12 decimal places. + * + * This implementation is based on: + * + * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of + * the Chi^2 distribution. Applied Statistics 24(3):385-388. + * + * Shea, B.L. 
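A sanity check for the inverse normal CDF above, using the textbook two-sided 5% critical value (a sketch):

    /* 97.5th percentile of the standard normal: ~1.9599640. */
    assert(fabs(pt_norm(0.975) - 1.9599640) < 1e-6);
    /* The median maps to exactly zero (q = 0 zeroes the polynomial). */
    assert(pt_norm(0.5) == 0.0);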
(1991) Algorithm AS R85: A remark on AS 91: The percentage + * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. + */ +JEMALLOC_INLINE double +pt_chi2(double p, double df, double ln_gamma_df_2) +{ + double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; + unsigned i; + + assert(p >= 0.0 && p < 1.0); + assert(df > 0.0); + + e = 5.0e-7; + aa = 0.6931471805; + + xx = 0.5 * df; + c = xx - 1.0; + + if (df < -1.24 * log(p)) { + /* Starting approximation for small Chi^2. */ + ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); + if (ch - e < 0.0) + return (ch); + } else { + if (df > 0.32) { + x = pt_norm(p); + /* + * Starting approximation using Wilson and Hilferty + * estimate. + */ + p1 = 0.222222 / df; + ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); + /* Starting approximation for p tending to 1. */ + if (ch > 2.2 * df + 6.0) { + ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + + ln_gamma_df_2); + } + } else { + ch = 0.4; + a = log(1.0 - p); + while (true) { + q = ch; + p1 = 1.0 + ch * (4.67 + ch); + p2 = ch * (6.73 + ch * (6.66 + ch)); + t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch + * (13.32 + 3.0 * ch)) / p2; + ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + + c * aa) * p2 / p1) / t; + if (fabs(q / ch - 1.0) - 0.01 <= 0.0) + break; + } + } + } + + for (i = 0; i < 20; i++) { + /* Calculation of seven-term Taylor series. */ + q = ch; + p1 = 0.5 * ch; + if (p1 < 0.0) + return (-1.0); + p2 = p - i_gamma(p1, xx, ln_gamma_df_2); + t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); + b = t / ch; + a = 0.5 * t - b * c; + s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + + 60.0 * a))))) / 420.0; + s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * + a)))) / 2520.0; + s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; + s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * + (889.0 + 1740.0 * a))) / 5040.0; + s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; + s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; + ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 + - b * (s4 - b * (s5 - b * s6)))))); + if (fabs(q / ch - 1.0) <= e) + break; + } + + return (ch); +} + +/* + * Given a value p in [0..1] and Gamma distribution shape and scale parameters, + * compute the upper limit on the definite integral from [0..z] that satisfies + * p. + */ +JEMALLOC_INLINE double +pt_gamma(double p, double shape, double scale, double ln_gamma_shape) +{ + + return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); +} +#endif diff --git a/memory/jemalloc/src/test/include/test/mq.h b/memory/jemalloc/src/test/include/test/mq.h new file mode 100644 index 000000000..7c4df4931 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/mq.h @@ -0,0 +1,109 @@ +void mq_nanosleep(unsigned ns); + +/* + * Simple templated message queue implementation that relies on only mutexes for + * synchronization (which reduces portability issues). Given the following + * setup: + * + * typedef struct mq_msg_s mq_msg_t; + * struct mq_msg_s { + * mq_msg(mq_msg_t) link; + * [message data] + * }; + * mq_gen(, mq_, mq_t, mq_msg_t, link) + * + * The API is as follows: + * + * bool mq_init(mq_t *mq); + * void mq_fini(mq_t *mq); + * unsigned mq_count(mq_t *mq); + * mq_msg_t *mq_tryget(mq_t *mq); + * mq_msg_t *mq_get(mq_t *mq); + * void mq_put(mq_t *mq, mq_msg_t *msg); + * + * The message queue linkage embedded in each message is to be treated as + * externally opaque (no need to initialize or clean up externally). 
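pt_gamma() above is a thin rescaling of pt_chi2(): a Gamma with shape k and scale s is a Chi^2 with 2k degrees of freedom scaled by s/2. Checking both against the textbook value (a sketch):

    double df = 1.0;
    /* 95th percentile of Chi^2 with 1 degree of freedom: ~3.841459. */
    double crit = pt_chi2(0.95, df, ln_gamma(df / 2.0));
    assert(fabs(crit - 3.841459) < 1e-5);
    /* Gamma(shape=0.5, scale=2.0) is the same distribution. */
    assert(fabs(pt_gamma(0.95, 0.5, 2.0, ln_gamma(0.5)) - crit) < 1e-12);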
mq_fini() + * does not perform any cleanup of messages, since it knows nothing of their + * payloads. + */ +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) + +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ +typedef struct { \ + mtx_t lock; \ + ql_head(a_mq_msg_type) msgs; \ + unsigned count; \ +} a_mq_type; \ +a_attr bool \ +a_prefix##init(a_mq_type *mq) { \ + \ + if (mtx_init(&mq->lock)) \ + return (true); \ + ql_new(&mq->msgs); \ + mq->count = 0; \ + return (false); \ +} \ +a_attr void \ +a_prefix##fini(a_mq_type *mq) \ +{ \ + \ + mtx_fini(&mq->lock); \ +} \ +a_attr unsigned \ +a_prefix##count(a_mq_type *mq) \ +{ \ + unsigned count; \ + \ + mtx_lock(&mq->lock); \ + count = mq->count; \ + mtx_unlock(&mq->lock); \ + return (count); \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##tryget(a_mq_type *mq) \ +{ \ + a_mq_msg_type *msg; \ + \ + mtx_lock(&mq->lock); \ + msg = ql_first(&mq->msgs); \ + if (msg != NULL) { \ + ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ + mq->count--; \ + } \ + mtx_unlock(&mq->lock); \ + return (msg); \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##get(a_mq_type *mq) \ +{ \ + a_mq_msg_type *msg; \ + unsigned ns; \ + \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) \ + return (msg); \ + \ + ns = 1; \ + while (true) { \ + mq_nanosleep(ns); \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) \ + return (msg); \ + if (ns < 1000*1000*1000) { \ + /* Double sleep time, up to max 1 second. */ \ + ns <<= 1; \ + if (ns > 1000*1000*1000) \ + ns = 1000*1000*1000; \ + } \ + } \ +} \ +a_attr void \ +a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ +{ \ + \ + mtx_lock(&mq->lock); \ + ql_elm_new(msg, a_field); \ + ql_tail_insert(&mq->msgs, msg, a_field); \ + mq->count++; \ + mtx_unlock(&mq->lock); \ +} diff --git a/memory/jemalloc/src/test/include/test/mtx.h b/memory/jemalloc/src/test/include/test/mtx.h new file mode 100644 index 000000000..58afbc3d1 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/mtx.h @@ -0,0 +1,23 @@ +/* + * mtx is a slightly simplified version of malloc_mutex. This code duplication + * is unfortunate, but there are allocator bootstrapping considerations that + * would leak into the test infrastructure if malloc_mutex were used directly + * in tests. + */ + +typedef struct { +#ifdef _WIN32 + CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#else + pthread_mutex_t lock; +#endif +} mtx_t; + +bool mtx_init(mtx_t *mtx); +void mtx_fini(mtx_t *mtx); +void mtx_lock(mtx_t *mtx); +void mtx_unlock(mtx_t *mtx); diff --git a/memory/jemalloc/src/test/include/test/test.h b/memory/jemalloc/src/test/include/test/test.h new file mode 100644 index 000000000..c8112eb8b --- /dev/null +++ b/memory/jemalloc/src/test/include/test/test.h @@ -0,0 +1,333 @@ +#define ASSERT_BUFSIZE 256 + +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ + t a_ = (a); \ + t b_ = (b); \ + if (!(a_ cmp b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) "#cmp" (%s) --> " \ + "%"pri" "#neg_cmp" %"pri": ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_, b_); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_ne(a, b, ...) 
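Instantiating the template exactly as the comment above prescribes gives a working queue; a sketch with static linkage and an int payload (both arbitrary choices here):

    typedef struct mq_msg_s mq_msg_t;
    struct mq_msg_s {
        mq_msg(mq_msg_t) link;
        int payload;            /* message data */
    };
    mq_gen(static, mq_, mq_t, mq_msg_t, link)

    static void
    mq_example(void)
    {
        mq_t mq;
        mq_msg_t msg, *recv;

        assert_false(mq_init(&mq), "Unexpected mq_init() failure");
        msg.payload = 42;
        mq_put(&mq, &msg);          /* links msg in; no copy is made */
        assert_u_eq(mq_count(&mq), 1, "Expected one queued message");
        recv = mq_get(&mq);         /* spins with nanosleep backoff if empty */
        assert_d_eq(recv->payload, 42, "Unexpected payload");
        mq_fini(&mq);
    }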
assert_cmp(void *, a, b, !=, \ + ==, "p", __VA_ARGS__) +#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ + ==, "p", __VA_ARGS__) + +#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) + +#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) + +#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) + +#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) + +#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ + !=, "ld", __VA_ARGS__) +#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ + ==, "ld", __VA_ARGS__) +#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ + >=, "ld", __VA_ARGS__) +#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ + >, "ld", __VA_ARGS__) +#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ + <, "ld", __VA_ARGS__) +#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ + <=, "ld", __VA_ARGS__) + +#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ + a, b, ==, !=, "lu", __VA_ARGS__) +#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ + a, b, !=, ==, "lu", __VA_ARGS__) +#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ + a, b, <, >=, "lu", __VA_ARGS__) +#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ + a, b, <=, >, "lu", __VA_ARGS__) +#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ + a, b, >=, <, "lu", __VA_ARGS__) +#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ + a, b, >, <=, "lu", __VA_ARGS__) + +#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ + !=, "qd", __VA_ARGS__) +#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ + ==, "qd", __VA_ARGS__) +#define assert_qd_lt(a, b, ...) 
assert_cmp(long long, a, b, <, \ + >=, "qd", __VA_ARGS__) +#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ + >, "qd", __VA_ARGS__) +#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ + <, "qd", __VA_ARGS__) +#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ + <=, "qd", __VA_ARGS__) + +#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ + a, b, ==, !=, "qu", __VA_ARGS__) +#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ + a, b, !=, ==, "qu", __VA_ARGS__) +#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <, >=, "qu", __VA_ARGS__) +#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <=, >, "qu", __VA_ARGS__) +#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >=, <, "qu", __VA_ARGS__) +#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >, <=, "qu", __VA_ARGS__) + +#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ + !=, "jd", __VA_ARGS__) +#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ + ==, "jd", __VA_ARGS__) +#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ + >=, "jd", __VA_ARGS__) +#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ + >, "jd", __VA_ARGS__) +#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ + <, "jd", __VA_ARGS__) +#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ + <=, "jd", __VA_ARGS__) + +#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ + !=, "ju", __VA_ARGS__) +#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ + ==, "ju", __VA_ARGS__) +#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ + >=, "ju", __VA_ARGS__) +#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ + >, "ju", __VA_ARGS__) +#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ + <, "ju", __VA_ARGS__) +#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ + <=, "ju", __VA_ARGS__) + +#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ + !=, "zd", __VA_ARGS__) +#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ + ==, "zd", __VA_ARGS__) +#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ + >=, "zd", __VA_ARGS__) +#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ + >, "zd", __VA_ARGS__) +#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ + <, "zd", __VA_ARGS__) +#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ + <=, "zd", __VA_ARGS__) + +#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ + !=, "zu", __VA_ARGS__) +#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ + ==, "zu", __VA_ARGS__) +#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ + >=, "zu", __VA_ARGS__) +#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ + >, "zu", __VA_ARGS__) +#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ + <, "zu", __VA_ARGS__) +#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ + <=, "zu", __VA_ARGS__) + +#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ + !=, FMTd32, __VA_ARGS__) +#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ + ==, FMTd32, __VA_ARGS__) +#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ + >=, FMTd32, __VA_ARGS__) +#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ + >, FMTd32, __VA_ARGS__) +#define assert_d32_ge(a, b, ...) 
assert_cmp(int32_t, a, b, >=, \ + <, FMTd32, __VA_ARGS__) +#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ + <=, FMTd32, __VA_ARGS__) + +#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ + !=, FMTu32, __VA_ARGS__) +#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ + ==, FMTu32, __VA_ARGS__) +#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ + >=, FMTu32, __VA_ARGS__) +#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ + >, FMTu32, __VA_ARGS__) +#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ + <, FMTu32, __VA_ARGS__) +#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ + <=, FMTu32, __VA_ARGS__) + +#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ + !=, FMTd64, __VA_ARGS__) +#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ + ==, FMTd64, __VA_ARGS__) +#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ + >=, FMTd64, __VA_ARGS__) +#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ + >, FMTd64, __VA_ARGS__) +#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ + <, FMTd64, __VA_ARGS__) +#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ + <=, FMTd64, __VA_ARGS__) + +#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ + !=, FMTu64, __VA_ARGS__) +#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ + ==, FMTu64, __VA_ARGS__) +#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ + >=, FMTu64, __VA_ARGS__) +#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ + >, FMTu64, __VA_ARGS__) +#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ + <, FMTu64, __VA_ARGS__) +#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ + <=, FMTu64, __VA_ARGS__) + +#define assert_b_eq(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ == b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) == (%s) --> %s != %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_b_ne(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ != b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) != (%s) --> %s == %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) +#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) + +#define assert_str_eq(a, b, ...) do { \ + if (strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) same as (%s) --> " \ + "\"%s\" differs from \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_str_ne(a, b, ...) 
do { \ + if (!strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) differs from (%s) --> " \ + "\"%s\" same as \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_not_reached(...) do { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Unreachable code reached: ", \ + __func__, __FILE__, __LINE__); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ +} while (0) + +/* + * If this enum changes, corresponding changes in test/test.sh.in are also + * necessary. + */ +typedef enum { + test_status_pass = 0, + test_status_skip = 1, + test_status_fail = 2, + + test_status_count = 3 +} test_status_t; + +typedef void (test_t)(void); + +#define TEST_BEGIN(f) \ +static void \ +f(void) \ +{ \ + p_test_init(#f); + +#define TEST_END \ + goto label_test_end; \ +label_test_end: \ + p_test_fini(); \ +} + +#define test(...) \ + p_test(__VA_ARGS__, NULL) + +#define test_no_malloc_init(...) \ + p_test_no_malloc_init(__VA_ARGS__, NULL) + +#define test_skip_if(e) do { \ + if (e) { \ + test_skip("%s:%s:%d: Test skipped: (%s)", \ + __func__, __FILE__, __LINE__, #e); \ + goto label_test_end; \ + } \ +} while (0) + +void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); +void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +/* For private use by macros. */ +test_status_t p_test(test_t *t, ...); +test_status_t p_test_no_malloc_init(test_t *t, ...); +void p_test_init(const char *name); +void p_test_fini(void); +void p_test_fail(const char *prefix, const char *message); diff --git a/memory/jemalloc/src/test/include/test/thd.h b/memory/jemalloc/src/test/include/test/thd.h new file mode 100644 index 000000000..47a51262e --- /dev/null +++ b/memory/jemalloc/src/test/include/test/thd.h @@ -0,0 +1,9 @@ +/* Abstraction layer for threading in tests. */ +#ifdef _WIN32 +typedef HANDLE thd_t; +#else +typedef pthread_t thd_t; +#endif + +void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); +void thd_join(thd_t thd, void **ret); diff --git a/memory/jemalloc/src/test/include/test/timer.h b/memory/jemalloc/src/test/include/test/timer.h new file mode 100644 index 000000000..ace6191b8 --- /dev/null +++ b/memory/jemalloc/src/test/include/test/timer.h @@ -0,0 +1,11 @@ +/* Simple timer, for use in benchmark reporting. 
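Assembled, the harness above reduces a test file to a declarative shape; the integration tests that follow in this patch all match this sketch:

    #include "test/jemalloc_test.h"

    TEST_BEGIN(test_example)
    {
        void *p;

        test_skip_if(sizeof(void *) < 8);    /* e.g. require a 64-bit target */
        p = mallocx(42, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() error");
        assert_zu_ge(sallocx(p, 0), 42, "Usable size too small");
        dallocx(p, 0);
    }
    TEST_END

    int
    main(void)
    {

        return (test(
            test_example));
    }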
*/ + +typedef struct { + nstime_t t0; + nstime_t t1; +} timedelta_t; + +void timer_start(timedelta_t *timer); +void timer_stop(timedelta_t *timer); +uint64_t timer_usec(const timedelta_t *timer); +void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); diff --git a/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c b/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c new file mode 100644 index 000000000..30c203ae6 --- /dev/null +++ b/memory/jemalloc/src/test/integration/MALLOCX_ARENA.c @@ -0,0 +1,69 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +static bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + unsigned thread_ind = (unsigned)(uintptr_t)arg; + unsigned arena_ind; + void *p; + size_t sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, + "Error in arenas.extend"); + + if (thread_ind % 4 != 3) { + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + const char *dss_precs[] = {"disabled", "primary", "secondary"}; + unsigned prec_ind = thread_ind % + (sizeof(dss_precs)/sizeof(char*)); + const char *dss = dss_precs[prec_ind]; + int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT; + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Error in mallctlnametomib()"); + mib[1] = arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, + sizeof(const char *)), expected_err, + "Error in mallctlbymib()"); + } + + p = mallocx(1, MALLOCX_ARENA(arena_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + return (NULL); +} + +TEST_BEGIN(test_MALLOCX_ARENA) +{ + thd_t thds[NTHREADS]; + unsigned i; + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)(uintptr_t)i); + } + + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END + +int +main(void) +{ + + return (test( + test_MALLOCX_ARENA)); +} diff --git a/memory/jemalloc/src/test/integration/aligned_alloc.c b/memory/jemalloc/src/test/integration/aligned_alloc.c new file mode 100644 index 000000000..58438421d --- /dev/null +++ b/memory/jemalloc/src/test/integration/aligned_alloc.c @@ -0,0 +1,139 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +#define MAXALIGN (((size_t)1) << 23) + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. 
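The timer above exists for exactly this kind of report; a sketch, where the loop body and iteration count are arbitrary:

    timedelta_t timer;
    unsigned i;

    timer_start(&timer);
    for (i = 0; i < 1000000; i++) {
        void *p = mallocx(64, 0);
        dallocx(p, 0);
    }
    timer_stop(&timer);
    malloc_printf("1M alloc/dalloc pairs: %"FMTu64" usec\n",
        timer_usec(&timer));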
+ */ +static void +purge(void) +{ + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_alignment_errors) +{ + size_t alignment; + void *p; + + alignment = 0; + set_errno(0); + p = aligned_alloc(alignment, 1); + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", alignment); + + for (alignment = sizeof(size_t); alignment < MAXALIGN; + alignment <<= 1) { + set_errno(0); + p = aligned_alloc(alignment + 1, 1); + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", + alignment + 1); + } +} +TEST_END + +TEST_BEGIN(test_oom_errors) +{ + size_t alignment, size; + void *p; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + size = 0x80000000LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + size = UINT64_C(0xc000000000000001); +#else + alignment = 0x40000000LU; + size = 0xc0000001LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + size = UINT64_C(0xfffffffffffffff0); +#else + size = 0xfffffff0LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ +#define NITER 4 + size_t alignment, size, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (size = 1; + size < 3 * alignment && size < (1U << 31); + size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + ps[i] = aligned_alloc(alignment, size); + if (ps[i] == NULL) { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); + } + total += malloc_usable_size(ps[i]); + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + free(ps[i]); + ps[i] = NULL; + } + } + } + purge(); + } +#undef NITER +} +TEST_END + +int +main(void) +{ + + return (test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size)); +} diff --git a/memory/jemalloc/src/test/integration/allocated.c b/memory/jemalloc/src/test/integration/allocated.c new file mode 100644 index 000000000..3630e80ce --- /dev/null +++ b/memory/jemalloc/src/test/integration/allocated.c @@ -0,0 +1,125 @@ +#include "test/jemalloc_test.h" + +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + int err; + void *p; + uint64_t a0, a1, d0, d1; + uint64_t *ap0, *ap1, *dp0, *dp1; + size_t sz, usize; + + sz = sizeof(a0); + if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(ap0); + if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; 
+ test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*ap0, a0, + "\"thread.allocatedp\" should provide a pointer to internal " + "storage"); + + sz = sizeof(d0); + if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(dp0); + if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*dp0, d0, + "\"thread.deallocatedp\" should provide a pointer to internal " + "storage"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() error"); + + sz = sizeof(a1); + mallctl("thread.allocated", &a1, &sz, NULL, 0); + sz = sizeof(ap1); + mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); + assert_u64_eq(*ap1, a1, + "Dereferenced \"thread.allocatedp\" value should equal " + "\"thread.allocated\" value"); + assert_ptr_eq(ap0, ap1, + "Pointer returned by \"thread.allocatedp\" should not change"); + + usize = malloc_usable_size(p); + assert_u64_le(a0 + usize, a1, + "Allocated memory counter should increase by at least the amount " + "explicitly allocated"); + + free(p); + + sz = sizeof(d1); + mallctl("thread.deallocated", &d1, &sz, NULL, 0); + sz = sizeof(dp1); + mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); + assert_u64_eq(*dp1, d1, + "Dereferenced \"thread.deallocatedp\" value should equal " + "\"thread.deallocated\" value"); + assert_ptr_eq(dp0, dp1, + "Pointer returned by \"thread.deallocatedp\" should not change"); + + assert_u64_le(d0 + usize, d1, + "Deallocated memory counter should increase by at least the amount " + "explicitly deallocated"); + + return (NULL); +label_ENOENT: + assert_false(config_stats, + "ENOENT should only be returned if stats are disabled"); + test_skip("\"thread.allocated\" mallctl not available"); + return (NULL); +} + +TEST_BEGIN(test_main_thread) +{ + + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) +{ + + /* Run tests multiple times to check for bad interactions. */ + return (test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread)); +} diff --git a/memory/jemalloc/src/test/integration/chunk.c b/memory/jemalloc/src/test/integration/chunk.c new file mode 100644 index 000000000..ff9bf967a --- /dev/null +++ b/memory/jemalloc/src/test/integration/chunk.c @@ -0,0 +1,293 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + +static chunk_hooks_t orig_hooks; +static chunk_hooks_t old_hooks; + +static bool do_dalloc = true; +static bool do_decommit; + +static bool did_alloc; +static bool did_dalloc; +static bool did_commit; +static bool did_decommit; +static bool did_purge; +static bool did_split; +static bool did_merge; + +#if 0 +# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) +#else +# define TRACE_HOOK(fmt, ...) +#endif + +void * +chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit, unsigned arena_ind) +{ + + TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, " + "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment, + *zero ? "true" : "false", *commit ? 
"true" : "false", arena_ind); + did_alloc = true; + return (old_hooks.alloc(new_addr, size, alignment, zero, commit, + arena_ind)); +} + +bool +chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n", + __func__, chunk, size, committed ? "true" : "false", arena_ind); + did_dalloc = true; + if (!do_dalloc) + return (true); + return (old_hooks.dalloc(chunk, size, committed, arena_ind)); +} + +bool +chunk_commit(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + bool err; + + TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " + "arena_ind=%u)\n", __func__, chunk, size, offset, length, + arena_ind); + err = old_hooks.commit(chunk, size, offset, length, arena_ind); + did_commit = !err; + return (err); +} + +bool +chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + bool err; + + TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " + "arena_ind=%u)\n", __func__, chunk, size, offset, length, + arena_ind); + if (!do_decommit) + return (true); + err = old_hooks.decommit(chunk, size, offset, length, arena_ind); + did_decommit = !err; + return (err); +} + +bool +chunk_purge(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu " + "arena_ind=%u)\n", __func__, chunk, size, offset, length, + arena_ind); + did_purge = true; + return (old_hooks.purge(chunk, size, offset, length, arena_ind)); +} + +bool +chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, + bool committed, unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, " + "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a, + size_b, committed ? "true" : "false", arena_ind); + did_split = true; + return (old_hooks.split(chunk, size, size_a, size_b, committed, + arena_ind)); +} + +bool +chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + bool committed, unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, " + "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b, + size_b, committed ? "true" : "false", arena_ind); + did_merge = true; + return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b, + committed, arena_ind)); +} + +TEST_BEGIN(test_chunk) +{ + void *p; + size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz; + unsigned arena_ind; + int flags; + size_t hooks_mib[3], purge_mib[3]; + size_t hooks_miblen, purge_miblen; + chunk_hooks_t new_hooks = { + chunk_alloc, + chunk_dalloc, + chunk_commit, + chunk_decommit, + chunk_purge, + chunk_split, + chunk_merge + }; + bool xallocx_success_a, xallocx_success_b, xallocx_success_c; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + /* Install custom chunk hooks. 
*/ + hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib, + &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_mib[1] = (size_t)arena_ind; + old_size = sizeof(chunk_hooks_t); + new_size = sizeof(chunk_hooks_t); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size, + &new_hooks, new_size), 0, "Unexpected chunk_hooks error"); + orig_hooks = old_hooks; + assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); + assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, + "Unexpected dalloc error"); + assert_ptr_ne(old_hooks.commit, chunk_commit, + "Unexpected commit error"); + assert_ptr_ne(old_hooks.decommit, chunk_decommit, + "Unexpected decommit error"); + assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error"); + assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error"); + assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error"); + + /* Get large size classes. */ + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, + "Unexpected arenas.lrun.0.size failure"); + assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0, + "Unexpected arenas.lrun.1.size failure"); + + /* Get huge size classes. */ + assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, + "Unexpected arenas.hchunk.0.size failure"); + assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0, + "Unexpected arenas.hchunk.1.size failure"); + assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0, + "Unexpected arenas.hchunk.2.size failure"); + + /* Test dalloc/decommit/purge cascade. */ + purge_miblen = sizeof(purge_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), + 0, "Unexpected mallctlnametomib() failure"); + purge_mib[1] = (size_t)arena_ind; + do_dalloc = false; + do_decommit = false; + p = mallocx(huge0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_dalloc = false; + did_decommit = false; + did_purge = false; + did_split = false; + xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_a) { + assert_true(did_dalloc, "Expected dalloc"); + assert_false(did_decommit, "Unexpected decommit"); + assert_true(did_purge, "Expected purge"); + } + assert_true(did_split, "Expected split"); + dallocx(p, flags); + do_dalloc = true; + + /* Test decommit/commit and observe split/merge. */ + do_dalloc = false; + do_decommit = true; + p = mallocx(huge0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_decommit = false; + did_commit = false; + did_split = false; + did_merge = false; + xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_b) + assert_true(did_split, "Expected split"); + xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2); + assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); + if (xallocx_success_b && xallocx_success_c) + assert_true(did_merge, "Expected merge"); + dallocx(p, flags); + do_dalloc = true; + do_decommit = false; + + /* Test purge for partial-chunk huge allocations. 
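The translate-once-then-index pattern above generalizes to any mallctl name with a numeric component (a sketch):

    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);
    size_t val, sz = sizeof(size_t);

    assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[2] = 2;    /* re-point the "0" component at size class 2 */
    assert_d_eq(mallctlbymib(mib, miblen, &val, &sz, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
    /* val now holds arenas.hchunk.2.size, with no repeated name lookup. */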
*/ + if (huge0 * 2 > huge2) { + /* + * There are at least four size classes per doubling, so a + * successful xallocx() from size=huge2 to size=huge1 is + * guaranteed to leave trailing purgeable memory. + */ + p = mallocx(huge2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_purge = false; + assert_zu_eq(xallocx(p, huge1, 0, flags), huge1, + "Unexpected xallocx() failure"); + assert_true(did_purge, "Expected purge"); + dallocx(p, flags); + } + + /* Test decommit for large allocations. */ + do_decommit = true; + p = mallocx(large1, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + did_decommit = false; + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() failure"); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + did_commit = false; + assert_zu_eq(xallocx(p, large1, 0, flags), large1, + "Unexpected xallocx() failure"); + assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); + dallocx(p, flags); + do_decommit = false; + + /* Make sure non-huge allocation succeeds. */ + p = mallocx(42, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, flags); + + /* Restore chunk hooks. */ + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, + &old_hooks, new_size), 0, "Unexpected chunk_hooks error"); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size, + NULL, 0), 0, "Unexpected chunk_hooks error"); + assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, + "Unexpected alloc error"); + assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, + "Unexpected dalloc error"); + assert_ptr_eq(old_hooks.commit, orig_hooks.commit, + "Unexpected commit error"); + assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit, + "Unexpected decommit error"); + assert_ptr_eq(old_hooks.purge, orig_hooks.purge, + "Unexpected purge error"); + assert_ptr_eq(old_hooks.split, orig_hooks.split, + "Unexpected split error"); + assert_ptr_eq(old_hooks.merge, orig_hooks.merge, + "Unexpected merge error"); +} +TEST_END + +int +main(void) +{ + + return (test(test_chunk)); +} diff --git a/memory/jemalloc/src/test/integration/mallocx.c b/memory/jemalloc/src/test/integration/mallocx.c new file mode 100644 index 000000000..43b76ebac --- /dev/null +++ b/memory/jemalloc/src/test/integration/mallocx.c @@ -0,0 +1,234 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); + + return (ret); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) +{ + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_overflow) +{ + size_t hugemax; + + hugemax = get_huge_size(get_nhuge()-1); + + assert_ptr_null(mallocx(hugemax+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1); + + assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(mallocx(SIZE_T_MAX, 0), + "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); +} +TEST_END + +TEST_BEGIN(test_oom) +{ + size_t hugemax; + bool oom; + void *ptrs[3]; + unsigned i; + + /* + * It should be impossible to allocate three objects that each consume + * nearly half the virtual address space. + */ + hugemax = get_huge_size(get_nhuge()-1); + oom = false; + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + ptrs[i] = mallocx(hugemax, 0); + if (ptrs[i] == NULL) + oom = true; + } + assert_true(oom, + "Expected OOM during series of calls to mallocx(size=%zu, 0)", + hugemax); + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + if (ptrs[i] != NULL) + dallocx(ptrs[i], 0); + } + purge(); + +#if LG_SIZEOF_PTR == 3 + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x8000000000000000ULL)), + "Expected OOM for mallocx()"); + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x80000000)), + "Expected OOM for mallocx()"); +#else + assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), + "Expected OOM for mallocx()"); +#endif +} +TEST_END + +TEST_BEGIN(test_basic) +{ +#define MAXSZ (((size_t)1) << 23) + size_t sz; + + for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { + size_t nsz, rsz; + void *p; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, 0); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + dallocx(p, 0); + + p = mallocx(sz, 0); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ZERO); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", + nsz); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + dallocx(p, 0); + purge(); + } +#undef MAXSZ +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ +#define MAXALIGN (((size_t)1) << 23) +#define NITER 4 + size_t nsz, rsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + 
assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_ptr_not_null(ps[i], + "mallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; + } + } + } + purge(); + } +#undef MAXALIGN +#undef NITER +} +TEST_END + +int +main(void) +{ + + return (test( + test_overflow, + test_oom, + test_basic, + test_alignment_and_size)); +} diff --git a/memory/jemalloc/src/test/integration/overflow.c b/memory/jemalloc/src/test/integration/overflow.c new file mode 100644 index 000000000..303d9b2d3 --- /dev/null +++ b/memory/jemalloc/src/test/integration/overflow.c @@ -0,0 +1,49 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_overflow) +{ + unsigned nhchunks; + size_t mib[4]; + size_t sz, miblen, max_size_class; + void *p; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + mib[2] = nhchunks - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() error"); + + assert_ptr_null(malloc(max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(malloc(SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + + assert_ptr_null(calloc(1, max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(calloc(1, SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() OOM"); + assert_ptr_null(realloc(p, max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(realloc(p, SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + free(p); +} +TEST_END + +int +main(void) +{ + + return (test( + test_overflow)); +} diff --git a/memory/jemalloc/src/test/integration/posix_memalign.c b/memory/jemalloc/src/test/integration/posix_memalign.c new file mode 100644 index 000000000..e22e10200 --- /dev/null +++ b/memory/jemalloc/src/test/integration/posix_memalign.c @@ -0,0 +1,133 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +#define MAXALIGN (((size_t)1) << 23) + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. 
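The aligned_alloc() tests earlier and the posix_memalign() tests in this file cover the same failure cases through two different error channels; side by side (a sketch):

    void *p;
    int err;

    /* aligned_alloc() reports failure via a NULL return plus errno. */
    set_errno(0);
    p = aligned_alloc(0, 1);    /* invalid alignment */
    assert_true(p == NULL && get_errno() == EINVAL,
        "Expected NULL/EINVAL from aligned_alloc()");

    /* posix_memalign() returns the error code directly; errno is unused. */
    err = posix_memalign(&p, 2, 1);    /* alignment < sizeof(void *) */
    assert_d_eq(err, EINVAL, "Expected EINVAL from posix_memalign()");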
+ */ +static void +purge(void) +{ + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_alignment_errors) +{ + size_t alignment; + void *p; + + for (alignment = 0; alignment < sizeof(void *); alignment++) { + assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, + "Expected error for invalid alignment %zu", + alignment); + } + + for (alignment = sizeof(size_t); alignment < MAXALIGN; + alignment <<= 1) { + assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, + "Expected error for invalid alignment %zu", + alignment + 1); + } +} +TEST_END + +TEST_BEGIN(test_oom_errors) +{ + size_t alignment, size; + void *p; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + size = 0x80000000LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + size = UINT64_C(0xc000000000000001); +#else + alignment = 0x40000000LU; + size = 0xc0000001LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + size = UINT64_C(0xfffffffffffffff0); +#else + size = 0xfffffff0LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ +#define NITER 4 + size_t alignment, size, total; + unsigned i; + int err; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (size = 1; + size < 3 * alignment && size < (1U << 31); + size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + err = posix_memalign(&ps[i], + alignment, size); + if (err) { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); + } + total += malloc_usable_size(ps[i]); + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + free(ps[i]); + ps[i] = NULL; + } + } + } + purge(); + } +#undef NITER +} +TEST_END + +int +main(void) +{ + + return (test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size)); +} diff --git a/memory/jemalloc/src/test/integration/rallocx.c b/memory/jemalloc/src/test/integration/rallocx.c new file mode 100644 index 000000000..66ad8660a --- /dev/null +++ b/memory/jemalloc/src/test/integration/rallocx.c @@ -0,0 +1,259 @@ +#include "test/jemalloc_test.h" + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); + + return (ret); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_grow_and_shrink) +{ + void *p, *q; + size_t tsz; +#define NCYCLES 3 + unsigned i, j; +#define NSZS 2500 + size_t szs[NSZS]; +#define MAXSZ ZU(12 * 1024 * 1024) + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + szs[0] = sallocx(p, 0); + + for (i = 0; i < NCYCLES; i++) { + for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { + q = rallocx(p, szs[j-1]+1, 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j-1], szs[j-1]+1); + szs[j] = sallocx(q, 0); + assert_zu_ne(szs[j], szs[j-1]+1, + "Expected size to be at least: %zu", szs[j-1]+1); + p = q; + } + + for (j--; j > 0; j--) { + q = rallocx(p, szs[j-1], 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j], szs[j-1]); + tsz = sallocx(q, 0); + assert_zu_eq(tsz, szs[j-1], + "Expected size=%zu, got size=%zu", szs[j-1], tsz); + p = q; + } + } + + dallocx(p, 0); +#undef MAXSZ +#undef NSZS +#undef NCYCLES +} +TEST_END + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) +{ + bool ret = false; + const uint8_t *buf = (const uint8_t *)p; + size_t i; + + for (i = 0; i < len; i++) { + uint8_t b = buf[offset+i]; + if (b != c) { + test_fail("Allocation at %p (len=%zu) contains %#x " + "rather than %#x at offset %zu", p, len, b, c, + offset+i); + ret = true; + } + } + + return (ret); +} + +TEST_BEGIN(test_zero) +{ + void *p, *q; + size_t psz, qsz, i, j; + size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; +#define FILL_BYTE 0xaaU +#define RANGE 2048 + + for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { + size_t start_size = start_sizes[i]; + p = mallocx(start_size, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + psz = sallocx(p, 0); + + assert_false(validate_fill(p, 0, 0, psz), + "Expected zeroed memory"); + memset(p, FILL_BYTE, psz); + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + + for (j = 1; j < RANGE; j++) { + q = rallocx(p, start_size+j, MALLOCX_ZERO); + assert_ptr_not_null(q, "Unexpected rallocx() error"); + qsz = sallocx(q, 0); + if (q != p || qsz != psz) { + assert_false(validate_fill(q, FILL_BYTE, 0, + psz), "Expected filled memory"); + assert_false(validate_fill(q, 0, psz, qsz-psz), + "Expected zeroed memory"); + } + if (psz != qsz) { + memset((void *)((uintptr_t)q+psz), FILL_BYTE, + qsz-psz); + psz = qsz; + } + p = q; + } + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + dallocx(p, 0); + } +#undef FILL_BYTE +} +TEST_END + +TEST_BEGIN(test_align) +{ + void *p, *q; + size_t align; +#define MAX_ALIGN (ZU(1) << 25) + + align = ZU(1); + p = mallocx(1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { + q = rallocx(p, 1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(q, + "Unexpected rallocx() error for align=%zu", align); + assert_ptr_null( + (void *)((uintptr_t)q & (align-1)), + "%p inadequately aligned for align=%zu", + q, align); + p = q; + } + dallocx(p, 0); +#undef MAX_ALIGN +} +TEST_END + +TEST_BEGIN(test_lg_align_and_zero) +{ + void *p, *q; + unsigned lg_align; + size_t sz; +#define MAX_LG_ALIGN 25 +#define MAX_VALIDATE (ZU(1) << 22) + + lg_align = 0; + p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + 
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { + q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(q, + "Unexpected rallocx() error for lg_align=%u", lg_align); + assert_ptr_null( + (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), + "%p inadequately aligned for lg_align=%u", q, lg_align); + sz = sallocx(q, 0); + if ((sz << 1) <= MAX_VALIDATE) { + assert_false(validate_fill(q, 0, 0, sz), + "Expected zeroed memory"); + } else { + assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), + "Expected zeroed memory"); + assert_false(validate_fill( + (void *)((uintptr_t)q+sz-MAX_VALIDATE), + 0, 0, MAX_VALIDATE), "Expected zeroed memory"); + } + p = q; + } + dallocx(p, 0); +#undef MAX_VALIDATE +#undef MAX_LG_ALIGN +} +TEST_END + +TEST_BEGIN(test_overflow) +{ + size_t hugemax; + void *p; + + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_ptr_null(rallocx(p, hugemax+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1); + + assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); + + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_grow_and_shrink, + test_zero, + test_align, + test_lg_align_and_zero, + test_overflow)); +} diff --git a/memory/jemalloc/src/test/integration/sdallocx.c b/memory/jemalloc/src/test/integration/sdallocx.c new file mode 100644 index 000000000..b84817d76 --- /dev/null +++ b/memory/jemalloc/src/test/integration/sdallocx.c @@ -0,0 +1,57 @@ +#include "test/jemalloc_test.h" + +#define MAXALIGN (((size_t)1) << 25) +#define NITER 4 + +TEST_BEGIN(test_basic) +{ + void *ptr = mallocx(64, 0); + sdallocx(ptr, 64, 0); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t nsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + total += nsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + sdallocx(ps[i], sz, + MALLOCX_ALIGN(alignment)); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_basic, + test_alignment_and_size)); +} diff --git a/memory/jemalloc/src/test/integration/thread_arena.c b/memory/jemalloc/src/test/integration/thread_arena.c new file mode 100644 index 000000000..67be53513 --- /dev/null +++ b/memory/jemalloc/src/test/integration/thread_arena.c @@ -0,0 +1,79 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +void * +thd_start(void *arg) +{ + unsigned main_arena_ind = *(unsigned *)arg; + void *p; + unsigned arena_ind; + size_t size; + int err; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + free(p); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, + sizeof(main_arena_ind)))) { + char buf[BUFERROR_BUF]; + + buferror(err, 
buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + assert_u_eq(arena_ind, main_arena_ind, + "Arena index should be same as for main thread"); + + return (NULL); +} + +TEST_BEGIN(test_thread_arena) +{ + void *p; + unsigned arena_ind; + size_t size; + int err; + thd_t thds[NTHREADS]; + unsigned i; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)&arena_ind); + } + + for (i = 0; i < NTHREADS; i++) { + intptr_t join_ret; + thd_join(thds[i], (void *)&join_ret); + assert_zd_eq(join_ret, 0, "Unexpected thread join error"); + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_thread_arena)); +} diff --git a/memory/jemalloc/src/test/integration/thread_tcache_enabled.c b/memory/jemalloc/src/test/integration/thread_tcache_enabled.c new file mode 100644 index 000000000..f4e89c682 --- /dev/null +++ b/memory/jemalloc/src/test/integration/thread_tcache_enabled.c @@ -0,0 +1,113 @@ +#include "test/jemalloc_test.h" + +static const bool config_tcache = +#ifdef JEMALLOC_TCACHE + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + int err; + size_t sz; + bool e0, e1; + + sz = sizeof(bool); + if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { + if (err == ENOENT) { + assert_false(config_tcache, + "ENOENT should only be returned if tcache is " + "disabled"); + } + goto label_ENOENT; + } + + if (e0) { + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), + 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + } + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + return (NULL); +label_ENOENT: + test_skip("\"thread.tcache.enabled\" mallctl 
not available"); + return (NULL); +} + +TEST_BEGIN(test_main_thread) +{ + + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) +{ + + /* Run tests multiple times to check for bad interactions. */ + return (test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread)); +} diff --git a/memory/jemalloc/src/test/integration/xallocx.c b/memory/jemalloc/src/test/integration/xallocx.c new file mode 100644 index 000000000..ad292bb56 --- /dev/null +++ b/memory/jemalloc/src/test/integration/xallocx.c @@ -0,0 +1,497 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + +/* + * Use a separate arena for xallocx() extension/contraction tests so that + * internal allocation e.g. by heap profiling can't interpose allocations where + * xallocx() would ordinarily be able to extend. + */ +static unsigned +arena_ind(void) +{ + static unsigned ind = 0; + + if (ind == 0) { + size_t sz = sizeof(ind); + assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0, + "Unexpected mallctl failure creating arena"); + } + + return (ind); +} + +TEST_BEGIN(test_same_size) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_no_move) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, sz-42, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_no_move_fail) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz + 5, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nsmall(void) +{ + + return (get_nsizes_impl("arenas.nbins")); +} + +static unsigned +get_nlarge(void) +{ + + return (get_nsizes_impl("arenas.nlruns")); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return (ret); +} + +static size_t +get_small_size(size_t ind) +{ + + return (get_size_impl("arenas.bin.0.size", ind)); +} + +static size_t +get_large_size(size_t ind) +{ + + return (get_size_impl("arenas.lrun.0.size", ind)); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_size) +{ + size_t small0, hugemax; + void *p; + + /* Get size classes. 
*/ + small0 = get_small_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + /* Test smallest supported size. */ + assert_zu_eq(xallocx(p, 1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test largest supported size. */ + assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, + "Unexpected xallocx() behavior"); + + /* Test size overflow. */ + assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_size_extra_overflow) +{ + size_t small0, hugemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + /* Test overflows that can be resolved by clamping extra. */ + assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, + "Unexpected xallocx() behavior"); + + /* Test overflow such that hugemax-size underflows. */ + assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_small) +{ + size_t small0, small1, hugemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + small1 = get_small_size(1); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test size+extra overflow. */ + assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_large) +{ + int flags = MALLOCX_ARENA(arena_ind()); + size_t smallmax, large0, large1, large2, huge0, hugemax; + void *p; + + /* Get size classes. */ + smallmax = get_small_size(get_nsmall()-1); + large0 = get_large_size(0); + large1 = get_large_size(1); + large2 = get_large_size(2); + huge0 = get_huge_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(large2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + /* Test size decrease with zero extra. */ + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, smallmax, 0, flags), large0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + /* Test size decrease with non-zero extra. 
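 * (xallocx(p, size, extra, flags) resizes strictly in place to at least
 * size and at most size+extra bytes and returns the resulting usable size,
 * so a shrink request with enough extra may legitimately leave the
 * allocation at its original size.)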
*/ + assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with zero extra. */ + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge0, 0, flags), large2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + /* Test size+extra overflow. */ + assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0, + "Unexpected xallocx() behavior"); + + dallocx(p, flags); +} +TEST_END + +TEST_BEGIN(test_extra_huge) +{ + int flags = MALLOCX_ARENA(arena_ind()); + size_t largemax, huge1, huge2, huge3, hugemax; + void *p; + + /* Get size classes. */ + largemax = get_large_size(get_nlarge()-1); + huge1 = get_huge_size(1); + huge2 = get_huge_size(2); + huge3 = get_huge_size(3); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(huge3, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + "Unexpected xallocx() behavior"); + /* Test size decrease with zero extra. */ + assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, largemax, 0, flags), huge1, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + "Unexpected xallocx() behavior"); + /* Test size decrease with non-zero extra. */ + assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + "Unexpected xallocx() behavior"); + /* Test size increase with zero extra. */ + assert_zu_le(xallocx(p, huge3, 0, flags), huge3, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. 
*/ + assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + "Unexpected xallocx() behavior"); + /* Test size+extra overflow. */ + assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax, + "Unexpected xallocx() behavior"); + + dallocx(p, flags); +} +TEST_END + +static void +print_filled_extents(const void *p, uint8_t c, size_t len) +{ + const uint8_t *pc = (const uint8_t *)p; + size_t i, range0; + uint8_t c0; + + malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); + range0 = 0; + c0 = pc[0]; + for (i = 0; i < len; i++) { + if (pc[i] != c0) { + malloc_printf(" %#x[%zu..%zu)", c0, range0, i); + range0 = i; + c0 = pc[i]; + } + } + malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); +} + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) +{ + const uint8_t *pc = (const uint8_t *)p; + bool err; + size_t i; + + for (i = offset, err = false; i < offset+len; i++) { + if (pc[i] != c) + err = true; + } + + if (err) + print_filled_extents(p, c, offset + len); + + return (err); +} + +static void +test_zero(size_t szmin, size_t szmax) +{ + int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; + size_t sz, nsz; + void *p; +#define FILL_BYTE 0x7aU + + sz = szmax; + p = mallocx(sz, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", + sz); + + /* + * Fill with non-zero so that non-debug builds are more likely to detect + * errors. + */ + memset(p, FILL_BYTE, sz); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + + /* Shrink in place so that we can expect growing in place to succeed. */ + sz = szmin; + assert_zu_eq(xallocx(p, sz, 0, flags), sz, + "Unexpected xallocx() error"); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + + for (sz = szmin; sz < szmax; sz = nsz) { + nsz = nallocx(sz+1, flags); + assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz, + "Unexpected xallocx() failure"); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + assert_false(validate_fill(p, 0x00, sz, nsz-sz), + "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); + memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); + assert_false(validate_fill(p, FILL_BYTE, 0, nsz), + "Memory not filled: nsz=%zu", nsz); + } + + dallocx(p, flags); +} + +TEST_BEGIN(test_zero_large) +{ + size_t large0, largemax; + + /* Get size classes. */ + large0 = get_large_size(0); + largemax = get_large_size(get_nlarge()-1); + + test_zero(large0, largemax); +} +TEST_END + +TEST_BEGIN(test_zero_huge) +{ + size_t huge0, huge1; + + /* Get size classes. 
*/ + huge0 = get_huge_size(0); + huge1 = get_huge_size(1); + + test_zero(huge1, huge0 * 2); +} +TEST_END + +int +main(void) +{ + + return (test( + test_same_size, + test_extra_no_move, + test_no_move_fail, + test_size, + test_size_extra_overflow, + test_extra_small, + test_extra_large, + test_extra_huge, + test_zero_large, + test_zero_huge)); +} diff --git a/memory/jemalloc/src/test/src/SFMT.c b/memory/jemalloc/src/test/src/SFMT.c new file mode 100644 index 000000000..80cabe05e --- /dev/null +++ b/memory/jemalloc/src/test/src/SFMT.c @@ -0,0 +1,719 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.c + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * The new BSD License is applied to this software, see LICENSE.txt + */ +#define SFMT_C_ +#include "test/jemalloc_test.h" +#include "test/SFMT-params.h" + +#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(ONLY64) && !defined(BIG_ENDIAN64) + #if defined(__GNUC__) + #error "-DONLY64 must be specified with -DBIG_ENDIAN64" + #endif +#undef ONLY64 +#endif +/*------------------------------------------------------ + 128-bit SIMD data type for Altivec, SSE2 or standard C + ------------------------------------------------------*/ +#if defined(HAVE_ALTIVEC) +/** 128-bit data structure */ +union W128_T { + vector unsigned int s; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#elif defined(HAVE_SSE2) +/** 128-bit data structure */ +union W128_T { + __m128i si; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#else + +/** 128-bit data structure */ +struct W128_T { + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef struct W128_T w128_t; + +#endif + +struct sfmt_s { + /** the 128-bit internal state array */ + w128_t sfmt[N]; + /** index counter to the 32-bit internal state array */ + int idx; + /** a flag: it is 0 if and only if the internal state is not yet + * initialized. */ + int initialized; +}; + +/*-------------------------------------- + FILE GLOBAL VARIABLES + internal state, index counter and flag + --------------------------------------*/ + +/** a parity check vector which certificate the period of 2^{MEXP} */ +static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; + +/*---------------- + STATIC FUNCTIONS + ----------------*/ +JEMALLOC_INLINE_C int idxof(int i); +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); +#endif +JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); +JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +JEMALLOC_INLINE_C uint32_t func1(uint32_t x); +JEMALLOC_INLINE_C uint32_t func2(uint32_t x); +static void period_certification(sfmt_t *ctx); +#if defined(BIG_ENDIAN64) && !defined(ONLY64) +JEMALLOC_INLINE_C void swap(w128_t *array, int size); +#endif + +#if defined(HAVE_ALTIVEC) + #include "test/SFMT-alti.h" +#elif defined(HAVE_SSE2) + #include "test/SFMT-sse2.h" +#endif + +/** + * This function simulate a 64-bit index of LITTLE ENDIAN + * in BIG ENDIAN machine. + */ +#ifdef ONLY64 +JEMALLOC_INLINE_C int idxof(int i) { + return i ^ 1; +} +#else +JEMALLOC_INLINE_C int idxof(int i) { + return i; +} +#endif +/** + * This function simulates SIMD 128-bit right shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. 
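 * In effect *out = *in >> (shift * 8), with the four 32-bit words treated
 * as a single little-endian 128-bit integer.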
+ * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +/** + * This function simulates SIMD 128-bit left shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. + * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#ifdef ONLY64 +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +#endif + +/** + * This function represents the recursion formula. 
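 * Concretely, r = a ^ (a << (8*SL2)) ^ ((b >> SR1) & MSK) ^ (c >> (8*SR2))
 * ^ (d << SL1), where the shifts of a and c act on the whole 128-bit value
 * and those of b and d act independently on each 32-bit lane.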
+ * @param r output + * @param a a 128-bit part of the internal state array + * @param b a 128-bit part of the internal state array + * @param c a 128-bit part of the internal state array + * @param d a 128-bit part of the internal state array + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#else +JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#endif +#endif + +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { + int i; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } + for (; i < N; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pseudorandom numbers to be generated. 
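 * (size must be at least N 128-bit words; the last N generated values are
 * also copied back into ctx->sfmt as the new internal state.)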
+ */ +JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < N; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < size - N; i++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j] = array[j + size - N]; + } + for (; i < size; i++, j++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + ctx->sfmt[j] = array[i]; + } +} +#endif + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) +JEMALLOC_INLINE_C void swap(w128_t *array, int size) { + int i; + uint32_t x, y; + + for (i = 0; i < size; i++) { + x = array[i].u[0]; + y = array[i].u[2]; + array[i].u[0] = array[i].u[1]; + array[i].u[2] = array[i].u[3]; + array[i].u[1] = x; + array[i].u[3] = y; + } +} +#endif +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func1(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1664525UL; +} + +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func2(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1566083941UL; +} + +/** + * This function certificate the period of 2^{MEXP} + */ +static void period_certification(sfmt_t *ctx) { + int inner = 0; + int i, j; + uint32_t work; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + for (i = 0; i < 4; i++) + inner ^= psfmt32[idxof(i)] & parity[i]; + for (i = 16; i > 0; i >>= 1) + inner ^= inner >> i; + inner &= 1; + /* check OK */ + if (inner == 1) { + return; + } + /* check NG, and modification */ + for (i = 0; i < 4; i++) { + work = 1; + for (j = 0; j < 32; j++) { + if ((work & parity[i]) != 0) { + psfmt32[idxof(i)] ^= work; + return; + } + work = work << 1; + } + } +} + +/*---------------- + PUBLIC FUNCTIONS + ----------------*/ +/** + * This function returns the identification string. + * The string shows the word size, the Mersenne exponent, + * and all parameters of this generator. + */ +const char *get_idstring(void) { + return IDSTR; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array32() function. + * @return minimum size of array used for fill_array32() function. + */ +int get_min_array_size32(void) { + return N32; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array64() function. + * @return minimum size of array used for fill_array64() function. + */ +int get_min_array_size64(void) { + return N64; +} + +#ifndef ONLY64 +/** + * This function generates and returns 32-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * @return 32-bit pseudorandom number + */ +uint32_t gen_rand32(sfmt_t *ctx) { + uint32_t r; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + assert(ctx->initialized); + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } + r = psfmt32[ctx->idx++]; + return r; +} + +/* Generate a random integer in [0..limit). 
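 * Rejection sampling: draws at or above the largest multiple of limit that
 * fits in 32 bits are retried, so the final modulo introduces no bias.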
*/ +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { + uint32_t ret, above; + + above = 0xffffffffU - (0xffffffffU % limit); + while (1) { + ret = gen_rand32(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} +#endif +/** + * This function generates and returns 64-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * The function gen_rand64 should not be called after gen_rand32, + * unless an initialization is again executed. + * @return 64-bit pseudorandom number + */ +uint64_t gen_rand64(sfmt_t *ctx) { +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + uint32_t r1, r2; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; +#else + uint64_t r; + uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; +#endif + + assert(ctx->initialized); + assert(ctx->idx % 2 == 0); + + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + r1 = psfmt32[ctx->idx]; + r2 = psfmt32[ctx->idx + 1]; + ctx->idx += 2; + return ((uint64_t)r2 << 32) | r1; +#else + r = psfmt64[ctx->idx / 2]; + ctx->idx += 2; + return r; +#endif +} + +/* Generate a random integer in [0..limit). */ +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { + uint64_t ret, above; + + above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); + while (1) { + ret = gen_rand64(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} + +#ifndef ONLY64 +/** + * This function generates pseudorandom 32-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 624 and a + * multiple of four. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. + * + * @param array an array where pseudorandom 32-bit integers are filled + * by this function. The pointer to the array must be \b "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 32-bit pseudorandom integers to be + * generated. size must be a multiple of 4, and greater than or equal + * to (MEXP / 128 + 1) * 4. + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 4 == 0); + assert(size >= N32); + + gen_rand_array(ctx, (w128_t *)array, size / 4); + ctx->idx = N32; +} +#endif + +/** + * This function generates pseudorandom 64-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 312 and a + * multiple of two. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. 
+ * + * @param array an array where pseudorandom 64-bit integers are filled + * by this function. The pointer to the array must be "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 64-bit pseudorandom integers to be + * generated. size must be a multiple of 2, and greater than or equal + * to (MEXP / 128 + 1) * 2 + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 2 == 0); + assert(size >= N64); + + gen_rand_array(ctx, (w128_t *)array, size / 2); + ctx->idx = N32; + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + swap((w128_t *)array, size /2); +#endif +} + +/** + * This function initializes the internal state array with a 32-bit + * integer seed. + * + * @param seed a 32-bit integer used as the seed. + */ +sfmt_t *init_gen_rand(uint32_t seed) { + void *p; + sfmt_t *ctx; + int i; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + psfmt32[idxof(0)] = seed; + for (i = 1; i < N32; i++) { + psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] + ^ (psfmt32[idxof(i - 1)] >> 30)) + + i; + } + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +/** + * This function initializes the internal state array, + * with an array of 32-bit integers used as the seeds + * @param init_key the array of 32-bit integers, used as a seed. + * @param key_length the length of init_key. 
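 * @return the initialized generator context (allocated with 16-byte
 * alignment via posix_memalign()), or NULL on allocation failure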
+ */ +sfmt_t *init_by_array(uint32_t *init_key, int key_length) { + void *p; + sfmt_t *ctx; + int i, j, count; + uint32_t r; + int lag; + int mid; + int size = N * 4; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + if (size >= 623) { + lag = 11; + } else if (size >= 68) { + lag = 7; + } else if (size >= 39) { + lag = 5; + } else { + lag = 3; + } + mid = (size - lag) / 2; + + memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); + if (key_length + 1 > N32) { + count = key_length + 1; + } else { + count = N32; + } + r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] + ^ psfmt32[idxof(N32 - 1)]); + psfmt32[idxof(mid)] += r; + r += key_length; + psfmt32[idxof(mid + lag)] += r; + psfmt32[idxof(0)] = r; + + count--; + for (i = 1, j = 0; (j < count) && (j < key_length); j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += init_key[j] + i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (; j < count; j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (j = 0; j < N32; j++) { + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + + psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] ^= r; + r -= i; + psfmt32[idxof((i + mid + lag) % N32)] ^= r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +void fini_gen_rand(sfmt_t *ctx) { + assert(ctx != NULL); + + ctx->initialized = 0; + free(ctx); +} diff --git a/memory/jemalloc/src/test/src/btalloc.c b/memory/jemalloc/src/test/src/btalloc.c new file mode 100644 index 000000000..9a253d978 --- /dev/null +++ b/memory/jemalloc/src/test/src/btalloc.c @@ -0,0 +1,8 @@ +#include "test/jemalloc_test.h" + +void * +btalloc(size_t size, unsigned bits) +{ + + return (btalloc_0(size, bits)); +} diff --git a/memory/jemalloc/src/test/src/btalloc_0.c b/memory/jemalloc/src/test/src/btalloc_0.c new file mode 100644 index 000000000..77d8904ea --- /dev/null +++ b/memory/jemalloc/src/test/src/btalloc_0.c @@ -0,0 +1,3 @@ +#include "test/jemalloc_test.h" + +btalloc_n_gen(0) diff --git a/memory/jemalloc/src/test/src/btalloc_1.c b/memory/jemalloc/src/test/src/btalloc_1.c new file mode 100644 index 000000000..4c126c309 --- /dev/null +++ b/memory/jemalloc/src/test/src/btalloc_1.c @@ -0,0 +1,3 @@ +#include "test/jemalloc_test.h" + +btalloc_n_gen(1) diff --git a/memory/jemalloc/src/test/src/math.c b/memory/jemalloc/src/test/src/math.c new file mode 100644 index 000000000..887a36390 --- /dev/null +++ b/memory/jemalloc/src/test/src/math.c @@ -0,0 +1,2 @@ +#define MATH_C_ +#include "test/jemalloc_test.h" diff --git a/memory/jemalloc/src/test/src/mq.c b/memory/jemalloc/src/test/src/mq.c new file mode 100644 index 000000000..40b31c15c --- /dev/null +++ b/memory/jemalloc/src/test/src/mq.c @@ -0,0 +1,29 @@ +#include "test/jemalloc_test.h" + +/* + * Sleep for approximately ns nanoseconds. No lower *nor* upper bound on sleep + * time is guaranteed. 
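 * (Implemented with Sleep() on Windows and nanosleep() elsewhere.)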
+ */ +void +mq_nanosleep(unsigned ns) +{ + + assert(ns <= 1000*1000*1000); + +#ifdef _WIN32 + Sleep(ns / 1000); +#else + { + struct timespec timeout; + + if (ns < 1000*1000*1000) { + timeout.tv_sec = 0; + timeout.tv_nsec = ns; + } else { + timeout.tv_sec = 1; + timeout.tv_nsec = 0; + } + nanosleep(&timeout, NULL); + } +#endif +} diff --git a/memory/jemalloc/src/test/src/mtx.c b/memory/jemalloc/src/test/src/mtx.c new file mode 100644 index 000000000..8a5dfdd99 --- /dev/null +++ b/memory/jemalloc/src/test/src/mtx.c @@ -0,0 +1,73 @@ +#include "test/jemalloc_test.h" + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +bool +mtx_init(mtx_t *mtx) +{ + +#ifdef _WIN32 + if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) + return (true); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mtx->lock = OS_UNFAIR_LOCK_INIT; +#elif (defined(JEMALLOC_OSSPIN)) + mtx->lock = 0; +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); + if (pthread_mutex_init(&mtx->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); +#endif + return (false); +} + +void +mtx_fini(mtx_t *mtx) +{ + +#ifdef _WIN32 +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +#elif (defined(JEMALLOC_OSSPIN)) +#else + pthread_mutex_destroy(&mtx->lock); +#endif +} + +void +mtx_lock(mtx_t *mtx) +{ + +#ifdef _WIN32 + EnterCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mtx->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockLock(&mtx->lock); +#else + pthread_mutex_lock(&mtx->lock); +#endif +} + +void +mtx_unlock(mtx_t *mtx) +{ + +#ifdef _WIN32 + LeaveCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mtx->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockUnlock(&mtx->lock); +#else + pthread_mutex_unlock(&mtx->lock); +#endif +} diff --git a/memory/jemalloc/src/test/src/test.c b/memory/jemalloc/src/test/src/test.c new file mode 100644 index 000000000..d70cc7501 --- /dev/null +++ b/memory/jemalloc/src/test/src/test.c @@ -0,0 +1,133 @@ +#include "test/jemalloc_test.h" + +static unsigned test_count = 0; +static test_status_t test_counts[test_status_count] = {0, 0, 0}; +static test_status_t test_status = test_status_pass; +static const char * test_name = ""; + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_skip(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_skip; +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_fail(const char *format, ...) 
+{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_fail; +} + +static const char * +test_status_string(test_status_t test_status) +{ + + switch (test_status) { + case test_status_pass: return "pass"; + case test_status_skip: return "skip"; + case test_status_fail: return "fail"; + default: not_reached(); + } +} + +void +p_test_init(const char *name) +{ + + test_count++; + test_status = test_status_pass; + test_name = name; +} + +void +p_test_fini(void) +{ + + test_counts[test_status]++; + malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); +} + +static test_status_t +p_test_impl(bool do_malloc_init, test_t *t, va_list ap) +{ + test_status_t ret; + + if (do_malloc_init) { + /* + * Make sure initialization occurs prior to running tests. + * Tests are special because they may use internal facilities + * prior to triggering initialization as a side effect of + * calling into the public API. + */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return (test_status_fail); + } + } + + ret = test_status_pass; + for (; t != NULL; t = va_arg(ap, test_t *)) { + t(); + if (test_status > ret) + ret = test_status; + } + + malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", + test_status_string(test_status_pass), + test_counts[test_status_pass], test_count, + test_status_string(test_status_skip), + test_counts[test_status_skip], test_count, + test_status_string(test_status_fail), + test_counts[test_status_fail], test_count); + + return (ret); +} + +test_status_t +p_test(test_t *t, ...) +{ + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(true, t, ap); + va_end(ap); + + return (ret); +} + +test_status_t +p_test_no_malloc_init(test_t *t, ...) 
+{ + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(false, t, ap); + va_end(ap); + + return (ret); +} + +void +p_test_fail(const char *prefix, const char *message) +{ + + malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); + test_status = test_status_fail; +} diff --git a/memory/jemalloc/src/test/src/thd.c b/memory/jemalloc/src/test/src/thd.c new file mode 100644 index 000000000..c9d006586 --- /dev/null +++ b/memory/jemalloc/src/test/src/thd.c @@ -0,0 +1,39 @@ +#include "test/jemalloc_test.h" + +#ifdef _WIN32 +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) +{ + LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; + *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); + if (*thd == NULL) + test_fail("Error in CreateThread()\n"); +} + +void +thd_join(thd_t thd, void **ret) +{ + + if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { + DWORD exit_code; + GetExitCodeThread(thd, (LPDWORD) &exit_code); + *ret = (void *)(uintptr_t)exit_code; + } +} + +#else +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) +{ + + if (pthread_create(thd, NULL, proc, arg) != 0) + test_fail("Error in pthread_create()\n"); +} + +void +thd_join(thd_t thd, void **ret) +{ + + pthread_join(thd, ret); +} +#endif diff --git a/memory/jemalloc/src/test/src/timer.c b/memory/jemalloc/src/test/src/timer.c new file mode 100644 index 000000000..3c7e63a26 --- /dev/null +++ b/memory/jemalloc/src/test/src/timer.c @@ -0,0 +1,60 @@ +#include "test/jemalloc_test.h" + +void +timer_start(timedelta_t *timer) +{ + + nstime_init(&timer->t0, 0); + nstime_update(&timer->t0); +} + +void +timer_stop(timedelta_t *timer) +{ + + nstime_copy(&timer->t1, &timer->t0); + nstime_update(&timer->t1); +} + +uint64_t +timer_usec(const timedelta_t *timer) +{ + nstime_t delta; + + nstime_copy(&delta, &timer->t1); + nstime_subtract(&delta, &timer->t0); + return (nstime_ns(&delta) / 1000); +} + +void +timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) +{ + uint64_t t0 = timer_usec(a); + uint64_t t1 = timer_usec(b); + uint64_t mult; + size_t i = 0; + size_t j, n; + + /* Whole. */ + n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); + i += n; + if (i >= buflen) + return; + mult = 1; + for (j = 0; j < n; j++) + mult *= 10; + + /* Decimal. */ + n = malloc_snprintf(&buf[i], buflen-i, "."); + i += n; + + /* Fraction. */ + while (i < buflen-1) { + uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 + >= 5)) ? 
1 : 0; + n = malloc_snprintf(&buf[i], buflen-i, + "%"FMTu64, (t0 * mult / t1) % 10 + round); + i += n; + mult *= 10; + } +} diff --git a/memory/jemalloc/src/test/stress/microbench.c b/memory/jemalloc/src/test/stress/microbench.c new file mode 100644 index 000000000..7dc45f89c --- /dev/null +++ b/memory/jemalloc/src/test/stress/microbench.c @@ -0,0 +1,182 @@ +#include "test/jemalloc_test.h" + +JEMALLOC_INLINE_C void +time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, + void (*func)(void)) +{ + uint64_t i; + + for (i = 0; i < nwarmup; i++) + func(); + timer_start(timer); + for (i = 0; i < niter; i++) + func(); + timer_stop(timer); +} + +void +compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, + void (*func_a), const char *name_b, void (*func_b)) +{ + timedelta_t timer_a, timer_b; + char ratio_buf[6]; + void *p; + + p = mallocx(1, 0); + if (p == NULL) { + test_fail("Unexpected mallocx() failure"); + return; + } + + time_func(&timer_a, nwarmup, niter, func_a); + time_func(&timer_b, nwarmup, niter, func_b); + + timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); + malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, " + "%s=%"FMTu64"us, ratio=1:%s\n", + niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b), + ratio_buf); + + dallocx(p, 0); +} + +static void +malloc_free(void) +{ + /* The compiler can optimize away free(malloc(1))! */ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + free(p); +} + +static void +mallocx_free(void) +{ + void *p = mallocx(1, 0); + if (p == NULL) { + test_fail("Unexpected mallocx() failure"); + return; + } + free(p); +} + +TEST_BEGIN(test_malloc_vs_mallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "malloc", + malloc_free, "mallocx", mallocx_free); +} +TEST_END + +static void +malloc_dallocx(void) +{ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + dallocx(p, 0); +} + +static void +malloc_sdallocx(void) +{ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + sdallocx(p, 1, 0); +} + +TEST_BEGIN(test_free_vs_dallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, + "dallocx", malloc_dallocx); +} +TEST_END + +TEST_BEGIN(test_dallocx_vs_sdallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, + "sdallocx", malloc_sdallocx); +} +TEST_END + +static void +malloc_mus_free(void) +{ + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + malloc_usable_size(p); + free(p); +} + +static void +malloc_sallocx_free(void) +{ + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + if (sallocx(p, 0) < 1) + test_fail("Unexpected sallocx() failure"); + free(p); +} + +TEST_BEGIN(test_mus_vs_sallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", + malloc_mus_free, "sallocx", malloc_sallocx_free); +} +TEST_END + +static void +malloc_nallocx_free(void) +{ + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + if (nallocx(1, 0) < 1) + test_fail("Unexpected nallocx() failure"); + free(p); +} + +TEST_BEGIN(test_sallocx_vs_nallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", + malloc_sallocx_free, "nallocx", malloc_nallocx_free); +} +TEST_END + +int +main(void) +{ + + return (test( + test_malloc_vs_mallocx, + test_free_vs_dallocx, 
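+ /*
+  * Each benchmark in this list prints one line of the form
+  *     <niter> iterations, <a>=Xus, <b>=Yus, ratio=1:R
+  * where R is X/Y rendered by timer_ratio() into the 6-byte ratio_buf
+  * in compare_funcs(); e.g. X=1234 and Y=567 yield R="2.176", with the
+  * last fraction digit rounded up when the digit after it is >= 5.
+  */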
+ test_dallocx_vs_sdallocx, + test_mus_vs_sallocx, + test_sallocx_vs_nallocx)); +} diff --git a/memory/jemalloc/src/test/test.sh.in b/memory/jemalloc/src/test/test.sh.in new file mode 100644 index 000000000..a39f99f6b --- /dev/null +++ b/memory/jemalloc/src/test/test.sh.in @@ -0,0 +1,53 @@ +#!/bin/sh + +case @abi@ in + macho) + export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" + ;; + pecoff) + export PATH="${PATH}:@objroot@lib" + ;; + *) + ;; +esac + +# Corresponds to test_status_t. +pass_code=0 +skip_code=1 +fail_code=2 + +pass_count=0 +skip_count=0 +fail_count=0 +for t in $@; do + if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then + echo + fi + echo "=== ${t} ===" + ${t}@exe@ @abs_srcroot@ @abs_objroot@ + result_code=$? + case ${result_code} in + ${pass_code}) + pass_count=$((pass_count+1)) + ;; + ${skip_code}) + skip_count=$((skip_count+1)) + ;; + ${fail_code}) + fail_count=$((fail_count+1)) + ;; + *) + echo "Test harness error" 1>&2 + exit 1 + esac +done + +total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` +echo +echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" + +if [ ${fail_count} -eq 0 ] ; then + exit 0 +else + exit 1 +fi diff --git a/memory/jemalloc/src/test/unit/SFMT.c b/memory/jemalloc/src/test/unit/SFMT.c new file mode 100644 index 000000000..ba4be8702 --- /dev/null +++ b/memory/jemalloc/src/test/unit/SFMT.c @@ -0,0 +1,1605 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "test/jemalloc_test.h" + +#define BLOCK_SIZE 10000 +#define BLOCK_SIZE64 (BLOCK_SIZE / 2) +#define COUNT_1 1000 +#define COUNT_2 700 + +static const uint32_t init_gen_rand_32_expected[] = { + 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, + 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, + 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, + 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, + 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, + 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, + 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, + 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, + 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, + 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, + 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, + 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, + 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, + 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, + 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, + 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, + 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, + 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, + 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, + 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, + 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, + 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, + 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, + 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, + 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, + 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, + 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, + 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, + 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, + 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, + 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, + 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, + 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, + 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, + 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, + 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, + 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, + 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, + 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, + 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, + 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, + 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, + 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, + 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, + 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, + 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, + 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, + 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, + 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, + 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, + 3141868989U, 3293216228U, 1194331837U, 
1254570642U, 3049934521U, + 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, + 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, + 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, + 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, + 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, + 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, + 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, + 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, + 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, + 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, + 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, + 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, + 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, + 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, + 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, + 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, + 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, + 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, + 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, + 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, + 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, + 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, + 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, + 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, + 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, + 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, + 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, + 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, + 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, + 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, + 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, + 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, + 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, + 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, + 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, + 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, + 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, + 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, + 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, + 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, + 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, + 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, + 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, + 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, + 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, + 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, + 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, + 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, + 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, + 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, + 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, + 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, + 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, + 3994702151U, 22493119U, 
3584400918U, 3446253670U, 4254789085U, + 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, + 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, + 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, + 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, + 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, + 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, + 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, + 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, + 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, + 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, + 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, + 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, + 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, + 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, + 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, + 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, + 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, + 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, + 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, + 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, + 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, + 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, + 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, + 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, + 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, + 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, + 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, + 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, + 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, + 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, + 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, + 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, + 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, + 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, + 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, + 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, + 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, + 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, + 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, + 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, + 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, + 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, + 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, + 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, + 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, + 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, + 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, + 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, + 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, + 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, + 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, + 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, + 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, + 2489884874U, 756853961U, 
2298874501U, 4013448667U, 4143996022U, + 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, + 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, + 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, + 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, + 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, + 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, + 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, + 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, + 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, + 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, + 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, + 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, + 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, + 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, + 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, + 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, + 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, + 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, + 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, + 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, + 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, + 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, + 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, + 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, + 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, + 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, + 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, + 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, + 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, + 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, + 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, + 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, + 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, + 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, + 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, + 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, + 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, + 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, + 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, + 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, + 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U +}; +static const uint32_t init_by_array_32_expected[] = { + 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, + 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, + 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, + 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, + 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, + 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, + 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, + 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, + 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, + 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, + 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, + 3438299U, 2150366101U, 
1577256676U, 3802546413U, 1787774626U, + 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, + 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, + 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, + 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, + 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, + 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, + 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, + 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, + 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, + 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, + 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, + 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, + 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, + 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, + 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, + 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, + 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, + 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, + 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, + 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, + 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, + 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, + 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, + 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, + 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, + 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, + 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, + 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, + 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, + 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, + 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, + 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, + 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, + 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, + 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, + 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, + 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, + 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, + 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, + 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, + 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, + 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, + 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, + 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, + 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, + 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, + 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, + 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, + 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, + 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, + 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, + 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, + 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, + 2770783427U, 2775406005U, 
2777781742U, 1931292655U, 472147933U, + 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, + 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, + 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, + 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, + 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, + 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, + 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, + 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, + 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, + 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, + 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, + 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, + 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, + 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, + 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, + 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, + 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, + 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, + 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, + 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, + 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, + 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, + 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, + 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, + 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, + 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, + 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, + 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, + 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, + 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, + 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, + 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, + 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, + 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, + 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, + 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, + 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, + 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, + 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, + 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, + 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, + 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, + 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, + 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, + 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, + 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, + 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, + 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, + 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, + 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, + 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, + 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, + 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, + 3512343882U, 1552467474U, 3316162670U, 
1275841024U, 4142173454U, + 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, + 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, + 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, + 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, + 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, + 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, + 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, + 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, + 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, + 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, + 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, + 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, + 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, + 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, + 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, + 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, + 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, + 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, + 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, + 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, + 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, + 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, + 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, + 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, + 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, + 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, + 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, + 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, + 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, + 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, + 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, + 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, + 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, + 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, + 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, + 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, + 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, + 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, + 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, + 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, + 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, + 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, + 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, + 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, + 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, + 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, + 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, + 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, + 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, + 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, + 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, + 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, + 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, + 2423096824U, 21831741U, 380011703U, 
2498168716U, 861806087U, + 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, + 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, + 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, + 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, + 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, + 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, + 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, + 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, + 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, + 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, + 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, + 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, + 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, + 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, + 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, + 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, + 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, + 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, + 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, + 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, + 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, + 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, + 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, + 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, + 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, + 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U +}; +static const uint64_t init_gen_rand_64_expected[] = { + KQU(16924766246869039260), KQU( 8201438687333352714), + KQU( 2265290287015001750), KQU(18397264611805473832), + KQU( 3375255223302384358), KQU( 6345559975416828796), + KQU(18229739242790328073), KQU( 7596792742098800905), + KQU( 255338647169685981), KQU( 2052747240048610300), + KQU(18328151576097299343), KQU(12472905421133796567), + KQU(11315245349717600863), KQU(16594110197775871209), + KQU(15708751964632456450), KQU(10452031272054632535), + KQU(11097646720811454386), KQU( 4556090668445745441), + KQU(17116187693090663106), KQU(14931526836144510645), + KQU( 9190752218020552591), KQU( 9625800285771901401), + KQU(13995141077659972832), KQU( 5194209094927829625), + KQU( 4156788379151063303), KQU( 8523452593770139494), + KQU(14082382103049296727), KQU( 2462601863986088483), + KQU( 3030583461592840678), KQU( 5221622077872827681), + KQU( 3084210671228981236), KQU(13956758381389953823), + KQU(13503889856213423831), KQU(15696904024189836170), + KQU( 4612584152877036206), KQU( 6231135538447867881), + KQU(10172457294158869468), KQU( 6452258628466708150), + KQU(14044432824917330221), KQU( 370168364480044279), + KQU(10102144686427193359), KQU( 667870489994776076), + KQU( 2732271956925885858), KQU(18027788905977284151), + KQU(15009842788582923859), KQU( 7136357960180199542), + KQU(15901736243475578127), KQU(16951293785352615701), + KQU(10551492125243691632), KQU(17668869969146434804), + KQU(13646002971174390445), KQU( 9804471050759613248), + KQU( 5511670439655935493), KQU(18103342091070400926), + KQU(17224512747665137533), KQU(15534627482992618168), + KQU( 1423813266186582647), KQU(15821176807932930024), + KQU( 30323369733607156), KQU(11599382494723479403), + KQU( 653856076586810062), KQU( 3176437395144899659), + KQU(14028076268147963917), 
KQU(16156398271809666195), + KQU( 3166955484848201676), KQU( 5746805620136919390), + KQU(17297845208891256593), KQU(11691653183226428483), + KQU(17900026146506981577), KQU(15387382115755971042), + KQU(16923567681040845943), KQU( 8039057517199388606), + KQU(11748409241468629263), KQU( 794358245539076095), + KQU(13438501964693401242), KQU(14036803236515618962), + KQU( 5252311215205424721), KQU(17806589612915509081), + KQU( 6802767092397596006), KQU(14212120431184557140), + KQU( 1072951366761385712), KQU(13098491780722836296), + KQU( 9466676828710797353), KQU(12673056849042830081), + KQU(12763726623645357580), KQU(16468961652999309493), + KQU(15305979875636438926), KQU(17444713151223449734), + KQU( 5692214267627883674), KQU(13049589139196151505), + KQU( 880115207831670745), KQU( 1776529075789695498), + KQU(16695225897801466485), KQU(10666901778795346845), + KQU( 6164389346722833869), KQU( 2863817793264300475), + KQU( 9464049921886304754), KQU( 3993566636740015468), + KQU( 9983749692528514136), KQU(16375286075057755211), + KQU(16042643417005440820), KQU(11445419662923489877), + KQU( 7999038846885158836), KQU( 6721913661721511535), + KQU( 5363052654139357320), KQU( 1817788761173584205), + KQU(13290974386445856444), KQU( 4650350818937984680), + KQU( 8219183528102484836), KQU( 1569862923500819899), + KQU( 4189359732136641860), KQU(14202822961683148583), + KQU( 4457498315309429058), KQU(13089067387019074834), + KQU(11075517153328927293), KQU(10277016248336668389), + KQU( 7070509725324401122), KQU(17808892017780289380), + KQU(13143367339909287349), KQU( 1377743745360085151), + KQU( 5749341807421286485), KQU(14832814616770931325), + KQU( 7688820635324359492), KQU(10960474011539770045), + KQU( 81970066653179790), KQU(12619476072607878022), + KQU( 4419566616271201744), KQU(15147917311750568503), + KQU( 5549739182852706345), KQU( 7308198397975204770), + KQU(13580425496671289278), KQU(17070764785210130301), + KQU( 8202832846285604405), KQU( 6873046287640887249), + KQU( 6927424434308206114), KQU( 6139014645937224874), + KQU(10290373645978487639), KQU(15904261291701523804), + KQU( 9628743442057826883), KQU(18383429096255546714), + KQU( 4977413265753686967), KQU( 7714317492425012869), + KQU( 9025232586309926193), KQU(14627338359776709107), + KQU(14759849896467790763), KQU(10931129435864423252), + KQU( 4588456988775014359), KQU(10699388531797056724), + KQU( 468652268869238792), KQU( 5755943035328078086), + KQU( 2102437379988580216), KQU( 9986312786506674028), + KQU( 2654207180040945604), KQU( 8726634790559960062), + KQU( 100497234871808137), KQU( 2800137176951425819), + KQU( 6076627612918553487), KQU( 5780186919186152796), + KQU( 8179183595769929098), KQU( 6009426283716221169), + KQU( 2796662551397449358), KQU( 1756961367041986764), + KQU( 6972897917355606205), KQU(14524774345368968243), + KQU( 2773529684745706940), KQU( 4853632376213075959), + KQU( 4198177923731358102), KQU( 8271224913084139776), + KQU( 2741753121611092226), KQU(16782366145996731181), + KQU(15426125238972640790), KQU(13595497100671260342), + KQU( 3173531022836259898), KQU( 6573264560319511662), + KQU(18041111951511157441), KQU( 2351433581833135952), + KQU( 3113255578908173487), KQU( 1739371330877858784), + KQU(16046126562789165480), KQU( 8072101652214192925), + KQU(15267091584090664910), KQU( 9309579200403648940), + KQU( 5218892439752408722), KQU(14492477246004337115), + KQU(17431037586679770619), KQU( 7385248135963250480), + KQU( 9580144956565560660), KQU( 4919546228040008720), + KQU(15261542469145035584), KQU(18233297270822253102), + 
KQU( 5453248417992302857), KQU( 9309519155931460285), + KQU(10342813012345291756), KQU(15676085186784762381), + KQU(15912092950691300645), KQU( 9371053121499003195), + KQU( 9897186478226866746), KQU(14061858287188196327), + KQU( 122575971620788119), KQU(12146750969116317754), + KQU( 4438317272813245201), KQU( 8332576791009527119), + KQU(13907785691786542057), KQU(10374194887283287467), + KQU( 2098798755649059566), KQU( 3416235197748288894), + KQU( 8688269957320773484), KQU( 7503964602397371571), + KQU(16724977015147478236), KQU( 9461512855439858184), + KQU(13259049744534534727), KQU( 3583094952542899294), + KQU( 8764245731305528292), KQU(13240823595462088985), + KQU(13716141617617910448), KQU(18114969519935960955), + KQU( 2297553615798302206), KQU( 4585521442944663362), + KQU(17776858680630198686), KQU( 4685873229192163363), + KQU( 152558080671135627), KQU(15424900540842670088), + KQU(13229630297130024108), KQU(17530268788245718717), + KQU(16675633913065714144), KQU( 3158912717897568068), + KQU(15399132185380087288), KQU( 7401418744515677872), + KQU(13135412922344398535), KQU( 6385314346100509511), + KQU(13962867001134161139), KQU(10272780155442671999), + KQU(12894856086597769142), KQU(13340877795287554994), + KQU(12913630602094607396), KQU(12543167911119793857), + KQU(17343570372251873096), KQU(10959487764494150545), + KQU( 6966737953093821128), KQU(13780699135496988601), + KQU( 4405070719380142046), KQU(14923788365607284982), + KQU( 2869487678905148380), KQU( 6416272754197188403), + KQU(15017380475943612591), KQU( 1995636220918429487), + KQU( 3402016804620122716), KQU(15800188663407057080), + KQU(11362369990390932882), KQU(15262183501637986147), + KQU(10239175385387371494), KQU( 9352042420365748334), + KQU( 1682457034285119875), KQU( 1724710651376289644), + KQU( 2038157098893817966), KQU( 9897825558324608773), + KQU( 1477666236519164736), KQU(16835397314511233640), + KQU(10370866327005346508), KQU(10157504370660621982), + KQU(12113904045335882069), KQU(13326444439742783008), + KQU(11302769043000765804), KQU(13594979923955228484), + KQU(11779351762613475968), KQU( 3786101619539298383), + KQU( 8021122969180846063), KQU(15745904401162500495), + KQU(10762168465993897267), KQU(13552058957896319026), + KQU(11200228655252462013), KQU( 5035370357337441226), + KQU( 7593918984545500013), KQU( 5418554918361528700), + KQU( 4858270799405446371), KQU( 9974659566876282544), + KQU(18227595922273957859), KQU( 2772778443635656220), + KQU(14285143053182085385), KQU( 9939700992429600469), + KQU(12756185904545598068), KQU( 2020783375367345262), + KQU( 57026775058331227), KQU( 950827867930065454), + KQU( 6602279670145371217), KQU( 2291171535443566929), + KQU( 5832380724425010313), KQU( 1220343904715982285), + KQU(17045542598598037633), KQU(15460481779702820971), + KQU(13948388779949365130), KQU(13975040175430829518), + KQU(17477538238425541763), KQU(11104663041851745725), + KQU(15860992957141157587), KQU(14529434633012950138), + KQU( 2504838019075394203), KQU( 7512113882611121886), + KQU( 4859973559980886617), KQU( 1258601555703250219), + KQU(15594548157514316394), KQU( 4516730171963773048), + KQU(11380103193905031983), KQU( 6809282239982353344), + KQU(18045256930420065002), KQU( 2453702683108791859), + KQU( 977214582986981460), KQU( 2006410402232713466), + KQU( 6192236267216378358), KQU( 3429468402195675253), + KQU(18146933153017348921), KQU(17369978576367231139), + KQU( 1246940717230386603), KQU(11335758870083327110), + KQU(14166488801730353682), KQU( 9008573127269635732), + KQU(10776025389820643815), 
KQU(15087605441903942962), + KQU( 1359542462712147922), KQU(13898874411226454206), + KQU(17911176066536804411), KQU( 9435590428600085274), + KQU( 294488509967864007), KQU( 8890111397567922046), + KQU( 7987823476034328778), KQU(13263827582440967651), + KQU( 7503774813106751573), KQU(14974747296185646837), + KQU( 8504765037032103375), KQU(17340303357444536213), + KQU( 7704610912964485743), KQU( 8107533670327205061), + KQU( 9062969835083315985), KQU(16968963142126734184), + KQU(12958041214190810180), KQU( 2720170147759570200), + KQU( 2986358963942189566), KQU(14884226322219356580), + KQU( 286224325144368520), KQU(11313800433154279797), + KQU(18366849528439673248), KQU(17899725929482368789), + KQU( 3730004284609106799), KQU( 1654474302052767205), + KQU( 5006698007047077032), KQU( 8196893913601182838), + KQU(15214541774425211640), KQU(17391346045606626073), + KQU( 8369003584076969089), KQU( 3939046733368550293), + KQU(10178639720308707785), KQU( 2180248669304388697), + KQU( 62894391300126322), KQU( 9205708961736223191), + KQU( 6837431058165360438), KQU( 3150743890848308214), + KQU(17849330658111464583), KQU(12214815643135450865), + KQU(13410713840519603402), KQU( 3200778126692046802), + KQU(13354780043041779313), KQU( 800850022756886036), + KQU(15660052933953067433), KQU( 6572823544154375676), + KQU(11030281857015819266), KQU(12682241941471433835), + KQU(11654136407300274693), KQU( 4517795492388641109), + KQU( 9757017371504524244), KQU(17833043400781889277), + KQU(12685085201747792227), KQU(10408057728835019573), + KQU( 98370418513455221), KQU( 6732663555696848598), + KQU(13248530959948529780), KQU( 3530441401230622826), + KQU(18188251992895660615), KQU( 1847918354186383756), + KQU( 1127392190402660921), KQU(11293734643143819463), + KQU( 3015506344578682982), KQU(13852645444071153329), + KQU( 2121359659091349142), KQU( 1294604376116677694), + KQU( 5616576231286352318), KQU( 7112502442954235625), + KQU(11676228199551561689), KQU(12925182803007305359), + KQU( 7852375518160493082), KQU( 1136513130539296154), + KQU( 5636923900916593195), KQU( 3221077517612607747), + KQU(17784790465798152513), KQU( 3554210049056995938), + KQU(17476839685878225874), KQU( 3206836372585575732), + KQU( 2765333945644823430), KQU(10080070903718799528), + KQU( 5412370818878286353), KQU( 9689685887726257728), + KQU( 8236117509123533998), KQU( 1951139137165040214), + KQU( 4492205209227980349), KQU(16541291230861602967), + KQU( 1424371548301437940), KQU( 9117562079669206794), + KQU(14374681563251691625), KQU(13873164030199921303), + KQU( 6680317946770936731), KQU(15586334026918276214), + KQU(10896213950976109802), KQU( 9506261949596413689), + KQU( 9903949574308040616), KQU( 6038397344557204470), + KQU( 174601465422373648), KQU(15946141191338238030), + KQU(17142225620992044937), KQU( 7552030283784477064), + KQU( 2947372384532947997), KQU( 510797021688197711), + KQU( 4962499439249363461), KQU( 23770320158385357), + KQU( 959774499105138124), KQU( 1468396011518788276), + KQU( 2015698006852312308), KQU( 4149400718489980136), + KQU( 5992916099522371188), KQU(10819182935265531076), + KQU(16189787999192351131), KQU( 342833961790261950), + KQU(12470830319550495336), KQU(18128495041912812501), + KQU( 1193600899723524337), KQU( 9056793666590079770), + KQU( 2154021227041669041), KQU( 4963570213951235735), + KQU( 4865075960209211409), KQU( 2097724599039942963), + KQU( 2024080278583179845), KQU(11527054549196576736), + KQU(10650256084182390252), KQU( 4808408648695766755), + KQU( 1642839215013788844), KQU(10607187948250398390), + KQU( 
7076868166085913508), KQU( 730522571106887032), + KQU(12500579240208524895), KQU( 4484390097311355324), + KQU(15145801330700623870), KQU( 8055827661392944028), + KQU( 5865092976832712268), KQU(15159212508053625143), + KQU( 3560964582876483341), KQU( 4070052741344438280), + KQU( 6032585709886855634), KQU(15643262320904604873), + KQU( 2565119772293371111), KQU( 318314293065348260), + KQU(15047458749141511872), KQU( 7772788389811528730), + KQU( 7081187494343801976), KQU( 6465136009467253947), + KQU(10425940692543362069), KQU( 554608190318339115), + KQU(14796699860302125214), KQU( 1638153134431111443), + KQU(10336967447052276248), KQU( 8412308070396592958), + KQU( 4004557277152051226), KQU( 8143598997278774834), + KQU(16413323996508783221), KQU(13139418758033994949), + KQU( 9772709138335006667), KQU( 2818167159287157659), + KQU(17091740573832523669), KQU(14629199013130751608), + KQU(18268322711500338185), KQU( 8290963415675493063), + KQU( 8830864907452542588), KQU( 1614839084637494849), + KQU(14855358500870422231), KQU( 3472996748392519937), + KQU(15317151166268877716), KQU( 5825895018698400362), + KQU(16730208429367544129), KQU(10481156578141202800), + KQU( 4746166512382823750), KQU(12720876014472464998), + KQU( 8825177124486735972), KQU(13733447296837467838), + KQU( 6412293741681359625), KQU( 8313213138756135033), + KQU(11421481194803712517), KQU( 7997007691544174032), + KQU( 6812963847917605930), KQU( 9683091901227558641), + KQU(14703594165860324713), KQU( 1775476144519618309), + KQU( 2724283288516469519), KQU( 717642555185856868), + KQU( 8736402192215092346), KQU(11878800336431381021), + KQU( 4348816066017061293), KQU( 6115112756583631307), + KQU( 9176597239667142976), KQU(12615622714894259204), + KQU(10283406711301385987), KQU( 5111762509485379420), + KQU( 3118290051198688449), KQU( 7345123071632232145), + KQU( 9176423451688682359), KQU( 4843865456157868971), + KQU(12008036363752566088), KQU(12058837181919397720), + KQU( 2145073958457347366), KQU( 1526504881672818067), + KQU( 3488830105567134848), KQU(13208362960674805143), + KQU( 4077549672899572192), KQU( 7770995684693818365), + KQU( 1398532341546313593), KQU(12711859908703927840), + KQU( 1417561172594446813), KQU(17045191024194170604), + KQU( 4101933177604931713), KQU(14708428834203480320), + KQU(17447509264469407724), KQU(14314821973983434255), + KQU(17990472271061617265), KQU( 5087756685841673942), + KQU(12797820586893859939), KQU( 1778128952671092879), + KQU( 3535918530508665898), KQU( 9035729701042481301), + KQU(14808661568277079962), KQU(14587345077537747914), + KQU(11920080002323122708), KQU( 6426515805197278753), + KQU( 3295612216725984831), KQU(11040722532100876120), + KQU(12305952936387598754), KQU(16097391899742004253), + KQU( 4908537335606182208), KQU(12446674552196795504), + KQU(16010497855816895177), KQU( 9194378874788615551), + KQU( 3382957529567613384), KQU( 5154647600754974077), + KQU( 9801822865328396141), KQU( 9023662173919288143), + KQU(17623115353825147868), KQU( 8238115767443015816), + KQU(15811444159859002560), KQU( 9085612528904059661), + KQU( 6888601089398614254), KQU( 258252992894160189), + KQU( 6704363880792428622), KQU( 6114966032147235763), + KQU(11075393882690261875), KQU( 8797664238933620407), + KQU( 5901892006476726920), KQU( 5309780159285518958), + KQU(14940808387240817367), KQU(14642032021449656698), + KQU( 9808256672068504139), KQU( 3670135111380607658), + KQU(11211211097845960152), KQU( 1474304506716695808), + KQU(15843166204506876239), KQU( 7661051252471780561), + KQU(10170905502249418476), KQU( 
7801416045582028589), + KQU( 2763981484737053050), KQU( 9491377905499253054), + KQU(16201395896336915095), KQU( 9256513756442782198), + KQU( 5411283157972456034), KQU( 5059433122288321676), + KQU( 4327408006721123357), KQU( 9278544078834433377), + KQU( 7601527110882281612), KQU(11848295896975505251), + KQU(12096998801094735560), KQU(14773480339823506413), + KQU(15586227433895802149), KQU(12786541257830242872), + KQU( 6904692985140503067), KQU( 5309011515263103959), + KQU(12105257191179371066), KQU(14654380212442225037), + KQU( 2556774974190695009), KQU( 4461297399927600261), + KQU(14888225660915118646), KQU(14915459341148291824), + KQU( 2738802166252327631), KQU( 6047155789239131512), + KQU(12920545353217010338), KQU(10697617257007840205), + KQU( 2751585253158203504), KQU(13252729159780047496), + KQU(14700326134672815469), KQU(14082527904374600529), + KQU(16852962273496542070), KQU(17446675504235853907), + KQU(15019600398527572311), KQU(12312781346344081551), + KQU(14524667935039810450), KQU( 5634005663377195738), + KQU(11375574739525000569), KQU( 2423665396433260040), + KQU( 5222836914796015410), KQU( 4397666386492647387), + KQU( 4619294441691707638), KQU( 665088602354770716), + KQU(13246495665281593610), KQU( 6564144270549729409), + KQU(10223216188145661688), KQU( 3961556907299230585), + KQU(11543262515492439914), KQU(16118031437285993790), + KQU( 7143417964520166465), KQU(13295053515909486772), + KQU( 40434666004899675), KQU(17127804194038347164), + KQU( 8599165966560586269), KQU( 8214016749011284903), + KQU(13725130352140465239), KQU( 5467254474431726291), + KQU( 7748584297438219877), KQU(16933551114829772472), + KQU( 2169618439506799400), KQU( 2169787627665113463), + KQU(17314493571267943764), KQU(18053575102911354912), + KQU(11928303275378476973), KQU(11593850925061715550), + KQU(17782269923473589362), KQU( 3280235307704747039), + KQU( 6145343578598685149), KQU(17080117031114086090), + KQU(18066839902983594755), KQU( 6517508430331020706), + KQU( 8092908893950411541), KQU(12558378233386153732), + KQU( 4476532167973132976), KQU(16081642430367025016), + KQU( 4233154094369139361), KQU( 8693630486693161027), + KQU(11244959343027742285), KQU(12273503967768513508), + KQU(14108978636385284876), KQU( 7242414665378826984), + KQU( 6561316938846562432), KQU( 8601038474994665795), + KQU(17532942353612365904), KQU(17940076637020912186), + KQU( 7340260368823171304), KQU( 7061807613916067905), + KQU(10561734935039519326), KQU(17990796503724650862), + KQU( 6208732943911827159), KQU( 359077562804090617), + KQU(14177751537784403113), KQU(10659599444915362902), + KQU(15081727220615085833), KQU(13417573895659757486), + KQU(15513842342017811524), KQU(11814141516204288231), + KQU( 1827312513875101814), KQU( 2804611699894603103), + KQU(17116500469975602763), KQU(12270191815211952087), + KQU(12256358467786024988), KQU(18435021722453971267), + KQU( 671330264390865618), KQU( 476504300460286050), + KQU(16465470901027093441), KQU( 4047724406247136402), + KQU( 1322305451411883346), KQU( 1388308688834322280), + KQU( 7303989085269758176), KQU( 9323792664765233642), + KQU( 4542762575316368936), KQU(17342696132794337618), + KQU( 4588025054768498379), KQU(13415475057390330804), + KQU(17880279491733405570), KQU(10610553400618620353), + KQU( 3180842072658960139), KQU(13002966655454270120), + KQU( 1665301181064982826), KQU( 7083673946791258979), + KQU( 190522247122496820), KQU(17388280237250677740), + KQU( 8430770379923642945), KQU(12987180971921668584), + KQU( 2311086108365390642), KQU( 2870984383579822345), + 
KQU(14014682609164653318), KQU(14467187293062251484), + KQU( 192186361147413298), KQU(15171951713531796524), + KQU( 9900305495015948728), KQU(17958004775615466344), + KQU(14346380954498606514), KQU(18040047357617407096), + KQU( 5035237584833424532), KQU(15089555460613972287), + KQU( 4131411873749729831), KQU( 1329013581168250330), + KQU(10095353333051193949), KQU(10749518561022462716), + KQU( 9050611429810755847), KQU(15022028840236655649), + KQU( 8775554279239748298), KQU(13105754025489230502), + KQU(15471300118574167585), KQU( 89864764002355628), + KQU( 8776416323420466637), KQU( 5280258630612040891), + KQU( 2719174488591862912), KQU( 7599309137399661994), + KQU(15012887256778039979), KQU(14062981725630928925), + KQU(12038536286991689603), KQU( 7089756544681775245), + KQU(10376661532744718039), KQU( 1265198725901533130), + KQU(13807996727081142408), KQU( 2935019626765036403), + KQU( 7651672460680700141), KQU( 3644093016200370795), + KQU( 2840982578090080674), KQU(17956262740157449201), + KQU(18267979450492880548), KQU(11799503659796848070), + KQU( 9942537025669672388), KQU(11886606816406990297), + KQU( 5488594946437447576), KQU( 7226714353282744302), + KQU( 3784851653123877043), KQU( 878018453244803041), + KQU(12110022586268616085), KQU( 734072179404675123), + KQU(11869573627998248542), KQU( 469150421297783998), + KQU( 260151124912803804), KQU(11639179410120968649), + KQU( 9318165193840846253), KQU(12795671722734758075), + KQU(15318410297267253933), KQU( 691524703570062620), + KQU( 5837129010576994601), KQU(15045963859726941052), + KQU( 5850056944932238169), KQU(12017434144750943807), + KQU( 7447139064928956574), KQU( 3101711812658245019), + KQU(16052940704474982954), KQU(18195745945986994042), + KQU( 8932252132785575659), KQU(13390817488106794834), + KQU(11582771836502517453), KQU( 4964411326683611686), + KQU( 2195093981702694011), KQU(14145229538389675669), + KQU(16459605532062271798), KQU( 866316924816482864), + KQU( 4593041209937286377), KQU( 8415491391910972138), + KQU( 4171236715600528969), KQU(16637569303336782889), + KQU( 2002011073439212680), KQU(17695124661097601411), + KQU( 4627687053598611702), KQU( 7895831936020190403), + KQU( 8455951300917267802), KQU( 2923861649108534854), + KQU( 8344557563927786255), KQU( 6408671940373352556), + KQU(12210227354536675772), KQU(14294804157294222295), + KQU(10103022425071085127), KQU(10092959489504123771), + KQU( 6554774405376736268), KQU(12629917718410641774), + KQU( 6260933257596067126), KQU( 2460827021439369673), + KQU( 2541962996717103668), KQU( 597377203127351475), + KQU( 5316984203117315309), KQU( 4811211393563241961), + KQU(13119698597255811641), KQU( 8048691512862388981), + KQU(10216818971194073842), KQU( 4612229970165291764), + KQU(10000980798419974770), KQU( 6877640812402540687), + KQU( 1488727563290436992), KQU( 2227774069895697318), + KQU(11237754507523316593), KQU(13478948605382290972), + KQU( 1963583846976858124), KQU( 5512309205269276457), + KQU( 3972770164717652347), KQU( 3841751276198975037), + KQU(10283343042181903117), KQU( 8564001259792872199), + KQU(16472187244722489221), KQU( 8953493499268945921), + KQU( 3518747340357279580), KQU( 4003157546223963073), + KQU( 3270305958289814590), KQU( 3966704458129482496), + KQU( 8122141865926661939), KQU(14627734748099506653), + KQU(13064426990862560568), KQU( 2414079187889870829), + KQU( 5378461209354225306), KQU(10841985740128255566), + KQU( 538582442885401738), KQU( 7535089183482905946), + KQU(16117559957598879095), KQU( 8477890721414539741), + KQU( 1459127491209533386), 
KQU(17035126360733620462), + KQU( 8517668552872379126), KQU(10292151468337355014), + KQU(17081267732745344157), KQU(13751455337946087178), + KQU(14026945459523832966), KQU( 6653278775061723516), + KQU(10619085543856390441), KQU( 2196343631481122885), + KQU(10045966074702826136), KQU(10082317330452718282), + KQU( 5920859259504831242), KQU( 9951879073426540617), + KQU( 7074696649151414158), KQU(15808193543879464318), + KQU( 7385247772746953374), KQU( 3192003544283864292), + KQU(18153684490917593847), KQU(12423498260668568905), + KQU(10957758099756378169), KQU(11488762179911016040), + KQU( 2099931186465333782), KQU(11180979581250294432), + KQU( 8098916250668367933), KQU( 3529200436790763465), + KQU(12988418908674681745), KQU( 6147567275954808580), + KQU( 3207503344604030989), KQU(10761592604898615360), + KQU( 229854861031893504), KQU( 8809853962667144291), + KQU(13957364469005693860), KQU( 7634287665224495886), + KQU(12353487366976556874), KQU( 1134423796317152034), + KQU( 2088992471334107068), KQU( 7393372127190799698), + KQU( 1845367839871058391), KQU( 207922563987322884), + KQU(11960870813159944976), KQU(12182120053317317363), + KQU(17307358132571709283), KQU(13871081155552824936), + KQU(18304446751741566262), KQU( 7178705220184302849), + KQU(10929605677758824425), KQU(16446976977835806844), + KQU(13723874412159769044), KQU( 6942854352100915216), + KQU( 1726308474365729390), KQU( 2150078766445323155), + KQU(15345558947919656626), KQU(12145453828874527201), + KQU( 2054448620739726849), KQU( 2740102003352628137), + KQU(11294462163577610655), KQU( 756164283387413743), + KQU(17841144758438810880), KQU(10802406021185415861), + KQU( 8716455530476737846), KQU( 6321788834517649606), + KQU(14681322910577468426), KQU(17330043563884336387), + KQU(12701802180050071614), KQU(14695105111079727151), + KQU( 5112098511654172830), KQU( 4957505496794139973), + KQU( 8270979451952045982), KQU(12307685939199120969), + KQU(12425799408953443032), KQU( 8376410143634796588), + KQU(16621778679680060464), KQU( 3580497854566660073), + KQU( 1122515747803382416), KQU( 857664980960597599), + KQU( 6343640119895925918), KQU(12878473260854462891), + KQU(10036813920765722626), KQU(14451335468363173812), + KQU( 5476809692401102807), KQU(16442255173514366342), + KQU(13060203194757167104), KQU(14354124071243177715), + KQU(15961249405696125227), KQU(13703893649690872584), + KQU( 363907326340340064), KQU( 6247455540491754842), + KQU(12242249332757832361), KQU( 156065475679796717), + KQU( 9351116235749732355), KQU( 4590350628677701405), + KQU( 1671195940982350389), KQU(13501398458898451905), + KQU( 6526341991225002255), KQU( 1689782913778157592), + KQU( 7439222350869010334), KQU(13975150263226478308), + KQU(11411961169932682710), KQU(17204271834833847277), + KQU( 541534742544435367), KQU( 6591191931218949684), + KQU( 2645454775478232486), KQU( 4322857481256485321), + KQU( 8477416487553065110), KQU(12902505428548435048), + KQU( 971445777981341415), KQU(14995104682744976712), + KQU( 4243341648807158063), KQU( 8695061252721927661), + KQU( 5028202003270177222), KQU( 2289257340915567840), + KQU(13870416345121866007), KQU(13994481698072092233), + KQU( 6912785400753196481), KQU( 2278309315841980139), + KQU( 4329765449648304839), KQU( 5963108095785485298), + KQU( 4880024847478722478), KQU(16015608779890240947), + KQU( 1866679034261393544), KQU( 914821179919731519), + KQU( 9643404035648760131), KQU( 2418114953615593915), + KQU( 944756836073702374), KQU(15186388048737296834), + KQU( 7723355336128442206), KQU( 7500747479679599691), + 
KQU(18013961306453293634), KQU( 2315274808095756456), + KQU(13655308255424029566), KQU(17203800273561677098), + KQU( 1382158694422087756), KQU( 5090390250309588976), + KQU( 517170818384213989), KQU( 1612709252627729621), + KQU( 1330118955572449606), KQU( 300922478056709885), + KQU(18115693291289091987), KQU(13491407109725238321), + KQU(15293714633593827320), KQU( 5151539373053314504), + KQU( 5951523243743139207), KQU(14459112015249527975), + KQU( 5456113959000700739), KQU( 3877918438464873016), + KQU(12534071654260163555), KQU(15871678376893555041), + KQU(11005484805712025549), KQU(16353066973143374252), + KQU( 4358331472063256685), KQU( 8268349332210859288), + KQU(12485161590939658075), KQU(13955993592854471343), + KQU( 5911446886848367039), KQU(14925834086813706974), + KQU( 6590362597857994805), KQU( 1280544923533661875), + KQU( 1637756018947988164), KQU( 4734090064512686329), + KQU(16693705263131485912), KQU( 6834882340494360958), + KQU( 8120732176159658505), KQU( 2244371958905329346), + KQU(10447499707729734021), KQU( 7318742361446942194), + KQU( 8032857516355555296), KQU(14023605983059313116), + KQU( 1032336061815461376), KQU( 9840995337876562612), + KQU( 9869256223029203587), KQU(12227975697177267636), + KQU(12728115115844186033), KQU( 7752058479783205470), + KQU( 729733219713393087), KQU(12954017801239007622) +}; +static const uint64_t init_by_array_64_expected[] = { + KQU( 2100341266307895239), KQU( 8344256300489757943), + KQU(15687933285484243894), KQU( 8268620370277076319), + KQU(12371852309826545459), KQU( 8800491541730110238), + KQU(18113268950100835773), KQU( 2886823658884438119), + KQU( 3293667307248180724), KQU( 9307928143300172731), + KQU( 7688082017574293629), KQU( 900986224735166665), + KQU( 9977972710722265039), KQU( 6008205004994830552), + KQU( 546909104521689292), KQU( 7428471521869107594), + KQU(14777563419314721179), KQU(16116143076567350053), + KQU( 5322685342003142329), KQU( 4200427048445863473), + KQU( 4693092150132559146), KQU(13671425863759338582), + KQU( 6747117460737639916), KQU( 4732666080236551150), + KQU( 5912839950611941263), KQU( 3903717554504704909), + KQU( 2615667650256786818), KQU(10844129913887006352), + KQU(13786467861810997820), KQU(14267853002994021570), + KQU(13767807302847237439), KQU(16407963253707224617), + KQU( 4802498363698583497), KQU( 2523802839317209764), + KQU( 3822579397797475589), KQU( 8950320572212130610), + KQU( 3745623504978342534), KQU(16092609066068482806), + KQU( 9817016950274642398), KQU(10591660660323829098), + KQU(11751606650792815920), KQU( 5122873818577122211), + KQU(17209553764913936624), KQU( 6249057709284380343), + KQU(15088791264695071830), KQU(15344673071709851930), + KQU( 4345751415293646084), KQU( 2542865750703067928), + KQU(13520525127852368784), KQU(18294188662880997241), + KQU( 3871781938044881523), KQU( 2873487268122812184), + KQU(15099676759482679005), KQU(15442599127239350490), + KQU( 6311893274367710888), KQU( 3286118760484672933), + KQU( 4146067961333542189), KQU(13303942567897208770), + KQU( 8196013722255630418), KQU( 4437815439340979989), + KQU(15433791533450605135), KQU( 4254828956815687049), + KQU( 1310903207708286015), KQU(10529182764462398549), + KQU(14900231311660638810), KQU( 9727017277104609793), + KQU( 1821308310948199033), KQU(11628861435066772084), + KQU( 9469019138491546924), KQU( 3145812670532604988), + KQU( 9938468915045491919), KQU( 1562447430672662142), + KQU(13963995266697989134), KQU( 3356884357625028695), + KQU( 4499850304584309747), KQU( 8456825817023658122), + KQU(10859039922814285279), 
KQU( 8099512337972526555), + KQU( 348006375109672149), KQU(11919893998241688603), + KQU( 1104199577402948826), KQU(16689191854356060289), + KQU(10992552041730168078), KQU( 7243733172705465836), + KQU( 5668075606180319560), KQU(18182847037333286970), + KQU( 4290215357664631322), KQU( 4061414220791828613), + KQU(13006291061652989604), KQU( 7140491178917128798), + KQU(12703446217663283481), KQU( 5500220597564558267), + KQU(10330551509971296358), KQU(15958554768648714492), + KQU( 5174555954515360045), KQU( 1731318837687577735), + KQU( 3557700801048354857), KQU(13764012341928616198), + KQU(13115166194379119043), KQU( 7989321021560255519), + KQU( 2103584280905877040), KQU( 9230788662155228488), + KQU(16396629323325547654), KQU( 657926409811318051), + KQU(15046700264391400727), KQU( 5120132858771880830), + KQU( 7934160097989028561), KQU( 6963121488531976245), + KQU(17412329602621742089), KQU(15144843053931774092), + KQU(17204176651763054532), KQU(13166595387554065870), + KQU( 8590377810513960213), KQU( 5834365135373991938), + KQU( 7640913007182226243), KQU( 3479394703859418425), + KQU(16402784452644521040), KQU( 4993979809687083980), + KQU(13254522168097688865), KQU(15643659095244365219), + KQU( 5881437660538424982), KQU(11174892200618987379), + KQU( 254409966159711077), KQU(17158413043140549909), + KQU( 3638048789290376272), KQU( 1376816930299489190), + KQU( 4622462095217761923), KQU(15086407973010263515), + KQU(13253971772784692238), KQU( 5270549043541649236), + KQU(11182714186805411604), KQU(12283846437495577140), + KQU( 5297647149908953219), KQU(10047451738316836654), + KQU( 4938228100367874746), KQU(12328523025304077923), + KQU( 3601049438595312361), KQU( 9313624118352733770), + KQU(13322966086117661798), KQU(16660005705644029394), + KQU(11337677526988872373), KQU(13869299102574417795), + KQU(15642043183045645437), KQU( 3021755569085880019), + KQU( 4979741767761188161), KQU(13679979092079279587), + KQU( 3344685842861071743), KQU(13947960059899588104), + KQU( 305806934293368007), KQU( 5749173929201650029), + KQU(11123724852118844098), KQU(15128987688788879802), + KQU(15251651211024665009), KQU( 7689925933816577776), + KQU(16732804392695859449), KQU(17087345401014078468), + KQU(14315108589159048871), KQU( 4820700266619778917), + KQU(16709637539357958441), KQU( 4936227875177351374), + KQU( 2137907697912987247), KQU(11628565601408395420), + KQU( 2333250549241556786), KQU( 5711200379577778637), + KQU( 5170680131529031729), KQU(12620392043061335164), + KQU( 95363390101096078), KQU( 5487981914081709462), + KQU( 1763109823981838620), KQU( 3395861271473224396), + KQU( 1300496844282213595), KQU( 6894316212820232902), + KQU(10673859651135576674), KQU( 5911839658857903252), + KQU(17407110743387299102), KQU( 8257427154623140385), + KQU(11389003026741800267), KQU( 4070043211095013717), + KQU(11663806997145259025), KQU(15265598950648798210), + KQU( 630585789434030934), KQU( 3524446529213587334), + KQU( 7186424168495184211), KQU(10806585451386379021), + KQU(11120017753500499273), KQU( 1586837651387701301), + KQU(17530454400954415544), KQU( 9991670045077880430), + KQU( 7550997268990730180), KQU( 8640249196597379304), + KQU( 3522203892786893823), KQU(10401116549878854788), + KQU(13690285544733124852), KQU( 8295785675455774586), + KQU(15535716172155117603), KQU( 3112108583723722511), + KQU(17633179955339271113), KQU(18154208056063759375), + KQU( 1866409236285815666), KQU(13326075895396412882), + KQU( 8756261842948020025), KQU( 6281852999868439131), + KQU(15087653361275292858), KQU(10333923911152949397), + KQU( 
5265567645757408500), KQU(12728041843210352184), + KQU( 6347959327507828759), KQU( 154112802625564758), + KQU(18235228308679780218), KQU( 3253805274673352418), + KQU( 4849171610689031197), KQU(17948529398340432518), + KQU(13803510475637409167), KQU(13506570190409883095), + KQU(15870801273282960805), KQU( 8451286481299170773), + KQU( 9562190620034457541), KQU( 8518905387449138364), + KQU(12681306401363385655), KQU( 3788073690559762558), + KQU( 5256820289573487769), KQU( 2752021372314875467), + KQU( 6354035166862520716), KQU( 4328956378309739069), + KQU( 449087441228269600), KQU( 5533508742653090868), + KQU( 1260389420404746988), KQU(18175394473289055097), + KQU( 1535467109660399420), KQU( 8818894282874061442), + KQU(12140873243824811213), KQU(15031386653823014946), + KQU( 1286028221456149232), KQU( 6329608889367858784), + KQU( 9419654354945132725), KQU( 6094576547061672379), + KQU(17706217251847450255), KQU( 1733495073065878126), + KQU(16918923754607552663), KQU( 8881949849954945044), + KQU(12938977706896313891), KQU(14043628638299793407), + KQU(18393874581723718233), KQU( 6886318534846892044), + KQU(14577870878038334081), KQU(13541558383439414119), + KQU(13570472158807588273), KQU(18300760537910283361), + KQU( 818368572800609205), KQU( 1417000585112573219), + KQU(12337533143867683655), KQU(12433180994702314480), + KQU( 778190005829189083), KQU(13667356216206524711), + KQU( 9866149895295225230), KQU(11043240490417111999), + KQU( 1123933826541378598), KQU( 6469631933605123610), + KQU(14508554074431980040), KQU(13918931242962026714), + KQU( 2870785929342348285), KQU(14786362626740736974), + KQU(13176680060902695786), KQU( 9591778613541679456), + KQU( 9097662885117436706), KQU( 749262234240924947), + KQU( 1944844067793307093), KQU( 4339214904577487742), + KQU( 8009584152961946551), KQU(16073159501225501777), + KQU( 3335870590499306217), KQU(17088312653151202847), + KQU( 3108893142681931848), KQU(16636841767202792021), + KQU(10423316431118400637), KQU( 8008357368674443506), + KQU(11340015231914677875), KQU(17687896501594936090), + KQU(15173627921763199958), KQU( 542569482243721959), + KQU(15071714982769812975), KQU( 4466624872151386956), + KQU( 1901780715602332461), KQU( 9822227742154351098), + KQU( 1479332892928648780), KQU( 6981611948382474400), + KQU( 7620824924456077376), KQU(14095973329429406782), + KQU( 7902744005696185404), KQU(15830577219375036920), + KQU(10287076667317764416), KQU(12334872764071724025), + KQU( 4419302088133544331), KQU(14455842851266090520), + KQU(12488077416504654222), KQU( 7953892017701886766), + KQU( 6331484925529519007), KQU( 4902145853785030022), + KQU(17010159216096443073), KQU(11945354668653886087), + KQU(15112022728645230829), KQU(17363484484522986742), + KQU( 4423497825896692887), KQU( 8155489510809067471), + KQU( 258966605622576285), KQU( 5462958075742020534), + KQU( 6763710214913276228), KQU( 2368935183451109054), + KQU(14209506165246453811), KQU( 2646257040978514881), + KQU( 3776001911922207672), KQU( 1419304601390147631), + KQU(14987366598022458284), KQU( 3977770701065815721), + KQU( 730820417451838898), KQU( 3982991703612885327), + KQU( 2803544519671388477), KQU(17067667221114424649), + KQU( 2922555119737867166), KQU( 1989477584121460932), + KQU(15020387605892337354), KQU( 9293277796427533547), + KQU(10722181424063557247), KQU(16704542332047511651), + KQU( 5008286236142089514), KQU(16174732308747382540), + KQU(17597019485798338402), KQU(13081745199110622093), + KQU( 8850305883842258115), KQU(12723629125624589005), + KQU( 8140566453402805978), 
KQU(15356684607680935061), + KQU(14222190387342648650), KQU(11134610460665975178), + KQU( 1259799058620984266), KQU(13281656268025610041), + KQU( 298262561068153992), KQU(12277871700239212922), + KQU(13911297774719779438), KQU(16556727962761474934), + KQU(17903010316654728010), KQU( 9682617699648434744), + KQU(14757681836838592850), KQU( 1327242446558524473), + KQU(11126645098780572792), KQU( 1883602329313221774), + KQU( 2543897783922776873), KQU(15029168513767772842), + KQU(12710270651039129878), KQU(16118202956069604504), + KQU(15010759372168680524), KQU( 2296827082251923948), + KQU(10793729742623518101), KQU(13829764151845413046), + KQU(17769301223184451213), KQU( 3118268169210783372), + KQU(17626204544105123127), KQU( 7416718488974352644), + KQU(10450751996212925994), KQU( 9352529519128770586), + KQU( 259347569641110140), KQU( 8048588892269692697), + KQU( 1774414152306494058), KQU(10669548347214355622), + KQU(13061992253816795081), KQU(18432677803063861659), + KQU( 8879191055593984333), KQU(12433753195199268041), + KQU(14919392415439730602), KQU( 6612848378595332963), + KQU( 6320986812036143628), KQU(10465592420226092859), + KQU( 4196009278962570808), KQU( 3747816564473572224), + KQU(17941203486133732898), KQU( 2350310037040505198), + KQU( 5811779859134370113), KQU(10492109599506195126), + KQU( 7699650690179541274), KQU( 1954338494306022961), + KQU(14095816969027231152), KQU( 5841346919964852061), + KQU(14945969510148214735), KQU( 3680200305887550992), + KQU( 6218047466131695792), KQU( 8242165745175775096), + KQU(11021371934053307357), KQU( 1265099502753169797), + KQU( 4644347436111321718), KQU( 3609296916782832859), + KQU( 8109807992218521571), KQU(18387884215648662020), + KQU(14656324896296392902), KQU(17386819091238216751), + KQU(17788300878582317152), KQU( 7919446259742399591), + KQU( 4466613134576358004), KQU(12928181023667938509), + KQU(13147446154454932030), KQU(16552129038252734620), + KQU( 8395299403738822450), KQU(11313817655275361164), + KQU( 434258809499511718), KQU( 2074882104954788676), + KQU( 7929892178759395518), KQU( 9006461629105745388), + KQU( 5176475650000323086), KQU(11128357033468341069), + KQU(12026158851559118955), KQU(14699716249471156500), + KQU( 448982497120206757), KQU( 4156475356685519900), + KQU( 6063816103417215727), KQU(10073289387954971479), + KQU( 8174466846138590962), KQU( 2675777452363449006), + KQU( 9090685420572474281), KQU( 6659652652765562060), + KQU(12923120304018106621), KQU(11117480560334526775), + KQU( 937910473424587511), KQU( 1838692113502346645), + KQU(11133914074648726180), KQU( 7922600945143884053), + KQU(13435287702700959550), KQU( 5287964921251123332), + KQU(11354875374575318947), KQU(17955724760748238133), + KQU(13728617396297106512), KQU( 4107449660118101255), + KQU( 1210269794886589623), KQU(11408687205733456282), + KQU( 4538354710392677887), KQU(13566803319341319267), + KQU(17870798107734050771), KQU( 3354318982568089135), + KQU( 9034450839405133651), KQU(13087431795753424314), + KQU( 950333102820688239), KQU( 1968360654535604116), + KQU(16840551645563314995), KQU( 8867501803892924995), + KQU(11395388644490626845), KQU( 1529815836300732204), + KQU(13330848522996608842), KQU( 1813432878817504265), + KQU( 2336867432693429560), KQU(15192805445973385902), + KQU( 2528593071076407877), KQU( 128459777936689248), + KQU( 9976345382867214866), KQU( 6208885766767996043), + KQU(14982349522273141706), KQU( 3099654362410737822), + KQU(13776700761947297661), KQU( 8806185470684925550), + KQU( 8151717890410585321), KQU( 640860591588072925), + 
KQU(14592096303937307465), KQU( 9056472419613564846), + KQU(14861544647742266352), KQU(12703771500398470216), + KQU( 3142372800384138465), KQU( 6201105606917248196), + KQU(18337516409359270184), KQU(15042268695665115339), + KQU(15188246541383283846), KQU(12800028693090114519), + KQU( 5992859621101493472), KQU(18278043971816803521), + KQU( 9002773075219424560), KQU( 7325707116943598353), + KQU( 7930571931248040822), KQU( 5645275869617023448), + KQU( 7266107455295958487), KQU( 4363664528273524411), + KQU(14313875763787479809), KQU(17059695613553486802), + KQU( 9247761425889940932), KQU(13704726459237593128), + KQU( 2701312427328909832), KQU(17235532008287243115), + KQU(14093147761491729538), KQU( 6247352273768386516), + KQU( 8268710048153268415), KQU( 7985295214477182083), + KQU(15624495190888896807), KQU( 3772753430045262788), + KQU( 9133991620474991698), KQU( 5665791943316256028), + KQU( 7551996832462193473), KQU(13163729206798953877), + KQU( 9263532074153846374), KQU( 1015460703698618353), + KQU(17929874696989519390), KQU(18257884721466153847), + KQU(16271867543011222991), KQU( 3905971519021791941), + KQU(16814488397137052085), KQU( 1321197685504621613), + KQU( 2870359191894002181), KQU(14317282970323395450), + KQU(13663920845511074366), KQU( 2052463995796539594), + KQU(14126345686431444337), KQU( 1727572121947022534), + KQU(17793552254485594241), KQU( 6738857418849205750), + KQU( 1282987123157442952), KQU(16655480021581159251), + KQU( 6784587032080183866), KQU(14726758805359965162), + KQU( 7577995933961987349), KQU(12539609320311114036), + KQU(10789773033385439494), KQU( 8517001497411158227), + KQU(10075543932136339710), KQU(14838152340938811081), + KQU( 9560840631794044194), KQU(17445736541454117475), + KQU(10633026464336393186), KQU(15705729708242246293), + KQU( 1117517596891411098), KQU( 4305657943415886942), + KQU( 4948856840533979263), KQU(16071681989041789593), + KQU(13723031429272486527), KQU( 7639567622306509462), + KQU(12670424537483090390), KQU( 9715223453097197134), + KQU( 5457173389992686394), KQU( 289857129276135145), + KQU(17048610270521972512), KQU( 692768013309835485), + KQU(14823232360546632057), KQU(18218002361317895936), + KQU( 3281724260212650204), KQU(16453957266549513795), + KQU( 8592711109774511881), KQU( 929825123473369579), + KQU(15966784769764367791), KQU( 9627344291450607588), + KQU(10849555504977813287), KQU( 9234566913936339275), + KQU( 6413807690366911210), KQU(10862389016184219267), + KQU(13842504799335374048), KQU( 1531994113376881174), + KQU( 2081314867544364459), KQU(16430628791616959932), + KQU( 8314714038654394368), KQU( 9155473892098431813), + KQU(12577843786670475704), KQU( 4399161106452401017), + KQU( 1668083091682623186), KQU( 1741383777203714216), + KQU( 2162597285417794374), KQU(15841980159165218736), + KQU( 1971354603551467079), KQU( 1206714764913205968), + KQU( 4790860439591272330), KQU(14699375615594055799), + KQU( 8374423871657449988), KQU(10950685736472937738), + KQU( 697344331343267176), KQU(10084998763118059810), + KQU(12897369539795983124), KQU(12351260292144383605), + KQU( 1268810970176811234), KQU( 7406287800414582768), + KQU( 516169557043807831), KQU( 5077568278710520380), + KQU( 3828791738309039304), KQU( 7721974069946943610), + KQU( 3534670260981096460), KQU( 4865792189600584891), + KQU(16892578493734337298), KQU( 9161499464278042590), + KQU(11976149624067055931), KQU(13219479887277343990), + KQU(14161556738111500680), KQU(14670715255011223056), + KQU( 4671205678403576558), KQU(12633022931454259781), + KQU(14821376219869187646), KQU( 
751181776484317028), + KQU( 2192211308839047070), KQU(11787306362361245189), + KQU(10672375120744095707), KQU( 4601972328345244467), + KQU(15457217788831125879), KQU( 8464345256775460809), + KQU(10191938789487159478), KQU( 6184348739615197613), + KQU(11425436778806882100), KQU( 2739227089124319793), + KQU( 461464518456000551), KQU( 4689850170029177442), + KQU( 6120307814374078625), KQU(11153579230681708671), + KQU( 7891721473905347926), KQU(10281646937824872400), + KQU( 3026099648191332248), KQU( 8666750296953273818), + KQU(14978499698844363232), KQU(13303395102890132065), + KQU( 8182358205292864080), KQU(10560547713972971291), + KQU(11981635489418959093), KQU( 3134621354935288409), + KQU(11580681977404383968), KQU(14205530317404088650), + KQU( 5997789011854923157), KQU(13659151593432238041), + KQU(11664332114338865086), KQU( 7490351383220929386), + KQU( 7189290499881530378), KQU(15039262734271020220), + KQU( 2057217285976980055), KQU( 555570804905355739), + KQU(11235311968348555110), KQU(13824557146269603217), + KQU(16906788840653099693), KQU( 7222878245455661677), + KQU( 5245139444332423756), KQU( 4723748462805674292), + KQU(12216509815698568612), KQU(17402362976648951187), + KQU(17389614836810366768), KQU( 4880936484146667711), + KQU( 9085007839292639880), KQU(13837353458498535449), + KQU(11914419854360366677), KQU(16595890135313864103), + KQU( 6313969847197627222), KQU(18296909792163910431), + KQU(10041780113382084042), KQU( 2499478551172884794), + KQU(11057894246241189489), KQU( 9742243032389068555), + KQU(12838934582673196228), KQU(13437023235248490367), + KQU(13372420669446163240), KQU( 6752564244716909224), + KQU( 7157333073400313737), KQU(12230281516370654308), + KQU( 1182884552219419117), KQU( 2955125381312499218), + KQU(10308827097079443249), KQU( 1337648572986534958), + KQU(16378788590020343939), KQU( 108619126514420935), + KQU( 3990981009621629188), KQU( 5460953070230946410), + KQU( 9703328329366531883), KQU(13166631489188077236), + KQU( 1104768831213675170), KQU( 3447930458553877908), + KQU( 8067172487769945676), KQU( 5445802098190775347), + KQU( 3244840981648973873), KQU(17314668322981950060), + KQU( 5006812527827763807), KQU(18158695070225526260), + KQU( 2824536478852417853), KQU(13974775809127519886), + KQU( 9814362769074067392), KQU(17276205156374862128), + KQU(11361680725379306967), KQU( 3422581970382012542), + KQU(11003189603753241266), KQU(11194292945277862261), + KQU( 6839623313908521348), KQU(11935326462707324634), + KQU( 1611456788685878444), KQU(13112620989475558907), + KQU( 517659108904450427), KQU(13558114318574407624), + KQU(15699089742731633077), KQU( 4988979278862685458), + KQU( 8111373583056521297), KQU( 3891258746615399627), + KQU( 8137298251469718086), KQU(12748663295624701649), + KQU( 4389835683495292062), KQU( 5775217872128831729), + KQU( 9462091896405534927), KQU( 8498124108820263989), + KQU( 8059131278842839525), KQU(10503167994254090892), + KQU(11613153541070396656), KQU(18069248738504647790), + KQU( 570657419109768508), KQU( 3950574167771159665), + KQU( 5514655599604313077), KQU( 2908460854428484165), + KQU(10777722615935663114), KQU(12007363304839279486), + KQU( 9800646187569484767), KQU( 8795423564889864287), + KQU(14257396680131028419), KQU( 6405465117315096498), + KQU( 7939411072208774878), KQU(17577572378528990006), + KQU(14785873806715994850), KQU(16770572680854747390), + KQU(18127549474419396481), KQU(11637013449455757750), + KQU(14371851933996761086), KQU( 3601181063650110280), + KQU( 4126442845019316144), KQU(10198287239244320669), + 
KQU(18000169628555379659), KQU(18392482400739978269), + KQU( 6219919037686919957), KQU( 3610085377719446052), + KQU( 2513925039981776336), KQU(16679413537926716955), + KQU(12903302131714909434), KQU( 5581145789762985009), + KQU(12325955044293303233), KQU(17216111180742141204), + KQU( 6321919595276545740), KQU( 3507521147216174501), + KQU( 9659194593319481840), KQU(11473976005975358326), + KQU(14742730101435987026), KQU( 492845897709954780), + KQU(16976371186162599676), KQU(17712703422837648655), + KQU( 9881254778587061697), KQU( 8413223156302299551), + KQU( 1563841828254089168), KQU( 9996032758786671975), + KQU( 138877700583772667), KQU(13003043368574995989), + KQU( 4390573668650456587), KQU( 8610287390568126755), + KQU(15126904974266642199), KQU( 6703637238986057662), + KQU( 2873075592956810157), KQU( 6035080933946049418), + KQU(13382846581202353014), KQU( 7303971031814642463), + KQU(18418024405307444267), KQU( 5847096731675404647), + KQU( 4035880699639842500), KQU(11525348625112218478), + KQU( 3041162365459574102), KQU( 2604734487727986558), + KQU(15526341771636983145), KQU(14556052310697370254), + KQU(12997787077930808155), KQU( 9601806501755554499), + KQU(11349677952521423389), KQU(14956777807644899350), + KQU(16559736957742852721), KQU(12360828274778140726), + KQU( 6685373272009662513), KQU(16932258748055324130), + KQU(15918051131954158508), KQU( 1692312913140790144), + KQU( 546653826801637367), KQU( 5341587076045986652), + KQU(14975057236342585662), KQU(12374976357340622412), + KQU(10328833995181940552), KQU(12831807101710443149), + KQU(10548514914382545716), KQU( 2217806727199715993), + KQU(12627067369242845138), KQU( 4598965364035438158), + KQU( 150923352751318171), KQU(14274109544442257283), + KQU( 4696661475093863031), KQU( 1505764114384654516), + KQU(10699185831891495147), KQU( 2392353847713620519), + KQU( 3652870166711788383), KQU( 8640653276221911108), + KQU( 3894077592275889704), KQU( 4918592872135964845), + KQU(16379121273281400789), KQU(12058465483591683656), + KQU(11250106829302924945), KQU( 1147537556296983005), + KQU( 6376342756004613268), KQU(14967128191709280506), + KQU(18007449949790627628), KQU( 9497178279316537841), + KQU( 7920174844809394893), KQU(10037752595255719907), + KQU(15875342784985217697), KQU(15311615921712850696), + KQU( 9552902652110992950), KQU(14054979450099721140), + KQU( 5998709773566417349), KQU(18027910339276320187), + KQU( 8223099053868585554), KQU( 7842270354824999767), + KQU( 4896315688770080292), KQU(12969320296569787895), + KQU( 2674321489185759961), KQU( 4053615936864718439), + KQU(11349775270588617578), KQU( 4743019256284553975), + KQU( 5602100217469723769), KQU(14398995691411527813), + KQU( 7412170493796825470), KQU( 836262406131744846), + KQU( 8231086633845153022), KQU( 5161377920438552287), + KQU( 8828731196169924949), KQU(16211142246465502680), + KQU( 3307990879253687818), KQU( 5193405406899782022), + KQU( 8510842117467566693), KQU( 6070955181022405365), + KQU(14482950231361409799), KQU(12585159371331138077), + KQU( 3511537678933588148), KQU( 2041849474531116417), + KQU(10944936685095345792), KQU(18303116923079107729), + KQU( 2720566371239725320), KQU( 4958672473562397622), + KQU( 3032326668253243412), KQU(13689418691726908338), + KQU( 1895205511728843996), KQU( 8146303515271990527), + KQU(16507343500056113480), KQU( 473996939105902919), + KQU( 9897686885246881481), KQU(14606433762712790575), + KQU( 6732796251605566368), KQU( 1399778120855368916), + KQU( 935023885182833777), KQU(16066282816186753477), + KQU( 7291270991820612055), 
KQU(17530230393129853844), + KQU(10223493623477451366), KQU(15841725630495676683), + KQU(17379567246435515824), KQU( 8588251429375561971), + KQU(18339511210887206423), KQU(17349587430725976100), + KQU(12244876521394838088), KQU( 6382187714147161259), + KQU(12335807181848950831), KQU(16948885622305460665), + KQU(13755097796371520506), KQU(14806740373324947801), + KQU( 4828699633859287703), KQU( 8209879281452301604), + KQU(12435716669553736437), KQU(13970976859588452131), + KQU( 6233960842566773148), KQU(12507096267900505759), + KQU( 1198713114381279421), KQU(14989862731124149015), + KQU(15932189508707978949), KQU( 2526406641432708722), + KQU( 29187427817271982), KQU( 1499802773054556353), + KQU(10816638187021897173), KQU( 5436139270839738132), + KQU( 6659882287036010082), KQU( 2154048955317173697), + KQU(10887317019333757642), KQU(16281091802634424955), + KQU(10754549879915384901), KQU(10760611745769249815), + KQU( 2161505946972504002), KQU( 5243132808986265107), + KQU(10129852179873415416), KQU( 710339480008649081), + KQU( 7802129453068808528), KQU(17967213567178907213), + KQU(15730859124668605599), KQU(13058356168962376502), + KQU( 3701224985413645909), KQU(14464065869149109264), + KQU( 9959272418844311646), KQU(10157426099515958752), + KQU(14013736814538268528), KQU(17797456992065653951), + KQU(17418878140257344806), KQU(15457429073540561521), + KQU( 2184426881360949378), KQU( 2062193041154712416), + KQU( 8553463347406931661), KQU( 4913057625202871854), + KQU( 2668943682126618425), KQU(17064444737891172288), + KQU( 4997115903913298637), KQU(12019402608892327416), + KQU(17603584559765897352), KQU(11367529582073647975), + KQU( 8211476043518436050), KQU( 8676849804070323674), + KQU(18431829230394475730), KQU(10490177861361247904), + KQU( 9508720602025651349), KQU( 7409627448555722700), + KQU( 5804047018862729008), KQU(11943858176893142594), + KQU(11908095418933847092), KQU( 5415449345715887652), + KQU( 1554022699166156407), KQU( 9073322106406017161), + KQU( 7080630967969047082), KQU(18049736940860732943), + KQU(12748714242594196794), KQU( 1226992415735156741), + KQU(17900981019609531193), KQU(11720739744008710999), + KQU( 3006400683394775434), KQU(11347974011751996028), + KQU( 3316999628257954608), KQU( 8384484563557639101), + KQU(18117794685961729767), KQU( 1900145025596618194), + KQU(17459527840632892676), KQU( 5634784101865710994), + KQU( 7918619300292897158), KQU( 3146577625026301350), + KQU( 9955212856499068767), KQU( 1873995843681746975), + KQU( 1561487759967972194), KQU( 8322718804375878474), + KQU(11300284215327028366), KQU( 4667391032508998982), + KQU( 9820104494306625580), KQU(17922397968599970610), + KQU( 1784690461886786712), KQU(14940365084341346821), + KQU( 5348719575594186181), KQU(10720419084507855261), + KQU(14210394354145143274), KQU( 2426468692164000131), + KQU(16271062114607059202), KQU(14851904092357070247), + KQU( 6524493015693121897), KQU( 9825473835127138531), + KQU(14222500616268569578), KQU(15521484052007487468), + KQU(14462579404124614699), KQU(11012375590820665520), + KQU(11625327350536084927), KQU(14452017765243785417), + KQU( 9989342263518766305), KQU( 3640105471101803790), + KQU( 4749866455897513242), KQU(13963064946736312044), + KQU(10007416591973223791), KQU(18314132234717431115), + KQU( 3286596588617483450), KQU( 7726163455370818765), + KQU( 7575454721115379328), KQU( 5308331576437663422), + KQU(18288821894903530934), KQU( 8028405805410554106), + KQU(15744019832103296628), KQU( 149765559630932100), + KQU( 6137705557200071977), KQU(14513416315434803615), + 
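/* Annotation: the remaining entries complete the golden outputs for init_by_array() seeding with {5, 4, 3, 2, 1}; test_by_array_64 below replays the generator and checks each value against this table. */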
KQU(11665702820128984473), KQU( 218926670505601386), + KQU( 6868675028717769519), KQU(15282016569441512302), + KQU( 5707000497782960236), KQU( 6671120586555079567), + KQU( 2194098052618985448), KQU(16849577895477330978), + KQU(12957148471017466283), KQU( 1997805535404859393), + KQU( 1180721060263860490), KQU(13206391310193756958), + KQU(12980208674461861797), KQU( 3825967775058875366), + KQU(17543433670782042631), KQU( 1518339070120322730), + KQU(16344584340890991669), KQU( 2611327165318529819), + KQU(11265022723283422529), KQU( 4001552800373196817), + KQU(14509595890079346161), KQU( 3528717165416234562), + KQU(18153222571501914072), KQU( 9387182977209744425), + KQU(10064342315985580021), KQU(11373678413215253977), + KQU( 2308457853228798099), KQU( 9729042942839545302), + KQU( 7833785471140127746), KQU( 6351049900319844436), + KQU(14454610627133496067), KQU(12533175683634819111), + KQU(15570163926716513029), KQU(13356980519185762498) +}; + +TEST_BEGIN(test_gen_rand_32) +{ + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_gen_rand(1234); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_gen_rand(1234); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_gen_rand_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_32) +{ + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_by_array(ini, 4); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 4); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_by_array_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_gen_rand_64) +{ + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_gen_rand(4321); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_gen_rand(4321); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_gen_rand_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"FMTx64", 
gen=%"FMTx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_64) +{ + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + uint32_t ini[] = {5, 4, 3, 2, 1}; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_by_array(ini, 5); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 5); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_by_array_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +int +main(void) +{ + + return (test( + test_gen_rand_32, + test_by_array_32, + test_gen_rand_64, + test_by_array_64)); +} diff --git a/memory/jemalloc/src/test/unit/a0.c b/memory/jemalloc/src/test/unit/a0.c new file mode 100644 index 000000000..b9ba45a3d --- /dev/null +++ b/memory/jemalloc/src/test/unit/a0.c @@ -0,0 +1,19 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_a0) +{ + void *p; + + p = a0malloc(1); + assert_ptr_not_null(p, "Unexpected a0malloc() error"); + a0dalloc(p); +} +TEST_END + +int +main(void) +{ + + return (test_no_malloc_init( + test_a0)); +} diff --git a/memory/jemalloc/src/test/unit/arena_reset.c b/memory/jemalloc/src/test/unit/arena_reset.c new file mode 100644 index 000000000..8ba36c21f --- /dev/null +++ b/memory/jemalloc/src/test/unit/arena_reset.c @@ -0,0 +1,159 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = "prof:true,lg_prof_sample:0"; +#endif + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nsmall(void) +{ + + return (get_nsizes_impl("arenas.nbins")); +} + +static unsigned +get_nlarge(void) +{ + + return (get_nsizes_impl("arenas.nlruns")); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); + + return (ret); +} + +static size_t +get_small_size(size_t ind) +{ + + return (get_size_impl("arenas.bin.0.size", ind)); +} + +static size_t +get_large_size(size_t ind) +{ + + return (get_size_impl("arenas.lrun.0.size", ind)); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_arena_reset) +{ +#define NHUGE 4 + unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i; + size_t sz, miblen; + void **ptrs; + int flags; + size_t mib[3]; + tsdn_t *tsdn; + + test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill + && unlikely(opt_quarantine))); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + nsmall = get_nsmall(); + nlarge = get_nlarge(); + nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge(); + nptrs = nsmall + nlarge + nhuge; + ptrs = (void **)malloc(nptrs * sizeof(void *)); + assert_ptr_not_null(ptrs, "Unexpected malloc() failure"); + + /* Allocate objects with a wide range of sizes. */ + for (i = 0; i < nsmall; i++) { + sz = get_small_size(i); + ptrs[i] = mallocx(sz, flags); + assert_ptr_not_null(ptrs[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + for (i = 0; i < nlarge; i++) { + sz = get_large_size(i); + ptrs[nsmall + i] = mallocx(sz, flags); + assert_ptr_not_null(ptrs[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + for (i = 0; i < nhuge; i++) { + sz = get_huge_size(i); + ptrs[nsmall + nlarge + i] = mallocx(sz, flags); + assert_ptr_not_null(ptrs[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + + tsdn = tsdn_fetch(); + + /* Verify allocations. */ + for (i = 0; i < nptrs; i++) { + assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0, + "Allocation should have queryable size"); + } + + /* Reset. */ + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + + /* Verify allocations no longer exist. 
*/ + for (i = 0; i < nptrs; i++) { + assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0, + "Allocation should no longer exist"); + } + + free(ptrs); +} +TEST_END + +int +main(void) +{ + + return (test( + test_arena_reset)); +} diff --git a/memory/jemalloc/src/test/unit/atomic.c b/memory/jemalloc/src/test/unit/atomic.c new file mode 100644 index 000000000..bdd74f659 --- /dev/null +++ b/memory/jemalloc/src/test/unit/atomic.c @@ -0,0 +1,122 @@ +#include "test/jemalloc_test.h" + +#define TEST_STRUCT(p, t) \ +struct p##_test_s { \ + t accum0; \ + t x; \ + t s; \ +}; \ +typedef struct p##_test_s p##_test_t; + +#define TEST_BODY(p, t, tc, ta, FMT) do { \ + const p##_test_t tests[] = { \ + {(t)-1, (t)-1, (t)-2}, \ + {(t)-1, (t) 0, (t)-2}, \ + {(t)-1, (t) 1, (t)-2}, \ + \ + {(t) 0, (t)-1, (t)-2}, \ + {(t) 0, (t) 0, (t)-2}, \ + {(t) 0, (t) 1, (t)-2}, \ + \ + {(t) 1, (t)-1, (t)-2}, \ + {(t) 1, (t) 0, (t)-2}, \ + {(t) 1, (t) 1, (t)-2}, \ + \ + {(t)0, (t)-(1 << 22), (t)-2}, \ + {(t)0, (t)(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)(1 << 22), (t)-2} \ + }; \ + unsigned i; \ + \ + for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \ + bool err; \ + t accum = tests[i].accum0; \ + assert_##ta##_eq(atomic_read_##p(&accum), \ + tests[i].accum0, \ + "Erroneous read, i=%u", i); \ + \ + assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \ + (t)((tc)tests[i].accum0 + (tc)tests[i].x), \ + "i=%u, accum=%"FMT", x=%"FMT, \ + i, tests[i].accum0, tests[i].x); \ + assert_##ta##_eq(atomic_read_##p(&accum), accum, \ + "Erroneous add, i=%u", i); \ + \ + accum = tests[i].accum0; \ + assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \ + (t)((tc)tests[i].accum0 - (tc)tests[i].x), \ + "i=%u, accum=%"FMT", x=%"FMT, \ + i, tests[i].accum0, tests[i].x); \ + assert_##ta##_eq(atomic_read_##p(&accum), accum, \ + "Erroneous sub, i=%u", i); \ + \ + accum = tests[i].accum0; \ + err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \ + assert_b_eq(err, tests[i].accum0 != tests[i].x, \ + "Erroneous cas success/failure result"); \ + assert_##ta##_eq(accum, err ? 
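/* atomic_cas_##p() returns true on failure, so accum must still hold its old value after a failed CAS and the new value s after a successful one: */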
tests[i].accum0 : \ + tests[i].s, "Erroneous cas effect, i=%u", i); \ + \ + accum = tests[i].accum0; \ + atomic_write_##p(&accum, tests[i].s); \ + assert_##ta##_eq(accum, tests[i].s, \ + "Erroneous write, i=%u", i); \ + } \ +} while (0) + +TEST_STRUCT(uint64, uint64_t) +TEST_BEGIN(test_atomic_uint64) +{ + +#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) + test_skip("64-bit atomic operations not supported"); +#else + TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64); +#endif +} +TEST_END + +TEST_STRUCT(uint32, uint32_t) +TEST_BEGIN(test_atomic_uint32) +{ + + TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32); +} +TEST_END + +TEST_STRUCT(p, void *) +TEST_BEGIN(test_atomic_p) +{ + + TEST_BODY(p, void *, uintptr_t, ptr, "p"); +} +TEST_END + +TEST_STRUCT(z, size_t) +TEST_BEGIN(test_atomic_z) +{ + + TEST_BODY(z, size_t, size_t, zu, "#zx"); +} +TEST_END + +TEST_STRUCT(u, unsigned) +TEST_BEGIN(test_atomic_u) +{ + + TEST_BODY(u, unsigned, unsigned, u, "#x"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_atomic_uint64, + test_atomic_uint32, + test_atomic_p, + test_atomic_z, + test_atomic_u)); +} diff --git a/memory/jemalloc/src/test/unit/bitmap.c b/memory/jemalloc/src/test/unit/bitmap.c new file mode 100644 index 000000000..a2dd54630 --- /dev/null +++ b/memory/jemalloc/src/test/unit/bitmap.c @@ -0,0 +1,163 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_bitmap_size) +{ + size_t i, prev_size; + + prev_size = 0; + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + size_t size; + + bitmap_info_init(&binfo, i); + size = bitmap_size(&binfo); + assert_true(size >= prev_size, + "Bitmap size is smaller than expected"); + prev_size = size; + } +} +TEST_END + +TEST_BEGIN(test_bitmap_init) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc( + bitmap_size(&binfo)); + bitmap_init(bitmap, &binfo); + + for (j = 0; j < i; j++) { + assert_false(bitmap_get(bitmap, &binfo, j), + "Bit should be unset"); + } + free(bitmap); + } + } +} +TEST_END + +TEST_BEGIN(test_bitmap_set) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc( + bitmap_size(&binfo)); + bitmap_init(bitmap, &binfo); + + for (j = 0; j < i; j++) + bitmap_set(bitmap, &binfo, j); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + free(bitmap); + } + } +} +TEST_END + +TEST_BEGIN(test_bitmap_unset) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc( + bitmap_size(&binfo)); + bitmap_init(bitmap, &binfo); + + for (j = 0; j < i; j++) + bitmap_set(bitmap, &binfo, j); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + for (j = 0; j < i; j++) + bitmap_unset(bitmap, &binfo, j); + for (j = 0; j < i; j++) + bitmap_set(bitmap, &binfo, j); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + free(bitmap); + } + } +} +TEST_END + +TEST_BEGIN(test_bitmap_sfu) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc( + bitmap_size(&binfo)); + bitmap_init(bitmap, &binfo); + + /* Iteratively set bits starting at the beginning. 
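bitmap_sfu() ("set first unset") locates the lowest clear bit, sets it, and returns its index, so consecutive calls on an initially empty bitmap must yield 0, 1, 2, and so on.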
*/ + for (j = 0; j < i; j++) { + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be just after " + "previous first unset bit"); + } + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + + /* + * Iteratively unset bits starting at the end, and + * verify that bitmap_sfu() reaches the unset bits. + */ + for (j = i - 1; j < i; j--) { /* (i..0] */ + bitmap_unset(bitmap, &binfo, j); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should the bit previously " + "unset"); + bitmap_unset(bitmap, &binfo, j); + } + assert_false(bitmap_get(bitmap, &binfo, 0), + "Bit should be unset"); + + /* + * Iteratively set bits starting at the beginning, and + * verify that bitmap_sfu() looks past them. + */ + for (j = 1; j < i; j++) { + bitmap_set(bitmap, &binfo, j - 1); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be just after the " + "bit previously set"); + bitmap_unset(bitmap, &binfo, j); + } + assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, + "First unset bit should be the last bit"); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + free(bitmap); + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_bitmap_size, + test_bitmap_init, + test_bitmap_set, + test_bitmap_unset, + test_bitmap_sfu)); +} diff --git a/memory/jemalloc/src/test/unit/ckh.c b/memory/jemalloc/src/test/unit/ckh.c new file mode 100644 index 000000000..2cbc22688 --- /dev/null +++ b/memory/jemalloc/src/test/unit/ckh.c @@ -0,0 +1,214 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_new_delete) +{ + tsd_t *tsd; + ckh_t ckh; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); + ckh_delete(tsd, &ckh); + + assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, + ckh_pointer_keycomp), "Unexpected ckh_new() error"); + ckh_delete(tsd, &ckh); +} +TEST_END + +TEST_BEGIN(test_count_insert_search_remove) +{ + tsd_t *tsd; + ckh_t ckh; + const char *strs[] = { + "a string", + "A string", + "a string.", + "A string." + }; + const char *missing = "A string not in the hash table."; + size_t i; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", ZU(0), + ckh_count(&ckh)); + + /* Insert. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + ckh_insert(tsd, &ckh, strs[i], strs[i]); + assert_zu_eq(ckh_count(&ckh), i+1, + "ckh_count() should return %zu, but it returned %zu", i+1, + ckh_count(&ckh)); + } + + /* Search. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_search(&ckh, strs[i], kp, vp), + "Unexpected ckh_search() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", + i); + assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", + i); + } + assert_true(ckh_search(&ckh, missing, NULL, NULL), + "Unexpected ckh_search() success"); + + /* Remove. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? 
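/* as in the search loop above, alternate NULL and non-NULL out-pointers so all four ckh_remove() argument combinations are exercised */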
&k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp), + "Unexpected ckh_remove() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", + i); + assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", + i); + assert_zu_eq(ckh_count(&ckh), + sizeof(strs)/sizeof(const char *) - i - 1, + "ckh_count() should return %zu, but it returned %zu", + sizeof(strs)/sizeof(const char *) - i - 1, + ckh_count(&ckh)); + } + + ckh_delete(tsd, &ckh); +} +TEST_END + +TEST_BEGIN(test_insert_iter_remove) +{ +#define NITEMS ZU(1000) + tsd_t *tsd; + ckh_t ckh; + void **p[NITEMS]; + void *q, *r; + size_t i; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, + ckh_pointer_keycomp), "Unexpected ckh_new() error"); + + for (i = 0; i < NITEMS; i++) { + p[i] = mallocx(i+1, 0); + assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NITEMS; i++) { + size_t j; + + for (j = i; j < NITEMS; j++) { + assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), + "Unexpected ckh_insert() failure"); + assert_false(ckh_search(&ckh, p[j], &q, &r), + "Unexpected ckh_search() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + } + + assert_zu_eq(ckh_count(&ckh), NITEMS, + "ckh_count() should return %zu, but it returned %zu", + NITEMS, ckh_count(&ckh)); + + for (j = i + 1; j < NITEMS; j++) { + assert_false(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), + "Unexpected ckh_remove() success"); + } + + { + bool seen[NITEMS]; + size_t tabind; + + memset(seen, 0, sizeof(seen)); + + for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { + size_t k; + + assert_ptr_eq(q, r, "Key and val not equal"); + + for (k = 0; k < NITEMS; k++) { + if (p[k] == q) { + assert_false(seen[k], + "Item %zu already seen", k); + seen[k] = true; + break; + } + } + } + + for (j = 0; j < i + 1; j++) + assert_true(seen[j], "Item %zu not seen", j); + for (; j < NITEMS; j++) + assert_false(seen[j], "Item %zu seen", j); + } + } + + for (i = 0; i < NITEMS; i++) { + assert_false(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[i], q, "Key pointer mismatch"); + assert_ptr_eq(p[i], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), + "Unexpected ckh_remove() success"); + dallocx(p[i], 0); + } + + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", + ZU(0), ckh_count(&ckh)); + ckh_delete(tsd, &ckh); +#undef NITEMS +} +TEST_END + +int +main(void) +{ + + return (test( + test_new_delete, + test_count_insert_search_remove, + test_insert_iter_remove)); +} diff --git a/memory/jemalloc/src/test/unit/decay.c b/memory/jemalloc/src/test/unit/decay.c new file mode 100644 index 000000000..e169ae24e --- /dev/null +++ b/memory/jemalloc/src/test/unit/decay.c @@ 
-0,0 +1,374 @@ +#include "test/jemalloc_test.h" + +const char *malloc_conf = "purge:decay,decay_time:1"; + +static nstime_monotonic_t *nstime_monotonic_orig; +static nstime_update_t *nstime_update_orig; + +static unsigned nupdates_mock; +static nstime_t time_mock; +static bool monotonic_mock; + +static bool +nstime_monotonic_mock(void) +{ + + return (monotonic_mock); +} + +static bool +nstime_update_mock(nstime_t *time) +{ + + nupdates_mock++; + if (monotonic_mock) + nstime_copy(time, &time_mock); + return (!monotonic_mock); +} + +TEST_BEGIN(test_decay_ticks) +{ + ticker_t *decay_ticker; + unsigned tick0, tick1; + size_t sz, huge0, large0; + void *p; + + test_skip_if(opt_purge != purge_mode_decay); + + decay_ticker = decay_ticker_get(tsd_fetch(), 0); + assert_ptr_not_null(decay_ticker, + "Unexpected failure getting decay ticker"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + /* + * Test the standard APIs using a huge size class, since we can't + * control tcache interactions (except by completely disabling tcache + * for the entire test program). + */ + + /* malloc(). */ + tick0 = ticker_read(decay_ticker); + p = malloc(huge0); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); + /* free(). */ + tick0 = ticker_read(decay_ticker); + free(p); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()"); + + /* calloc(). */ + tick0 = ticker_read(decay_ticker); + p = calloc(1, huge0); + assert_ptr_not_null(p, "Unexpected calloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); + free(p); + + /* posix_memalign(). */ + tick0 = ticker_read(decay_ticker); + assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0, + "Unexpected posix_memalign() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during posix_memalign()"); + free(p); + + /* aligned_alloc(). */ + tick0 = ticker_read(decay_ticker); + p = aligned_alloc(sizeof(size_t), huge0); + assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during aligned_alloc()"); + free(p); + + /* realloc(). */ + /* Allocate. */ + tick0 = ticker_read(decay_ticker); + p = realloc(NULL, huge0); + assert_ptr_not_null(p, "Unexpected realloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + /* Reallocate. */ + tick0 = ticker_read(decay_ticker); + p = realloc(p, huge0); + assert_ptr_not_null(p, "Unexpected realloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + /* Deallocate. */ + tick0 = ticker_read(decay_ticker); + realloc(p, 0); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + + /* + * Test the *allocx() APIs using huge, large, and small size classes, + * with tcache explicitly disabled. 
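+ * Routing around the tcache with MALLOCX_TCACHE_NONE guarantees that each
+ * call reaches the arena directly, so mallocx(), rallocx(), xallocx(),
+ * dallocx() and sdallocx() are all expected to advance the decay ticker.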
+ */ + { + unsigned i; + size_t allocx_sizes[3]; + allocx_sizes[0] = huge0; + allocx_sizes[1] = large0; + allocx_sizes[2] = 1; + + for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { + sz = allocx_sizes[i]; + + /* mallocx(). */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during mallocx() (sz=%zu)", + sz); + /* rallocx(). */ + tick0 = ticker_read(decay_ticker); + p = rallocx(p, sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during rallocx() (sz=%zu)", + sz); + /* xallocx(). */ + tick0 = ticker_read(decay_ticker); + xallocx(p, sz, 0, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during xallocx() (sz=%zu)", + sz); + /* dallocx(). */ + tick0 = ticker_read(decay_ticker); + dallocx(p, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during dallocx() (sz=%zu)", + sz); + /* sdallocx(). */ + p = mallocx(sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick0 = ticker_read(decay_ticker); + sdallocx(p, sz, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during sdallocx() " + "(sz=%zu)", sz); + } + } + + /* + * Test tcache fill/flush interactions for large and small size classes, + * using an explicit tcache. + */ + if (config_tcache) { + unsigned tcache_ind, i; + size_t tcache_sizes[2]; + tcache_sizes[0] = large0; + tcache_sizes[1] = 1; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); + + for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { + sz = tcache_sizes[i]; + + /* tcache fill. */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache fill " + "(sz=%zu)", sz); + /* tcache flush. */ + dallocx(p, MALLOCX_TCACHE(tcache_ind)); + tick0 = ticker_read(decay_ticker); + assert_d_eq(mallctl("tcache.flush", NULL, NULL, + &tcache_ind, sizeof(unsigned)), 0, + "Unexpected mallctl failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache flush " + "(sz=%zu)", sz); + } + } +} +TEST_END + +TEST_BEGIN(test_decay_ticker) +{ +#define NPS 1024 + int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + uint64_t epoch; + uint64_t npurge0 = 0; + uint64_t npurge1 = 0; + size_t sz, large; + unsigned i, nupdates0; + nstime_t time, decay_time, deadline; + + test_skip_if(opt_purge != purge_mode_decay); + + /* + * Allocate a bunch of large objects, pause the clock, deallocate the + * objects, restore the clock, then [md]allocx() in a tight loop to + * verify the ticker triggers purging. 
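+ * The clock is "paused" by temporarily pointing nstime_monotonic() and
+ * nstime_update() at mocks; the real functions are then restored, and the
+ * loop runs until either a purge is observed or a deadline of
+ * opt_decay_time (one second under this test's malloc_conf) expires.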
+ */ + + if (config_tcache) { + size_t tcache_max; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + large = nallocx(tcache_max + 1, flags); + } else { + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); + } + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, + "Unexpected mallctl failure"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), + config_stats ? 0 : ENOENT, "Unexpected mallctl result"); + + for (i = 0; i < NPS; i++) { + ps[i] = mallocx(large, flags); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); + } + + nupdates_mock = 0; + nstime_init(&time_mock, 0); + nstime_update(&time_mock); + monotonic_mock = true; + + nstime_monotonic_orig = nstime_monotonic; + nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; + nstime_update = nstime_update_mock; + + for (i = 0; i < NPS; i++) { + dallocx(ps[i], flags); + nupdates0 = nupdates_mock; + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.decay failure"); + assert_u_gt(nupdates_mock, nupdates0, + "Expected nstime_update() to be called"); + } + + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; + + nstime_init(&time, 0); + nstime_update(&time); + nstime_init2(&decay_time, opt_decay_time, 0); + nstime_copy(&deadline, &time); + nstime_add(&deadline, &decay_time); + do { + for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) { + void *p = mallocx(1, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, flags); + } + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, + sizeof(uint64_t)), 0, "Unexpected mallctl failure"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, + NULL, 0), config_stats ? 0 : ENOENT, + "Unexpected mallctl result"); + + nstime_update(&time); + } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0); + + if (config_stats) + assert_u64_gt(npurge1, npurge0, "Expected purging to occur"); +#undef NPS +} +TEST_END + +TEST_BEGIN(test_decay_nonmonotonic) +{ +#define NPS (SMOOTHSTEP_NSTEPS + 1) + int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + uint64_t epoch; + uint64_t npurge0 = 0; + uint64_t npurge1 = 0; + size_t sz, large0; + unsigned i, nupdates0; + + test_skip_if(opt_purge != purge_mode_decay); + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, + "Unexpected mallctl failure"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), + config_stats ? 
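/* the npurge statistic is only available when stats support is compiled in, hence ENOENT otherwise */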
0 : ENOENT, "Unexpected mallctl result"); + + nupdates_mock = 0; + nstime_init(&time_mock, 0); + nstime_update(&time_mock); + monotonic_mock = false; + + nstime_monotonic_orig = nstime_monotonic; + nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; + nstime_update = nstime_update_mock; + + for (i = 0; i < NPS; i++) { + ps[i] = mallocx(large0, flags); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NPS; i++) { + dallocx(ps[i], flags); + nupdates0 = nupdates_mock; + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.decay failure"); + assert_u_gt(nupdates_mock, nupdates0, + "Expected nstime_update() to be called"); + } + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, + "Unexpected mallctl failure"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0), + config_stats ? 0 : ENOENT, "Unexpected mallctl result"); + + if (config_stats) + assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); + + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; +#undef NPS +} +TEST_END + +int +main(void) +{ + + return (test( + test_decay_ticks, + test_decay_ticker, + test_decay_nonmonotonic)); +} diff --git a/memory/jemalloc/src/test/unit/fork.c b/memory/jemalloc/src/test/unit/fork.c new file mode 100644 index 000000000..c530797c4 --- /dev/null +++ b/memory/jemalloc/src/test/unit/fork.c @@ -0,0 +1,64 @@ +#include "test/jemalloc_test.h" + +#ifndef _WIN32 +#include <sys/wait.h> +#endif + +TEST_BEGIN(test_fork) +{ +#ifndef _WIN32 + void *p; + pid_t pid; + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + pid = fork(); + + free(p); + + p = malloc(64); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + free(p); + + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure"); + } else if (pid == 0) { + /* Child. */ + _exit(0); + } else { + int status; + + /* Parent. */ + while (true) { + if (waitpid(pid, &status, 0) == -1) + test_fail("Unexpected waitpid() failure"); + if (WIFSIGNALED(status)) { + test_fail("Unexpected child termination due to " + "signal %d", WTERMSIG(status)); + break; + } + if (WIFEXITED(status)) { + if (WEXITSTATUS(status) != 0) { + test_fail( + "Unexpected child exit value %d", + WEXITSTATUS(status)); + } + break; + } + } + } +#else + test_skip("fork(2) is irrelevant to Windows"); +#endif +} +TEST_END + +int +main(void) +{ + + return (test( + test_fork)); +} diff --git a/memory/jemalloc/src/test/unit/hash.c b/memory/jemalloc/src/test/unit/hash.c new file mode 100644 index 000000000..010c9d76f --- /dev/null +++ b/memory/jemalloc/src/test/unit/hash.c @@ -0,0 +1,185 @@ +/* + * This file is based on code that is part of SMHasher + * (https://code.google.com/p/smhasher/), and is subject to the MIT license + * (http://www.opensource.org/licenses/mit-license.php). Both email addresses + * associated with the source code's revision history belong to Austin Appleby, + * and the revision history ranges from 2010 to 2012. 
Therefore the copyright + * and license are here taken to be: + * + * Copyright (c) 2010-2012 Austin Appleby + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "test/jemalloc_test.h" + +typedef enum { + hash_variant_x86_32, + hash_variant_x86_128, + hash_variant_x64_128 +} hash_variant_t; + +static int +hash_variant_bits(hash_variant_t variant) +{ + + switch (variant) { + case hash_variant_x86_32: return (32); + case hash_variant_x86_128: return (128); + case hash_variant_x64_128: return (128); + default: not_reached(); + } +} + +static const char * +hash_variant_string(hash_variant_t variant) +{ + + switch (variant) { + case hash_variant_x86_32: return ("hash_x86_32"); + case hash_variant_x86_128: return ("hash_x86_128"); + case hash_variant_x64_128: return ("hash_x64_128"); + default: not_reached(); + } +} + +#define KEY_SIZE 256 +static void +hash_variant_verify_key(hash_variant_t variant, uint8_t *key) +{ + const int hashbytes = hash_variant_bits(variant) / 8; + const int hashes_size = hashbytes * 256; + VARIABLE_ARRAY(uint8_t, hashes, hashes_size); + VARIABLE_ARRAY(uint8_t, final, hashbytes); + unsigned i; + uint32_t computed, expected; + + memset(key, 0, KEY_SIZE); + memset(hashes, 0, hashes_size); + memset(final, 0, hashbytes); + + /* + * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the + * seed. + */ + for (i = 0; i < 256; i++) { + key[i] = (uint8_t)i; + switch (variant) { + case hash_variant_x86_32: { + uint32_t out; + out = hash_x86_32(key, i, 256-i); + memcpy(&hashes[i*hashbytes], &out, hashbytes); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } default: not_reached(); + } + } + + /* Hash the result array. 
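+ * (This collapses all 256 prefix hashes into a single value, matching
+ * the verification scheme used by SMHasher itself.)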
*/ + switch (variant) { + case hash_variant_x86_32: { + uint32_t out = hash_x86_32(hashes, hashes_size, 0); + memcpy(final, &out, sizeof(out)); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(hashes, hashes_size, 0, out); + memcpy(final, out, sizeof(out)); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(hashes, hashes_size, 0, out); + memcpy(final, out, sizeof(out)); + break; + } default: not_reached(); + } + + computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | + (final[3] << 24); + + switch (variant) { +#ifdef JEMALLOC_BIG_ENDIAN + case hash_variant_x86_32: expected = 0x6213303eU; break; + case hash_variant_x86_128: expected = 0x266820caU; break; + case hash_variant_x64_128: expected = 0xcc622b6fU; break; +#else + case hash_variant_x86_32: expected = 0xb0f57ee3U; break; + case hash_variant_x86_128: expected = 0xb3ece62aU; break; + case hash_variant_x64_128: expected = 0x6384ba69U; break; +#endif + default: not_reached(); + } + + assert_u32_eq(computed, expected, + "Hash mismatch for %s(): expected %#x but got %#x", + hash_variant_string(variant), expected, computed); +} + +static void +hash_variant_verify(hash_variant_t variant) +{ +#define MAX_ALIGN 16 + uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; + unsigned i; + + for (i = 0; i < MAX_ALIGN; i++) + hash_variant_verify_key(variant, &key[i]); +#undef MAX_ALIGN +} +#undef KEY_SIZE + +TEST_BEGIN(test_hash_x86_32) +{ + + hash_variant_verify(hash_variant_x86_32); +} +TEST_END + +TEST_BEGIN(test_hash_x86_128) +{ + + hash_variant_verify(hash_variant_x86_128); +} +TEST_END + +TEST_BEGIN(test_hash_x64_128) +{ + + hash_variant_verify(hash_variant_x64_128); +} +TEST_END + +int +main(void) +{ + + return (test( + test_hash_x86_32, + test_hash_x86_128, + test_hash_x64_128)); +} diff --git a/memory/jemalloc/src/test/unit/junk.c b/memory/jemalloc/src/test/unit/junk.c new file mode 100644 index 000000000..460bd524d --- /dev/null +++ b/memory/jemalloc/src/test/unit/junk.c @@ -0,0 +1,253 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +# ifndef JEMALLOC_TEST_JUNK_OPT +# define JEMALLOC_TEST_JUNK_OPT "junk:true" +# endif +const char *malloc_conf = + "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT; +#endif + +static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; +static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; +static huge_dalloc_junk_t *huge_dalloc_junk_orig; +static void *watch_for_junking; +static bool saw_junking; + +static void +watch_junking(void *p) +{ + + watch_for_junking = p; + saw_junking = false; +} + +static void +arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) +{ + size_t i; + + arena_dalloc_junk_small_orig(ptr, bin_info); + for (i = 0; i < bin_info->reg_size; i++) { + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, bin_info->reg_size); + } + if (ptr == watch_for_junking) + saw_junking = true; +} + +static void +arena_dalloc_junk_large_intercept(void *ptr, size_t usize) +{ + size_t i; + + arena_dalloc_junk_large_orig(ptr, usize); + for (i = 0; i < usize; i++) { + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, usize); + } + if (ptr == watch_for_junking) + saw_junking = true; +} + +static void +huge_dalloc_junk_intercept(void *ptr, size_t usize) +{ + + huge_dalloc_junk_orig(ptr, usize); + /* + * The conditions under which junk filling actually occurs 
are nuanced + * enough that it doesn't make sense to duplicate the decision logic in + * test code, so don't actually check that the region is junk-filled. + */ + if (ptr == watch_for_junking) + saw_junking = true; +} + +static void +test_junk(size_t sz_min, size_t sz_max) +{ + uint8_t *s; + size_t sz_prev, sz, i; + + if (opt_junk_free) { + arena_dalloc_junk_small_orig = arena_dalloc_junk_small; + arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; + arena_dalloc_junk_large_orig = arena_dalloc_junk_large; + arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; + huge_dalloc_junk_orig = huge_dalloc_junk; + huge_dalloc_junk = huge_dalloc_junk_intercept; + } + + sz_prev = 0; + s = (uint8_t *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_u_eq(s[0], 'a', + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_u_eq(s[sz_prev-1], 'a', + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + if (opt_junk_alloc) { + assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, + "Newly allocated byte %zu/%zu isn't " + "junk-filled", i, sz); + } + s[i] = 'a'; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + watch_junking(s); + s = (uint8_t *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)s, + "Unexpected rallocx() failure"); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be junk-filled", + sz); + } + } + + watch_junking(s); + dallocx(s, 0); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be junk-filled", sz); + + if (opt_junk_free) { + arena_dalloc_junk_small = arena_dalloc_junk_small_orig; + arena_dalloc_junk_large = arena_dalloc_junk_large_orig; + huge_dalloc_junk = huge_dalloc_junk_orig; + } +} + +TEST_BEGIN(test_junk_small) +{ + + test_skip_if(!config_fill); + test_junk(1, SMALL_MAXCLASS-1); +} +TEST_END + +TEST_BEGIN(test_junk_large) +{ + + test_skip_if(!config_fill); + test_junk(SMALL_MAXCLASS+1, large_maxclass); +} +TEST_END + +TEST_BEGIN(test_junk_huge) +{ + + test_skip_if(!config_fill); + test_junk(large_maxclass+1, chunksize*2); +} +TEST_END + +arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; +static void *most_recently_trimmed; + +static size_t +shrink_size(size_t size) +{ + size_t shrink_size; + + for (shrink_size = size - 1; nallocx(shrink_size, 0) == size; + shrink_size--) + ; /* Do nothing. 
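+ * (The loop steps downward until nallocx() reports a different usable
+ * size, i.e. it returns the largest request that maps to the next size
+ * class down.)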
*/ + + return (shrink_size); +} + +static void +arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) +{ + + arena_ralloc_junk_large_orig(ptr, old_usize, usize); + assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize"); + assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize"); + most_recently_trimmed = ptr; +} + +TEST_BEGIN(test_junk_large_ralloc_shrink) +{ + void *p1, *p2; + + p1 = mallocx(large_maxclass, 0); + assert_ptr_not_null(p1, "Unexpected mallocx() failure"); + + arena_ralloc_junk_large_orig = arena_ralloc_junk_large; + arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; + + p2 = rallocx(p1, shrink_size(large_maxclass), 0); + assert_ptr_eq(p1, p2, "Unexpected move during shrink"); + + arena_ralloc_junk_large = arena_ralloc_junk_large_orig; + + assert_ptr_eq(most_recently_trimmed, p1, + "Expected trimmed portion of region to be junk-filled"); +} +TEST_END + +static bool detected_redzone_corruption; + +static void +arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + detected_redzone_corruption = true; +} + +TEST_BEGIN(test_junk_redzone) +{ + char *s; + arena_redzone_corruption_t *arena_redzone_corruption_orig; + + test_skip_if(!config_fill); + test_skip_if(!opt_junk_alloc || !opt_junk_free); + + arena_redzone_corruption_orig = arena_redzone_corruption; + arena_redzone_corruption = arena_redzone_corruption_replacement; + + /* Test underflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[-1] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + /* Test overflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[sallocx(s, 0)] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + arena_redzone_corruption = arena_redzone_corruption_orig; +} +TEST_END + +int +main(void) +{ + + return (test( + test_junk_small, + test_junk_large, + test_junk_huge, + test_junk_large_ralloc_shrink, + test_junk_redzone)); +} diff --git a/memory/jemalloc/src/test/unit/junk_alloc.c b/memory/jemalloc/src/test/unit/junk_alloc.c new file mode 100644 index 000000000..a5895b5c0 --- /dev/null +++ b/memory/jemalloc/src/test/unit/junk_alloc.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TEST_JUNK_OPT "junk:alloc" +#include "junk.c" +#undef JEMALLOC_TEST_JUNK_OPT diff --git a/memory/jemalloc/src/test/unit/junk_free.c b/memory/jemalloc/src/test/unit/junk_free.c new file mode 100644 index 000000000..bb5183c90 --- /dev/null +++ b/memory/jemalloc/src/test/unit/junk_free.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TEST_JUNK_OPT "junk:free" +#include "junk.c" +#undef JEMALLOC_TEST_JUNK_OPT diff --git a/memory/jemalloc/src/test/unit/lg_chunk.c b/memory/jemalloc/src/test/unit/lg_chunk.c new file mode 100644 index 000000000..7e5df3814 --- /dev/null +++ b/memory/jemalloc/src/test/unit/lg_chunk.c @@ -0,0 +1,26 @@ +#include "test/jemalloc_test.h" + +/* + * Make sure that opt.lg_chunk clamping is sufficient. In practice, this test + * program will fail a debug assertion during initialization and abort (rather + * than the test soft-failing) if clamping is insufficient. 
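+ * With lg_chunk:0 the configuration nominally requests 2^0-byte chunks;
+ * the allocator is expected to clamp this up to its minimum supported
+ * chunk size, so a single allocate/deallocate pair is enough to exercise
+ * the clamped path.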
+ */ +const char *malloc_conf = "lg_chunk:0"; + +TEST_BEGIN(test_lg_chunk_clamp) +{ + void *p; + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_lg_chunk_clamp)); +} diff --git a/memory/jemalloc/src/test/unit/mallctl.c b/memory/jemalloc/src/test/unit/mallctl.c new file mode 100644 index 000000000..69f8c20c1 --- /dev/null +++ b/memory/jemalloc/src/test/unit/mallctl.c @@ -0,0 +1,731 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_mallctl_errors) +{ + uint64_t epoch; + size_t sz; + + assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, + "mallctl() should return ENOENT for non-existent names"); + + assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), + EPERM, "mallctl() should return EPERM on attempt to write " + "read-only value"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_errors) +{ + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, + "mallctlnametomib() should return ENOENT for non-existent names"); +} +TEST_END + +TEST_BEGIN(test_mallctlbymib_errors) +{ + uint64_t epoch; + size_t sz; + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", + strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " + "attempt to write read-only value"); + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + sizeof(epoch)-1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + sizeof(epoch)+1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctl_read_write) +{ + uint64_t old_epoch, new_epoch; + size_t sz = sizeof(old_epoch); + + /* Blind. */ + assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read. */ + assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Write. 
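+ * (The value written is ignored; any write to "epoch" simply refreshes
+ * the data from which statistics are reported.)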
*/ + assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), + 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read+write. */ + assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, + sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_short_mib) +{ + size_t mib[4]; + size_t miblen; + + miblen = 3; + mib[3] = 42; + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + assert_zu_eq(miblen, 3, "Unexpected mib output length"); + assert_zu_eq(mib[3], 42, + "mallctlnametomib() wrote past the end of the input mib"); +} +TEST_END + +TEST_BEGIN(test_mallctl_config) +{ + +#define TEST_MALLCTL_CONFIG(config, t) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + assert_b_eq(oldval, config_##config, "Incorrect config value"); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_CONFIG(cache_oblivious, bool); + TEST_MALLCTL_CONFIG(debug, bool); + TEST_MALLCTL_CONFIG(fill, bool); + TEST_MALLCTL_CONFIG(lazy_lock, bool); + TEST_MALLCTL_CONFIG(malloc_conf, const char *); + TEST_MALLCTL_CONFIG(munmap, bool); + TEST_MALLCTL_CONFIG(prof, bool); + TEST_MALLCTL_CONFIG(prof_libgcc, bool); + TEST_MALLCTL_CONFIG(prof_libunwind, bool); + TEST_MALLCTL_CONFIG(stats, bool); + TEST_MALLCTL_CONFIG(tcache, bool); + TEST_MALLCTL_CONFIG(tls, bool); + TEST_MALLCTL_CONFIG(utrace, bool); + TEST_MALLCTL_CONFIG(valgrind, bool); + TEST_MALLCTL_CONFIG(xmalloc, bool); + +#undef TEST_MALLCTL_CONFIG +} +TEST_END + +TEST_BEGIN(test_mallctl_opt) +{ + bool config_always = true; + +#define TEST_MALLCTL_OPT(t, opt, config) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + int expected = config_##config ? 
0 : ENOENT; \ + int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ + assert_d_eq(result, expected, \ + "Unexpected mallctl() result for opt."#opt); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_OPT(bool, abort, always); + TEST_MALLCTL_OPT(size_t, lg_chunk, always); + TEST_MALLCTL_OPT(const char *, dss, always); + TEST_MALLCTL_OPT(unsigned, narenas, always); + TEST_MALLCTL_OPT(const char *, purge, always); + TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); + TEST_MALLCTL_OPT(ssize_t, decay_time, always); + TEST_MALLCTL_OPT(bool, stats_print, always); + TEST_MALLCTL_OPT(const char *, junk, fill); + TEST_MALLCTL_OPT(size_t, quarantine, fill); + TEST_MALLCTL_OPT(bool, redzone, fill); + TEST_MALLCTL_OPT(bool, zero, fill); + TEST_MALLCTL_OPT(bool, utrace, utrace); + TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); + TEST_MALLCTL_OPT(bool, tcache, tcache); + TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); + TEST_MALLCTL_OPT(bool, prof, prof); + TEST_MALLCTL_OPT(const char *, prof_prefix, prof); + TEST_MALLCTL_OPT(bool, prof_active, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); + TEST_MALLCTL_OPT(bool, prof_accum, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); + TEST_MALLCTL_OPT(bool, prof_gdump, prof); + TEST_MALLCTL_OPT(bool, prof_final, prof); + TEST_MALLCTL_OPT(bool, prof_leak, prof); + +#undef TEST_MALLCTL_OPT +} +TEST_END + +TEST_BEGIN(test_manpage_example) +{ + unsigned nbins, i; + size_t mib[4]; + size_t len, miblen; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, + "Unexpected mallctl() failure"); + + miblen = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), + 0, "Unexpected mallctlbymib() failure"); + /* Do something with bin_size... */ + } +} +TEST_END + +TEST_BEGIN(test_tcache_none) +{ + void *p0, *q, *p1; + + test_skip_if(!config_tcache); + + /* Allocate p and q. */ + p0 = mallocx(42, 0); + assert_ptr_not_null(p0, "Unexpected mallocx() failure"); + q = mallocx(42, 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + + /* Deallocate p and q, but bypass the tcache for q. */ + dallocx(p0, 0); + dallocx(q, MALLOCX_TCACHE_NONE); + + /* Make sure that tcache-based allocation returns p, not q. */ + p1 = mallocx(42, 0); + assert_ptr_not_null(p1, "Unexpected mallocx() failure"); + assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); + + /* Clean up. */ + dallocx(p1, MALLOCX_TCACHE_NONE); +} +TEST_END + +TEST_BEGIN(test_tcache) +{ +#define NTCACHES 10 + unsigned tis[NTCACHES]; + void *ps[NTCACHES]; + void *qs[NTCACHES]; + unsigned i; + size_t sz, psz, qsz; + + test_skip_if(!config_tcache); + + psz = 42; + qsz = nallocx(psz, 0) + 1; + + /* Create tcaches. */ + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, + "Unexpected mallctl() failure, i=%u", i); + } + + /* Exercise tcache ID recycling. 
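+ * (Destroy every cache, then create the same number again so that the
+ * freed IDs are handed back out.)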
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, + "Unexpected mallctl() failure, i=%u", i); + } + + /* Flush empty tcaches. */ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Cache some allocations. */ + for (i = 0; i < NTCACHES; i++) { + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(ps[i], MALLOCX_TCACHE(tis[i])); + + qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } + + /* Verify that tcaches allocate cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *p0 = ps[i]; + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + assert_ptr_eq(ps[i], p0, + "Expected mallocx() to allocate cached region, i=%u", i); + } + + /* Verify that reallocation uses cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *q0 = qs[i]; + qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", + i); + assert_ptr_eq(qs[i], q0, + "Expected rallocx() to allocate cached region, i=%u", i); + /* Avoid undefined behavior in case of test failure. */ + if (qs[i] == NULL) + qs[i] = ps[i]; + } + for (i = 0; i < NTCACHES; i++) + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + + /* Flush some non-empty tcaches. */ + for (i = 0; i < NTCACHES/2; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Destroy tcaches. 
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } +} +TEST_END + +TEST_BEGIN(test_thread_arena) +{ + unsigned arena_old, arena_new, narenas; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); + arena_new = narenas - 1; + assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, + sizeof(unsigned)), 0, "Unexpected mallctl() failure"); + arena_new = 0; + assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, + sizeof(unsigned)), 0, "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_lg_dirty_mult) +{ + ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; + size_t sz = sizeof(ssize_t); + + test_skip_if(opt_purge != purge_mode_ratio); + + assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + + lg_dirty_mult = -2; + assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + lg_dirty_mult = (sizeof(size_t) << 3); + assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; + lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult + = lg_dirty_mult, lg_dirty_mult++) { + ssize_t old_lg_dirty_mult; + + assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult, + &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, + "Unexpected old arena.0.lg_dirty_mult"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_decay_time) +{ + ssize_t decay_time, orig_decay_time, prev_decay_time; + size_t sz = sizeof(ssize_t); + + test_skip_if(opt_purge != purge_mode_decay); + + assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + + decay_time = -2; + assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, + &decay_time, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + decay_time = 0x7fffffff; + assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, + &decay_time, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + + for (prev_decay_time = decay_time, decay_time = -1; + decay_time < 20; prev_decay_time = decay_time, decay_time++) { + ssize_t old_decay_time; + + assert_d_eq(mallctl("arena.0.decay_time", &old_decay_time, + &sz, &decay_time, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + assert_zd_eq(old_decay_time, prev_decay_time, + "Unexpected old arena.0.decay_time"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_purge) +{ + unsigned narenas; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_decay) +{ + unsigned narenas; + size_t sz = 
sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dss) +{ + const char *dss_prec_old, *dss_prec_new; + size_t sz = sizeof(dss_prec_old); + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, + sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); + + mib[1] = narenas_total_get(); + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); +} +TEST_END + +TEST_BEGIN(test_arenas_initialized) +{ + unsigned narenas; + size_t sz = sizeof(narenas); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + { + VARIABLE_ARRAY(bool, initialized, narenas); + + sz = narenas * sizeof(bool); + assert_d_eq(mallctl("arenas.initialized", initialized, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_lg_dirty_mult) +{ + ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; + size_t sz = sizeof(ssize_t); + + test_skip_if(opt_purge != purge_mode_ratio); + + assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + + lg_dirty_mult = -2; + assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + lg_dirty_mult = (sizeof(size_t) << 3); + assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; + lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = + lg_dirty_mult, lg_dirty_mult++) { + ssize_t old_lg_dirty_mult; + + assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult, + &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, + "Unexpected old arenas.lg_dirty_mult"); + } +} +TEST_END + 
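+/*
+ * For reference, the read+write mallctl() idiom exercised above and below
+ * looks like this in application code (a minimal sketch: error handling is
+ * elided and the new value is arbitrary):
+ *
+ *	ssize_t old_val, new_val = 3;
+ *	size_t len = sizeof(old_val);
+ *	mallctl("arenas.lg_dirty_mult", &old_val, &len, &new_val,
+ *	    sizeof(new_val));
+ *
+ * On success, old_val receives the previous setting, and the new value is
+ * used to initialize subsequently created arenas.
+ */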
+TEST_BEGIN(test_arenas_decay_time) +{ + ssize_t decay_time, orig_decay_time, prev_decay_time; + size_t sz = sizeof(ssize_t); + + test_skip_if(opt_purge != purge_mode_decay); + + assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + + decay_time = -2; + assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, + &decay_time, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + decay_time = 0x7fffffff; + assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, + &decay_time, sizeof(ssize_t)), 0, + "Expected mallctl() failure"); + + for (prev_decay_time = decay_time, decay_time = -1; + decay_time < 20; prev_decay_time = decay_time, decay_time++) { + ssize_t old_decay_time; + + assert_d_eq(mallctl("arenas.decay_time", &old_decay_time, + &sz, &decay_time, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + assert_zd_eq(old_decay_time, prev_decay_time, + "Unexpected old arenas.decay_time"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_constants) +{ + +#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ + "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); + TEST_ARENAS_CONSTANT(size_t, page, PAGE); + TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); + TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses); + TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses); + +#undef TEST_ARENAS_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_bin_constants) +{ + +#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); + TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); + TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); + +#undef TEST_ARENAS_BIN_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_lrun_constants) +{ + +#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS); + +#undef TEST_ARENAS_LRUN_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_hchunk_constants) +{ + +#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize); + +#undef TEST_ARENAS_HCHUNK_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_extend) +{ + unsigned narenas_before, arena, narenas_after; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_u_eq(narenas_before+1, narenas_after, 
+ "Unexpected number of arenas before versus after extension"); + assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); +} +TEST_END + +TEST_BEGIN(test_stats_arenas) +{ + +#define TEST_STATS_ARENAS(t, name) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ +} while (0) + + TEST_STATS_ARENAS(unsigned, nthreads); + TEST_STATS_ARENAS(const char *, dss); + TEST_STATS_ARENAS(ssize_t, lg_dirty_mult); + TEST_STATS_ARENAS(ssize_t, decay_time); + TEST_STATS_ARENAS(size_t, pactive); + TEST_STATS_ARENAS(size_t, pdirty); + +#undef TEST_STATS_ARENAS +} +TEST_END + +int +main(void) +{ + + return (test( + test_mallctl_errors, + test_mallctlnametomib_errors, + test_mallctlbymib_errors, + test_mallctl_read_write, + test_mallctlnametomib_short_mib, + test_mallctl_config, + test_mallctl_opt, + test_manpage_example, + test_tcache_none, + test_tcache, + test_thread_arena, + test_arena_i_lg_dirty_mult, + test_arena_i_decay_time, + test_arena_i_purge, + test_arena_i_decay, + test_arena_i_dss, + test_arenas_initialized, + test_arenas_lg_dirty_mult, + test_arenas_decay_time, + test_arenas_constants, + test_arenas_bin_constants, + test_arenas_lrun_constants, + test_arenas_hchunk_constants, + test_arenas_extend, + test_stats_arenas)); +} diff --git a/memory/jemalloc/src/test/unit/math.c b/memory/jemalloc/src/test/unit/math.c new file mode 100644 index 000000000..adb72bed9 --- /dev/null +++ b/memory/jemalloc/src/test/unit/math.c @@ -0,0 +1,398 @@ +#include "test/jemalloc_test.h" + +#define MAX_REL_ERR 1.0e-9 +#define MAX_ABS_ERR 1.0e-9 + +#include <float.h> + +#ifdef __PGI +#undef INFINITY +#endif + +#ifndef INFINITY +#define INFINITY (DBL_MAX + DBL_MAX) +#endif + +static bool +double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) +{ + double rel_err; + + if (fabs(a - b) < max_abs_err) + return (true); + rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); + return (rel_err < max_rel_err); +} + +static uint64_t +factorial(unsigned x) +{ + uint64_t ret = 1; + unsigned i; + + for (i = 2; i <= x; i++) + ret *= (uint64_t)i; + + return (ret); +} + +TEST_BEGIN(test_ln_gamma_factorial) +{ + unsigned x; + + /* exp(ln_gamma(x)) == (x-1)! for integer x. */ + for (x = 1; x <= 21; x++) { + assert_true(double_eq_rel(exp(ln_gamma(x)), + (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect factorial result for x=%u", x); + } +} +TEST_END + +/* Expected ln_gamma([0.0..100.0] increment=0.25). 
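+ * (Index i corresponds to x = i * 0.25; entry 0 is the pole of ln_gamma
+ * at x = 0, hence INFINITY.)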
*/ +static const double ln_gamma_misc_expected[] = { + INFINITY, + 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, + 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, + -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, + 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, + 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, + 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, + 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, + 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, + 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, + 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, + 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, + 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, + 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, + 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, + 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, + 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, + 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, + 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, + 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, + 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, + 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, + 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, + 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, + 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, + 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, + 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, + 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, + 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, + 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, + 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, + 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, + 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, + 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, + 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, + 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, + 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, + 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, + 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, + 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, + 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, + 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, + 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, + 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, + 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, + 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, + 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, + 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, + 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, + 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, + 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, + 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, + 101.14611615586458981, 102.05634043243354370, 
102.96819861451382394, + 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, + 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, + 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, + 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, + 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, + 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, + 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, + 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, + 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, + 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, + 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, + 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, + 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, + 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, + 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, + 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, + 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, + 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, + 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, + 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, + 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, + 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, + 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, + 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, + 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, + 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, + 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, + 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, + 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, + 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, + 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, + 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, + 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, + 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, + 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, + 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, + 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, + 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, + 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, + 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, + 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, + 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, + 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, + 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, + 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, + 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, + 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, + 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, + 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, + 251.89040220972316320, 252.97185064629374551, 
254.05412415488834199, + 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, + 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, + 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, + 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, + 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, + 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, + 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, + 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, + 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, + 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, + 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, + 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, + 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, + 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, + 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, + 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, + 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, + 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, + 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, + 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, + 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, + 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, + 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, + 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, + 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, + 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, + 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, + 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, + 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, + 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, + 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, + 359.13420536957539753 +}; + +TEST_BEGIN(test_ln_gamma_misc) +{ + unsigned i; + + for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { + double x = (double)i * 0.25; + assert_true(double_eq_rel(ln_gamma(x), + ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect ln_gamma result for i=%u", i); + } +} +TEST_END + +/* Expected pt_norm([0.01..0.99] increment=0.01). 
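+ * (pt_norm is the standard normal quantile function; index i corresponds
+ * to p = i * 0.01, and entry 0 is -INFINITY for p = 0.)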
*/ +static const double pt_norm_expected[] = { + -INFINITY, + -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, + -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, + -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, + -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, + -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, + -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, + -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, + -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, + -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, + -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, + -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, + -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, + -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, + -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, + -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, + -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, + -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, + 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, + 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, + 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, + 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, + 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, + 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, + 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, + 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, + 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, + 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, + 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, + 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, + 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, + 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, + 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, + 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 +}; + +TEST_BEGIN(test_pt_norm) +{ + unsigned i; + + for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { + double p = (double)i * 0.01; + assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], + MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_norm result for i=%u", i); + } +} +TEST_END + +/* + * Expected pt_chi2(p=[0.01..0.99] increment=0.07, + * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
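+ * The table below holds five blocks of fifteen values, one block per df
+ * entry, in the same order as pt_chi2_df.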
+ */ +static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; +static const double pt_chi2_expected[] = { + 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, + 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, + 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, + 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, + 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, + + 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, + 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, + 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, + 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, + 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, + + 2.606673548632508, 4.602913725294877, 5.646152813924212, + 6.488971315540869, 7.249823275816285, 7.977314231410841, + 8.700354939944047, 9.441728024225892, 10.224338321374127, + 11.076435368801061, 12.039320937038386, 13.183878752697167, + 14.657791935084575, 16.885728216339373, 23.361991680031817, + + 70.14844087392152, 80.92379498849355, 85.53325420085891, + 88.94433120715347, 91.83732712857017, 94.46719943606301, + 96.96896479994635, 99.43412843510363, 101.94074719829733, + 104.57228644307247, 107.43900093448734, 110.71844673417287, + 114.76616819871325, 120.57422505959563, 135.92318818757556, + + 899.0072447849649, 937.9271278858220, 953.8117189560207, + 965.3079371501154, 974.8974061207954, 983.4936235182347, + 991.5691170518946, 999.4334123954690, 1007.3391826856553, + 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, + 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 +}; + +TEST_BEGIN(test_pt_chi2) +{ + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { + double df = pt_chi2_df[i]; + double ln_gamma_df = ln_gamma(df * 0.5); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), + pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_chi2 result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +/* + * Expected pt_gamma(p=[0.1..0.99] increment=0.07, + * shape=[0.5..3.0] increment=0.5). 
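+ * As above, the table holds one fifteen-value block per shape entry, with
+ * the scale parameter fixed at 1.0.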
+ */ +static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; +static const double pt_gamma_expected[] = { + 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, + 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, + 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, + 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, + 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, + + 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, + 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, + 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, + 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, + 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, + + 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, + 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, + 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, + 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, + 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, + + 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, + 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, + 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, + 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, + 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, + + 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, + 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, + 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, + 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, + 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, + + 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, + 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, + 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, + 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, + 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 +}; + +TEST_BEGIN(test_pt_gamma_shape) +{ + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { + double shape = pt_gamma_shape[i]; + double ln_gamma_shape = ln_gamma(shape); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, + ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, + MAX_ABS_ERR), + "Incorrect pt_gamma result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +TEST_BEGIN(test_pt_gamma_scale) +{ + double shape = 1.0; + double ln_gamma_shape = ln_gamma(shape); + + assert_true(double_eq_rel( + pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, + pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, + MAX_ABS_ERR), + "Scale should be trivially equivalent to external multiplication"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ln_gamma_factorial, + test_ln_gamma_misc, + test_pt_norm, + test_pt_chi2, + test_pt_gamma_shape, + test_pt_gamma_scale)); +} diff --git a/memory/jemalloc/src/test/unit/mq.c b/memory/jemalloc/src/test/unit/mq.c new file mode 100644 index 000000000..bde2a480b --- /dev/null +++ b/memory/jemalloc/src/test/unit/mq.c @@ -0,0 +1,93 @@ +#include "test/jemalloc_test.h" + +#define NSENDERS 3 +#define NMSGS 100000 + +typedef struct mq_msg_s mq_msg_t; +struct mq_msg_s { + mq_msg(mq_msg_t) link; +}; +mq_gen(static, mq_, mq_t, mq_msg_t, link) + +TEST_BEGIN(test_mq_basic) +{ + mq_t mq; + mq_msg_t msg; + + 
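+ /*
+ * mq_t is the test framework's message queue: mq_tryget() returns NULL
+ * instead of blocking when the queue is empty, while mq_get() waits until
+ * a message is available.
+ */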
assert_false(mq_init(&mq), "Unexpected mq_init() failure"); + assert_u_eq(mq_count(&mq), 0, "mq should be empty"); + assert_ptr_null(mq_tryget(&mq), + "mq_tryget() should fail when the queue is empty"); + + mq_put(&mq, &msg); + assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); + assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); + + mq_put(&mq, &msg); + assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); + + mq_fini(&mq); +} +TEST_END + +static void * +thd_receiver_start(void *arg) +{ + mq_t *mq = (mq_t *)arg; + unsigned i; + + for (i = 0; i < (NSENDERS * NMSGS); i++) { + mq_msg_t *msg = mq_get(mq); + assert_ptr_not_null(msg, "mq_get() should never return NULL"); + dallocx(msg, 0); + } + return (NULL); +} + +static void * +thd_sender_start(void *arg) +{ + mq_t *mq = (mq_t *)arg; + unsigned i; + + for (i = 0; i < NMSGS; i++) { + mq_msg_t *msg; + void *p; + p = mallocx(sizeof(mq_msg_t), 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + msg = (mq_msg_t *)p; + mq_put(mq, msg); + } + return (NULL); +} + +TEST_BEGIN(test_mq_threaded) +{ + mq_t mq; + thd_t receiver; + thd_t senders[NSENDERS]; + unsigned i; + + assert_false(mq_init(&mq), "Unexpected mq_init() failure"); + + thd_create(&receiver, thd_receiver_start, (void *)&mq); + for (i = 0; i < NSENDERS; i++) + thd_create(&senders[i], thd_sender_start, (void *)&mq); + + thd_join(receiver, NULL); + for (i = 0; i < NSENDERS; i++) + thd_join(senders[i], NULL); + + mq_fini(&mq); +} +TEST_END + +int +main(void) +{ + + return (test( + test_mq_basic, + test_mq_threaded)); +} + diff --git a/memory/jemalloc/src/test/unit/mtx.c b/memory/jemalloc/src/test/unit/mtx.c new file mode 100644 index 000000000..96ff69486 --- /dev/null +++ b/memory/jemalloc/src/test/unit/mtx.c @@ -0,0 +1,60 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 2 +#define NINCRS 2000000 + +TEST_BEGIN(test_mtx_basic) +{ + mtx_t mtx; + + assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); + mtx_lock(&mtx); + mtx_unlock(&mtx); + mtx_fini(&mtx); +} +TEST_END + +typedef struct { + mtx_t mtx; + unsigned x; +} thd_start_arg_t; + +static void * +thd_start(void *varg) +{ + thd_start_arg_t *arg = (thd_start_arg_t *)varg; + unsigned i; + + for (i = 0; i < NINCRS; i++) { + mtx_lock(&arg->mtx); + arg->x++; + mtx_unlock(&arg->mtx); + } + return (NULL); +} + +TEST_BEGIN(test_mtx_race) +{ + thd_start_arg_t arg; + thd_t thds[NTHREADS]; + unsigned i; + + assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); + arg.x = 0; + for (i = 0; i < NTHREADS; i++) + thd_create(&thds[i], thd_start, (void *)&arg); + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); + assert_u_eq(arg.x, NTHREADS * NINCRS, + "Race-related counter corruption"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_mtx_basic, + test_mtx_race)); +} diff --git a/memory/jemalloc/src/test/unit/nstime.c b/memory/jemalloc/src/test/unit/nstime.c new file mode 100644 index 000000000..0368bc26e --- /dev/null +++ b/memory/jemalloc/src/test/unit/nstime.c @@ -0,0 +1,227 @@ +#include "test/jemalloc_test.h" + +#define BILLION UINT64_C(1000000000) + +TEST_BEGIN(test_nstime_init) +{ + nstime_t nst; + + nstime_init(&nst, 42000000043); + assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read"); + assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); + assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); +} +TEST_END + +TEST_BEGIN(test_nstime_init2) +{ + nstime_t nst; + + nstime_init2(&nst, 42, 43); + 
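/* Seconds and nanoseconds are passed separately to nstime_init2(). */ +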
assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); + assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); +} +TEST_END + +TEST_BEGIN(test_nstime_copy) +{ + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_init(&nstb, 0); + nstime_copy(&nstb, &nsta); + assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied"); + assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied"); +} +TEST_END + +TEST_BEGIN(test_nstime_compare) +{ + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal"); + assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal"); + + nstime_init2(&nstb, 42, 42); + assert_d_eq(nstime_compare(&nsta, &nstb), 1, + "nsta should be greater than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), -1, + "nstb should be less than nsta"); + + nstime_init2(&nstb, 42, 44); + assert_d_eq(nstime_compare(&nsta, &nstb), -1, + "nsta should be less than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), 1, + "nstb should be greater than nsta"); + + nstime_init2(&nstb, 41, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 1, + "nsta should be greater than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), -1, + "nstb should be less than nsta"); + + nstime_init2(&nstb, 43, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), -1, + "nsta should be less than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), 1, + "nstb should be greater than nsta"); +} +TEST_END + +TEST_BEGIN(test_nstime_add) +{ + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_add(&nsta, &nstb); + nstime_init2(&nstb, 84, 86); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); + + nstime_init2(&nsta, 42, BILLION - 1); + nstime_copy(&nstb, &nsta); + nstime_add(&nsta, &nstb); + nstime_init2(&nstb, 85, BILLION - 2); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); +} +TEST_END + +TEST_BEGIN(test_nstime_subtract) +{ + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_subtract(&nsta, &nstb); + nstime_init(&nstb, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); + + nstime_init2(&nsta, 42, 43); + nstime_init2(&nstb, 41, 44); + nstime_subtract(&nsta, &nstb); + nstime_init2(&nstb, 0, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); +} +TEST_END + +TEST_BEGIN(test_nstime_imultiply) +{ + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_imultiply(&nsta, 10); + nstime_init2(&nstb, 420, 430); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect multiplication result"); + + nstime_init2(&nsta, 42, 666666666); + nstime_imultiply(&nsta, 3); + nstime_init2(&nstb, 127, 999999998); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect multiplication result"); +} +TEST_END + +TEST_BEGIN(test_nstime_idivide) +{ + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_idivide(&nsta, 10); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 666666666); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 3); + nstime_idivide(&nsta, 3); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect division result"); +} +TEST_END + +TEST_BEGIN(test_nstime_divide) +{ + nstime_t nsta, nstb, nstc; + + nstime_init2(&nsta, 42, 43); + 
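/* (nsta * 10) / nsta must yield exactly 10. */ +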
nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + assert_u64_eq(nstime_divide(&nsta, &nstb), 10, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_init(&nstc, 1); + nstime_add(&nsta, &nstc); + assert_u64_eq(nstime_divide(&nsta, &nstb), 10, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_init(&nstc, 1); + nstime_subtract(&nsta, &nstc); + assert_u64_eq(nstime_divide(&nsta, &nstb), 9, + "Incorrect division result"); +} +TEST_END + +TEST_BEGIN(test_nstime_monotonic) +{ + + nstime_monotonic(); +} +TEST_END + +TEST_BEGIN(test_nstime_update) +{ + nstime_t nst; + + nstime_init(&nst, 0); + + assert_false(nstime_update(&nst), "Basic time update failed."); + + /* Only Rip Van Winkle sleeps this long. */ + { + nstime_t addend; + nstime_init2(&addend, 631152000, 0); + nstime_add(&nst, &addend); + } + { + nstime_t nst0; + nstime_copy(&nst0, &nst); + assert_true(nstime_update(&nst), + "Update should detect time roll-back."); + assert_d_eq(nstime_compare(&nst, &nst0), 0, + "Time should not have been modified"); + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_nstime_init, + test_nstime_init2, + test_nstime_copy, + test_nstime_compare, + test_nstime_add, + test_nstime_subtract, + test_nstime_imultiply, + test_nstime_idivide, + test_nstime_divide, + test_nstime_monotonic, + test_nstime_update)); +} diff --git a/memory/jemalloc/src/test/unit/ph.c b/memory/jemalloc/src/test/unit/ph.c new file mode 100644 index 000000000..da442f07e --- /dev/null +++ b/memory/jemalloc/src/test/unit/ph.c @@ -0,0 +1,290 @@ +#include "test/jemalloc_test.h" + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + phn(node_t) link; + uint64_t key; +}; + +static int +node_cmp(const node_t *a, const node_t *b) +{ + int ret; + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the heap, so force an + * arbitrary ordering for non-identical items with equal keys. 
+ */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return (ret); +} + +static int +node_cmp_magic(const node_t *a, const node_t *b) { + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + return (node_cmp(a, b)); +} + +typedef ph(node_t) heap_t; +ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); + +static void +node_print(const node_t *node, unsigned depth) +{ + unsigned i; + node_t *leftmost_child, *sibling; + + for (i = 0; i < depth; i++) + malloc_printf("\t"); + malloc_printf("%2"FMTu64"\n", node->key); + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) + return; + node_print(leftmost_child, depth + 1); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + node_print(sibling, depth + 1); + } +} + +static void +heap_print(const heap_t *heap) +{ + node_t *auxelm; + + malloc_printf("vvv heap %p vvv\n", heap); + if (heap->ph_root == NULL) + goto label_return; + + node_print(heap->ph_root, 0); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + node_print(auxelm, 0); + } + +label_return: + malloc_printf("^^^ heap %p ^^^\n", heap); +} + +static unsigned +node_validate(const node_t *node, const node_t *parent) +{ + unsigned nnodes = 1; + node_t *leftmost_child, *sibling; + + if (parent != NULL) { + assert_d_ge(node_cmp_magic(node, parent), 0, + "Child is less than parent"); + } + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) + return (nnodes); + assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), + (void *)node, "Leftmost child does not link to node"); + nnodes += node_validate(leftmost_child, node); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, sibling)), sibling, + "sibling's prev doesn't link to sibling"); + nnodes += node_validate(sibling, node); + } + return (nnodes); +} + +static unsigned +heap_validate(const heap_t *heap) +{ + unsigned nnodes = 0; + node_t *auxelm; + + if (heap->ph_root == NULL) + goto label_return; + + nnodes += node_validate(heap->ph_root, NULL); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + nnodes += node_validate(auxelm, NULL); + } + +label_return: + if (false) + heap_print(heap); + return (nnodes); +} + +TEST_BEGIN(test_ph_empty) +{ + heap_t heap; + + heap_new(&heap); + assert_true(heap_empty(&heap), "Heap should be empty"); + assert_ptr_null(heap_first(&heap), "Unexpected node"); +} +TEST_END + +static void +node_remove(heap_t *heap, node_t *node) +{ + + heap_remove(heap, node); + + node->magic = 0; +} + +static node_t * +node_remove_first(heap_t *heap) +{ + node_t *node = heap_remove_first(heap); + node->magic = 0; + return (node); +} + +TEST_BEGIN(test_ph_random) +{ +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + heap_t heap; + node_t nodes[NNODES]; + unsigned i, j, k; + + 
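/* Exercise the heap with in-order, reverse-order, and random insertion bags. */ +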
sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) + bag[j] = j; + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) + bag[j] = NNODES - j - 1; + break; + default: + for (j = 0; j < NNODES; j++) + bag[j] = gen_rand64_range(sfmt, NNODES); + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize heap and nodes. */ + heap_new(&heap); + assert_u_eq(heap_validate(&heap), 0, + "Incorrect node count"); + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + heap_insert(&heap, &nodes[k]); + if (i % 13 == 12) { + /* Trigger merging. */ + assert_ptr_not_null(heap_first(&heap), + "Heap should not be empty"); + } + assert_u_eq(heap_validate(&heap), k + 1, + "Incorrect node count"); + } + + assert_false(heap_empty(&heap), + "Heap should not be empty"); + + /* Remove nodes. */ + switch (i % 4) { + case 0: + for (k = 0; k < j; k++) { + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, &nodes[k]); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + case 1: + for (k = j; k > 0; k--) { + node_remove(&heap, &nodes[k-1]); + assert_u_eq(heap_validate(&heap), k - 1, + "Incorrect node count"); + } + break; + case 2: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = node_remove_first(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + prev = node; + } + break; + } case 3: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = heap_first(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + prev = node; + } + break; + } default: + not_reached(); + } + + assert_ptr_null(heap_first(&heap), + "Heap should be empty"); + assert_true(heap_empty(&heap), "Heap should be empty"); + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef SEED +} +TEST_END + +int +main(void) +{ + + return (test( + test_ph_empty, + test_ph_random)); +} diff --git a/memory/jemalloc/src/test/unit/prng.c b/memory/jemalloc/src/test/unit/prng.c new file mode 100644 index 000000000..80c9d733f --- /dev/null +++ b/memory/jemalloc/src/test/unit/prng.c @@ -0,0 +1,263 @@ +#include "test/jemalloc_test.h" + +static void +test_prng_lg_range_u32(bool atomic) +{ + uint32_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_u32(&sa, 32, atomic); + sa = 42; + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_u32(&sb, 32, atomic); + assert_u32_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_u32(&sa, 32, atomic); + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_u32(&sa, 32, atomic); + for (lg_range = 31; lg_range > 0; lg_range--) { + sb = 42; + rb = prng_lg_range_u32(&sb, lg_range, atomic); + assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u32_eq(rb, (ra >> 
(32 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_u64(void) +{ + uint64_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + sa = 42; + rb = prng_lg_range_u64(&sa, 64); + assert_u64_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_u64(&sb, 64); + assert_u64_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + rb = prng_lg_range_u64(&sa, 64); + assert_u64_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + for (lg_range = 63; lg_range > 0; lg_range--) { + sb = 42; + rb = prng_lg_range_u64(&sb, lg_range); + assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u64_eq(rb, (ra >> (64 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_zu(bool atomic) +{ + size_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + sa = 42; + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; + lg_range--) { + sb = 42; + rb = prng_lg_range_zu(&sb, lg_range, atomic); + assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - + lg_range)), "Expected high order bits of full-width " + "result, lg_range=%u", lg_range); + } +} + +TEST_BEGIN(test_prng_lg_range_u32_nonatomic) +{ + + test_prng_lg_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u32_atomic) +{ + + test_prng_lg_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u64_nonatomic) +{ + + test_prng_lg_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_nonatomic) +{ + + test_prng_lg_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_atomic) +{ + + test_prng_lg_range_zu(true); +} +TEST_END + +static void +test_prng_range_u32(bool atomic) +{ + uint32_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + uint32_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + uint32_t r = prng_range_u32(&s, range, atomic); + + assert_u32_lt(r, range, "Out of range"); + } + } +} + +static void +test_prng_range_u64(void) +{ + uint64_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + uint64_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + uint64_t r = prng_range_u64(&s, range); + + assert_u64_lt(r, range, "Out of range"); + } + } +} + 
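/* As above, but for size_t-width ranges. */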
+static void +test_prng_range_zu(bool atomic) +{ + size_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + size_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + size_t r = prng_range_zu(&s, range, atomic); + + assert_zu_lt(r, range, "Out of range"); + } + } +} + +TEST_BEGIN(test_prng_range_u32_nonatomic) +{ + + test_prng_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_u32_atomic) +{ + + test_prng_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_range_u64_nonatomic) +{ + + test_prng_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_nonatomic) +{ + + test_prng_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_atomic) +{ + + test_prng_range_zu(true); +} +TEST_END + +int +main(void) +{ + + return (test( + test_prng_lg_range_u32_nonatomic, + test_prng_lg_range_u32_atomic, + test_prng_lg_range_u64_nonatomic, + test_prng_lg_range_zu_nonatomic, + test_prng_lg_range_zu_atomic, + test_prng_range_u32_nonatomic, + test_prng_range_u32_atomic, + test_prng_range_u64_nonatomic, + test_prng_range_zu_nonatomic, + test_prng_range_zu_atomic)); +} diff --git a/memory/jemalloc/src/test/unit/prof_accum.c b/memory/jemalloc/src/test/unit/prof_accum.c new file mode 100644 index 000000000..fd229e0fd --- /dev/null +++ b/memory/jemalloc/src/test/unit/prof_accum.c @@ -0,0 +1,91 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 4 +#define NALLOCS_PER_THREAD 50 +#define DUMP_INTERVAL 1 +#define BT_COUNT_CHECK_INTERVAL 5 + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"; +#endif + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +static void * +alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) +{ + + return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration)); +} + +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + size_t bt_count_prev, bt_count; + unsigned i_prev, i; + + i_prev = 0; + bt_count_prev = 0; + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + void *p = alloc_from_permuted_backtrace(thd_ind, i); + dallocx(p, 0); + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + } + + if (i % BT_COUNT_CHECK_INTERVAL == 0 || + i+1 == NALLOCS_PER_THREAD) { + bt_count = prof_bt_count(); + assert_zu_le(bt_count_prev+(i-i_prev), bt_count, + "Expected larger backtrace count increase"); + i_prev = i; + bt_count_prev = bt_count; + } + } + + return (NULL); +} + +TEST_BEGIN(test_idump) +{ + bool active; + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END + +int +main(void) +{ + + return (test( + test_idump)); +} diff --git a/memory/jemalloc/src/test/unit/prof_active.c b/memory/jemalloc/src/test/unit/prof_active.c new file mode 100644 index 000000000..814909572 --- /dev/null +++ 
b/memory/jemalloc/src/test/unit/prof_active.c @@ -0,0 +1,136 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_thread_active_init:false,lg_prof_sample:0"; +#endif + +static void +mallctl_bool_get(const char *name, bool expected, const char *func, int line) +{ + bool old; + size_t sz; + + sz = sizeof(old); + assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0, + "%s():%d: Unexpected mallctl failure reading %s", func, line, name); + assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, + name); +} + +static void +mallctl_bool_set(const char *name, bool old_expected, bool val_new, + const char *func, int line) +{ + bool old; + size_t sz; + + sz = sizeof(old); + assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0, + "%s():%d: Unexpected mallctl failure reading/writing %s", func, + line, name); + assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, + line, name); +} + +static void +mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, + int line) +{ + + mallctl_bool_get("prof.active", prof_active_old_expected, func, line); +} +#define mallctl_prof_active_get(a) \ + mallctl_prof_active_get_impl(a, __func__, __LINE__) + +static void +mallctl_prof_active_set_impl(bool prof_active_old_expected, + bool prof_active_new, const char *func, int line) +{ + + mallctl_bool_set("prof.active", prof_active_old_expected, + prof_active_new, func, line); +} +#define mallctl_prof_active_set(a, b) \ + mallctl_prof_active_set_impl(a, b, __func__, __LINE__) + +static void +mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, + const char *func, int line) +{ + + mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, + func, line); +} +#define mallctl_thread_prof_active_get(a) \ + mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) + +static void +mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, + bool thread_prof_active_new, const char *func, int line) +{ + + mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, + thread_prof_active_new, func, line); +} +#define mallctl_thread_prof_active_set(a, b) \ + mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) + +static void +prof_sampling_probe_impl(bool expect_sample, const char *func, int line) +{ + void *p; + size_t expected_backtraces = expect_sample ? 1 : 0; + + assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, + line); + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_zu_eq(prof_bt_count(), expected_backtraces, + "%s():%d: Unexpected backtrace count", func, line); + dallocx(p, 0); +} +#define prof_sampling_probe(a) \ + prof_sampling_probe_impl(a, __func__, __LINE__) + +TEST_BEGIN(test_prof_active) +{ + + test_skip_if(!config_prof); + + mallctl_prof_active_get(true); + mallctl_thread_prof_active_get(false); + + mallctl_prof_active_set(true, true); + mallctl_thread_prof_active_set(false, false); + /* prof.active, !thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(true, false); + mallctl_thread_prof_active_set(false, false); + /* !prof.active, !thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(false, false); + mallctl_thread_prof_active_set(false, true); + /* !prof.active, thread.prof.active. 
*/ + prof_sampling_probe(false); + + mallctl_prof_active_set(false, true); + mallctl_thread_prof_active_set(true, true); + /* prof.active, thread.prof.active. */ + prof_sampling_probe(true); + + /* Restore settings. */ + mallctl_prof_active_set(true, true); + mallctl_thread_prof_active_set(true, false); +} +TEST_END + +int +main(void) +{ + + return (test( + test_prof_active)); +} diff --git a/memory/jemalloc/src/test/unit/prof_gdump.c b/memory/jemalloc/src/test/unit/prof_gdump.c new file mode 100644 index 000000000..a0e6ee921 --- /dev/null +++ b/memory/jemalloc/src/test/unit/prof_gdump.c @@ -0,0 +1,81 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true"; +#endif + +static bool did_prof_dump_open; + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + did_prof_dump_open = true; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +TEST_BEGIN(test_gdump) +{ + bool active, gdump, gdump_old; + void *p, *q, *r, *s; + size_t sz; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + did_prof_dump_open = false; + p = mallocx(chunksize, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + did_prof_dump_open = false; + q = mallocx(chunksize, 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + gdump = false; + sz = sizeof(gdump_old); + assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, + sizeof(gdump)), 0, + "Unexpected mallctl failure while disabling prof.gdump"); + assert(gdump_old); + did_prof_dump_open = false; + r = mallocx(chunksize, 0); + assert_ptr_not_null(r, "Unexpected mallocx() failure"); + assert_false(did_prof_dump_open, "Unexpected profile dump"); + + gdump = true; + sz = sizeof(gdump_old); + assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, + sizeof(gdump)), 0, + "Unexpected mallctl failure while enabling prof.gdump"); + assert(!gdump_old); + did_prof_dump_open = false; + s = mallocx(chunksize, 0); + assert_ptr_not_null(s, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + dallocx(p, 0); + dallocx(q, 0); + dallocx(r, 0); + dallocx(s, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_gdump)); +} diff --git a/memory/jemalloc/src/test/unit/prof_idump.c b/memory/jemalloc/src/test/unit/prof_idump.c new file mode 100644 index 000000000..bdea53ecd --- /dev/null +++ b/memory/jemalloc/src/test/unit/prof_idump.c @@ -0,0 +1,51 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0," + "lg_prof_interval:0"; +#endif + +static bool did_prof_dump_open; + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + did_prof_dump_open = true; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +TEST_BEGIN(test_idump) +{ + bool active; + void *p; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating
profiling"); + + prof_dump_open = prof_dump_open_intercept; + + did_prof_dump_open = false; + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); + assert_true(did_prof_dump_open, "Expected a profile dump"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_idump)); +} diff --git a/memory/jemalloc/src/test/unit/prof_reset.c b/memory/jemalloc/src/test/unit/prof_reset.c new file mode 100644 index 000000000..5ae45fd2c --- /dev/null +++ b/memory/jemalloc/src/test/unit/prof_reset.c @@ -0,0 +1,303 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_active:false,lg_prof_sample:0"; +#endif + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +static void +set_prof_active(bool active) +{ + + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure"); +} + +static size_t +get_lg_prof_sample(void) +{ + size_t lg_prof_sample; + size_t sz = sizeof(size_t); + + assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + return (lg_prof_sample); +} + +static void +do_prof_reset(size_t lg_prof_sample) +{ + assert_d_eq(mallctl("prof.reset", NULL, NULL, + &lg_prof_sample, sizeof(size_t)), 0, + "Unexpected mallctl failure while resetting profile data"); + assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), + "Expected profile sample rate change"); +} + +TEST_BEGIN(test_prof_reset_basic) +{ + size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; + size_t sz; + unsigned i; + + test_skip_if(!config_prof); + + sz = sizeof(size_t); + assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz, + NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + assert_zu_eq(lg_prof_sample_orig, 0, + "Unexpected profiling sample rate"); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected disagreement between \"opt.lg_prof_sample\" and " + "\"prof.lg_sample\""); + + /* Test simple resets. */ + for (i = 0; i < 2; i++) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure while resetting profile data"); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected profile sample rate change"); + } + + /* Test resets with prof.lg_sample changes. */ + lg_prof_sample_next = 1; + for (i = 0; i < 2; i++) { + do_prof_reset(lg_prof_sample_next); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample, lg_prof_sample_next, + "Expected profile sample rate change"); + lg_prof_sample_next = lg_prof_sample_orig; + } + + /* Make sure the test code restored prof.lg_sample. 
*/ + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected disagreement between \"opt.lg_prof_sample\" and " + "\"prof.lg_sample\""); +} +TEST_END + +bool prof_dump_header_intercepted = false; +prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; +static bool +prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) +{ + + prof_dump_header_intercepted = true; + memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); + + return (false); +} + +TEST_BEGIN(test_prof_reset_cleanup) +{ + void *p; + prof_dump_header_t *prof_dump_header_orig; + + test_skip_if(!config_prof); + + set_prof_active(true); + + assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); + + prof_dump_header_orig = prof_dump_header; + prof_dump_header = prof_dump_header_intercept; + assert_false(prof_dump_header_intercepted, "Unexpected intercept"); + + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + assert_true(prof_dump_header_intercepted, "Expected intercept"); + assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); + + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected error while resetting heap profile data"); + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); + assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); + + prof_dump_header = prof_dump_header_orig; + + dallocx(p, 0); + assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); + + set_prof_active(false); +} +TEST_END + +#define NTHREADS 4 +#define NALLOCS_PER_THREAD (1U << 13) +#define OBJ_RING_BUF_COUNT 1531 +#define RESET_INTERVAL (1U << 10) +#define DUMP_INTERVAL 3677 +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + unsigned i; + void *objs[OBJ_RING_BUF_COUNT]; + + memset(objs, 0, sizeof(objs)); + + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + if (i % RESET_INTERVAL == 0) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), + 0, "Unexpected error while resetting heap profile " + "data"); + } + + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + } + + { + void **pp = &objs[i % OBJ_RING_BUF_COUNT]; + if (*pp != NULL) { + dallocx(*pp, 0); + *pp = NULL; + } + *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); + assert_ptr_not_null(*pp, + "Unexpected btalloc() failure"); + } + } + + /* Clean up any remaining objects. 
*/ + for (i = 0; i < OBJ_RING_BUF_COUNT; i++) { + void **pp = &objs[i % OBJ_RING_BUF_COUNT]; + if (*pp != NULL) { + dallocx(*pp, 0); + *pp = NULL; + } + } + + return (NULL); +} + +TEST_BEGIN(test_prof_reset) +{ + size_t lg_prof_sample_orig; + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + size_t bt_count, tdata_count; + + test_skip_if(!config_prof); + + bt_count = prof_bt_count(); + assert_zu_eq(bt_count, 0, + "Unexpected pre-existing backtrace count"); + tdata_count = prof_tdata_count(); + + lg_prof_sample_orig = get_lg_prof_sample(); + do_prof_reset(5); + + set_prof_active(true); + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); + + assert_zu_eq(prof_bt_count(), bt_count, + "Unexpected backtrace count change"); + assert_zu_eq(prof_tdata_count(), tdata_count, + "Unexpected remaining tdata structures"); + + set_prof_active(false); + + do_prof_reset(lg_prof_sample_orig); +} +TEST_END +#undef NTHREADS +#undef NALLOCS_PER_THREAD +#undef OBJ_RING_BUF_COUNT +#undef RESET_INTERVAL +#undef DUMP_INTERVAL + +/* Test sampling at the same allocation site across resets. */ +#define NITER 10 +TEST_BEGIN(test_xallocx) +{ + size_t lg_prof_sample_orig; + unsigned i; + void *ptrs[NITER]; + + test_skip_if(!config_prof); + + lg_prof_sample_orig = get_lg_prof_sample(); + set_prof_active(true); + + /* Reset profiling. */ + do_prof_reset(0); + + for (i = 0; i < NITER; i++) { + void *p; + size_t sz, nsz; + + /* Reset profiling. */ + do_prof_reset(0); + + /* Allocate small object (which will be promoted). */ + p = ptrs[i] = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + /* Reset profiling. */ + do_prof_reset(0); + + /* Perform successful xallocx(). */ + sz = sallocx(p, 0); + assert_zu_eq(xallocx(p, sz, 0, 0), sz, + "Unexpected xallocx() failure"); + + /* Perform unsuccessful xallocx(). */ + nsz = nallocx(sz+1, 0); + assert_zu_eq(xallocx(p, nsz, 0, 0), sz, + "Unexpected xallocx() success"); + } + + for (i = 0; i < NITER; i++) { + /* dallocx. */ + dallocx(ptrs[i], 0); + } + + set_prof_active(false); + do_prof_reset(lg_prof_sample_orig); +} +TEST_END +#undef NITER + +int +main(void) +{ + + /* Intercept dumping prior to running any tests.
*/ + prof_dump_open = prof_dump_open_intercept; + + return (test( + test_prof_reset_basic, + test_prof_reset_cleanup, + test_prof_reset, + test_xallocx)); +} diff --git a/memory/jemalloc/src/test/unit/prof_thread_name.c b/memory/jemalloc/src/test/unit/prof_thread_name.c new file mode 100644 index 000000000..f501158d7 --- /dev/null +++ b/memory/jemalloc/src/test/unit/prof_thread_name.c @@ -0,0 +1,129 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = "prof:true,prof_active:false"; +#endif + +static void +mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, + int line) +{ + const char *thread_name_old; + size_t sz; + + sz = sizeof(thread_name_old); + assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0), + 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", + func, line); + assert_str_eq(thread_name_old, thread_name_expected, + "%s():%d: Unexpected thread.prof.name value", func, line); +} +#define mallctl_thread_name_get(a) \ + mallctl_thread_name_get_impl(a, __func__, __LINE__) + +static void +mallctl_thread_name_set_impl(const char *thread_name, const char *func, + int line) +{ + + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, + sizeof(thread_name)), 0, + "%s():%d: Unexpected mallctl failure writing thread.prof.name", + func, line); + mallctl_thread_name_get_impl(thread_name, func, line); +} +#define mallctl_thread_name_set(a) \ + mallctl_thread_name_set_impl(a, __func__, __LINE__) + +TEST_BEGIN(test_prof_thread_name_validation) +{ + const char *thread_name; + + test_skip_if(!config_prof); + + mallctl_thread_name_get(""); + mallctl_thread_name_set("hi there"); + + /* NULL input shouldn't be allowed. */ + thread_name = NULL; + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, + sizeof(thread_name)), EFAULT, + "Unexpected mallctl result writing \"%s\" to thread.prof.name", + thread_name); + + /* '\n' shouldn't be allowed. */ + thread_name = "hi\nthere"; + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, + sizeof(thread_name)), EFAULT, + "Unexpected mallctl result writing \"%s\" to thread.prof.name", + thread_name); + + /* Simultaneous read/write shouldn't be allowed.
*/ + { + const char *thread_name_old; + size_t sz; + + sz = sizeof(thread_name_old); + assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, + &thread_name, sizeof(thread_name)), EPERM, + "Unexpected mallctl result writing \"%s\" to " + "thread.prof.name", thread_name); + } + + mallctl_thread_name_set(""); +} +TEST_END + +#define NTHREADS 4 +#define NRESET 25 +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + char thread_name[16] = ""; + unsigned i; + + malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); + + mallctl_thread_name_get(""); + mallctl_thread_name_set(thread_name); + + for (i = 0; i < NRESET; i++) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected error while resetting heap profile data"); + mallctl_thread_name_get(thread_name); + } + + mallctl_thread_name_set(thread_name); + mallctl_thread_name_set(""); + + return (NULL); +} + +TEST_BEGIN(test_prof_thread_name_threaded) +{ + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END +#undef NTHREADS +#undef NRESET + +int +main(void) +{ + + return (test( + test_prof_thread_name_validation, + test_prof_thread_name_threaded)); +} diff --git a/memory/jemalloc/src/test/unit/ql.c b/memory/jemalloc/src/test/unit/ql.c new file mode 100644 index 000000000..05fad450f --- /dev/null +++ b/memory/jemalloc/src/test/unit/ql.c @@ -0,0 +1,209 @@ +#include "test/jemalloc_test.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 + +typedef struct list_s list_t; +typedef ql_head(list_t) list_head_t; + +struct list_s { + ql_elm(list_t) link; + char id; +}; + +static void +test_empty_list(list_head_t *head) +{ + list_t *t; + unsigned i; + + assert_ptr_null(ql_first(head), "Unexpected element for empty list"); + assert_ptr_null(ql_last(head, link), + "Unexpected element for empty list"); + + i = 0; + ql_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); + + i = 0; + ql_reverse_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); +} + +TEST_BEGIN(test_ql_empty) +{ + list_head_t head; + + ql_new(&head); + test_empty_list(&head); +} +TEST_END + +static void +init_entries(list_t *entries, unsigned nentries) +{ + unsigned i; + + for (i = 0; i < nentries; i++) { + entries[i].id = 'a' + i; + ql_elm_new(&entries[i], link); + } +} + +static void +test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) +{ + list_t *t; + unsigned i; + + assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); + assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, + "Element id mismatch"); + + i = 0; + ql_foreach(t, head, link) { + assert_c_eq(t->id, entries[i].id, "Element id mismatch"); + i++; + } + + i = 0; + ql_reverse_foreach(t, head, link) { + assert_c_eq(t->id, entries[nentries-i-1].id, + "Element id mismatch"); + i++; + } + + for (i = 0; i < nentries-1; i++) { + t = ql_next(head, &entries[i], link); + assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); + } + assert_ptr_null(ql_next(head, &entries[nentries-1], link), + "Unexpected element"); + + assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); + for (i = 1; i < nentries; i++) { + t = ql_prev(head, &entries[i], link); + assert_c_eq(t->id, 
entries[i-1].id, "Element id mismatch"); + } +} + +TEST_BEGIN(test_ql_tail_insert) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_tail_insert(&head, &entries[i], link); + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_tail_remove) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_tail_insert(&head, &entries[i], link); + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, entries, NENTRIES-i); + ql_tail_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_head_insert) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_head_remove) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, &entries[i], NENTRIES-i); + ql_head_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_insert) +{ + list_head_t head; + list_t entries[8]; + list_t *a, *b, *c, *d, *e, *f, *g, *h; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + a = &entries[0]; + b = &entries[1]; + c = &entries[2]; + d = &entries[3]; + e = &entries[4]; + f = &entries[5]; + g = &entries[6]; + h = &entries[7]; + + /* + * ql_remove(), ql_before_insert(), and ql_after_insert() are used + * internally by other macros that are already tested, so there's no + * need to test them completely. However, insertion/deletion from the + * middle of lists is not otherwise tested; do so here. + */ + ql_tail_insert(&head, f, link); + ql_before_insert(&head, f, b, link); + ql_before_insert(&head, f, c, link); + ql_after_insert(f, h, link); + ql_after_insert(f, g, link); + ql_before_insert(&head, b, a, link); + ql_after_insert(c, d, link); + ql_before_insert(&head, f, e, link); + + test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ql_empty, + test_ql_tail_insert, + test_ql_tail_remove, + test_ql_head_insert, + test_ql_head_remove, + test_ql_insert)); +} diff --git a/memory/jemalloc/src/test/unit/qr.c b/memory/jemalloc/src/test/unit/qr.c new file mode 100644 index 000000000..a2a2d902b --- /dev/null +++ b/memory/jemalloc/src/test/unit/qr.c @@ -0,0 +1,248 @@ +#include "test/jemalloc_test.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 +/* Split index, in [1..NENTRIES). 
*/ +#define SPLIT_INDEX 5 + +typedef struct ring_s ring_t; + +struct ring_s { + qr(ring_t) link; + char id; +}; + +static void +init_entries(ring_t *entries) +{ + unsigned i; + + for (i = 0; i < NENTRIES; i++) { + qr_new(&entries[i], link); + entries[i].id = 'a' + i; + } +} + +static void +test_independent_entries(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Next element in single-element ring should be same as " + "current element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Previous element in single-element ring should be same as " + "current element"); + } +} + +TEST_BEGIN(test_qr_one) +{ + ring_t entries[NENTRIES]; + + init_entries(entries); + test_independent_entries(entries); +} +TEST_END + +static void +test_entries_ring(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } +} + +TEST_BEGIN(test_qr_after_insert) +{ + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + test_entries_ring(entries); +} +TEST_END + +TEST_BEGIN(test_qr_remove) +{ + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[i+j].id, + "Element id mismatch"); + j++; + } + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, + "Element id mismatch"); + j++; + } + qr_remove(&entries[i], link); + } + test_independent_entries(entries); +} +TEST_END + +TEST_BEGIN(test_qr_before_insert) +{ + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_before_insert(&entries[i - 1], &entries[i], link); + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, 
entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } +} +TEST_END + +static void +test_split_entries(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + if (i < SPLIT_INDEX) { + assert_c_eq(t->id, + entries[(i+j) % SPLIT_INDEX].id, + "Element id mismatch"); + } else { + assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % + (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, + "Element id mismatch"); + } + j++; + } + } +} + +TEST_BEGIN(test_qr_meld_split) +{ + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + + qr_split(&entries[0], &entries[SPLIT_INDEX], link); + test_split_entries(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + test_split_entries(entries); + + qr_split(&entries[0], &entries[SPLIT_INDEX], link); + test_entries_ring(entries); + + qr_split(&entries[0], &entries[0], link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[0], link); + test_entries_ring(entries); +} +TEST_END + +int +main(void) +{ + + return (test( + test_qr_one, + test_qr_after_insert, + test_qr_remove, + test_qr_before_insert, + test_qr_meld_split)); +} diff --git a/memory/jemalloc/src/test/unit/quarantine.c b/memory/jemalloc/src/test/unit/quarantine.c new file mode 100644 index 000000000..bbd48a51d --- /dev/null +++ b/memory/jemalloc/src/test/unit/quarantine.c @@ -0,0 +1,108 @@ +#include "test/jemalloc_test.h" + +#define QUARANTINE_SIZE 8192 +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:" + STRINGIFY(QUARANTINE_SIZE); +#endif + +void +quarantine_clear(void) +{ + void *p; + + p = mallocx(QUARANTINE_SIZE*2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); +} + +TEST_BEGIN(test_quarantine) +{ +#define SZ ZU(256) +#define NQUARANTINED (QUARANTINE_SIZE/SZ) + void *quarantined[NQUARANTINED+1]; + size_t i, j; + + test_skip_if(!config_fill); + + assert_zu_eq(nallocx(SZ, 0), SZ, + "SZ=%zu does not precisely equal a size class", SZ); + + quarantine_clear(); + + /* + * Allocate enough regions to completely fill the quarantine, plus one + * more. The last iteration occurs with a completely full quarantine, + * but no regions should be drained from the quarantine until the last + * deallocation occurs. Therefore no region recycling should occur + * until after this loop completes. 
+ */ + for (i = 0; i < NQUARANTINED+1; i++) { + void *p = mallocx(SZ, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + quarantined[i] = p; + dallocx(p, 0); + for (j = 0; j < i; j++) { + assert_ptr_ne(p, quarantined[j], + "Quarantined region recycled too early; " + "i=%zu, j=%zu", i, j); + } + } +#undef NQUARANTINED +#undef SZ +} +TEST_END + +static bool detected_redzone_corruption; + +static void +arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + detected_redzone_corruption = true; +} + +TEST_BEGIN(test_quarantine_redzone) +{ + char *s; + arena_redzone_corruption_t *arena_redzone_corruption_orig; + + test_skip_if(!config_fill); + + arena_redzone_corruption_orig = arena_redzone_corruption; + arena_redzone_corruption = arena_redzone_corruption_replacement; + + /* Test underflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[-1] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + /* Test overflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[sallocx(s, 0)] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + arena_redzone_corruption = arena_redzone_corruption_orig; +} +TEST_END + +int +main(void) +{ + + return (test( + test_quarantine, + test_quarantine_redzone)); +} diff --git a/memory/jemalloc/src/test/unit/rb.c b/memory/jemalloc/src/test/unit/rb.c new file mode 100644 index 000000000..cf3d3a783 --- /dev/null +++ b/memory/jemalloc/src/test/unit/rb.c @@ -0,0 +1,354 @@ +#include "test/jemalloc_test.h" + +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ + rbp_bh_t != NULL; \ + rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ + if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ + (r_height)++; \ + } \ + } \ +} while (0) + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + rb_node(node_t) link; + uint64_t key; +}; + +static int +node_cmp(const node_t *a, const node_t *b) { + int ret; + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the tree, so force an + * arbitrary ordering for non-identical items with equal keys. 
+ */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return (ret); +} + +typedef rb_tree(node_t) tree_t; +rb_gen(static, tree_, tree_t, node_t, link, node_cmp); + +TEST_BEGIN(test_rb_empty) +{ + tree_t tree; + node_t key; + + tree_new(&tree); + + assert_true(tree_empty(&tree), "Tree should be empty"); + assert_ptr_null(tree_first(&tree), "Unexpected node"); + assert_ptr_null(tree_last(&tree), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); +} +TEST_END + +static unsigned +tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) +{ + unsigned ret = 0; + node_t *left_node; + node_t *right_node; + + if (node == NULL) + return (ret); + + left_node = rbtn_left_get(node_t, link, node); + right_node = rbtn_right_get(node_t, link, node); + + if (!rbtn_red_get(node_t, link, node)) + black_depth++; + + /* Red nodes must be interleaved with black nodes. */ + if (rbtn_red_get(node_t, link, node)) { + if (left_node != NULL) + assert_false(rbtn_red_get(node_t, link, left_node), + "Node should be black"); + if (right_node != NULL) + assert_false(rbtn_red_get(node_t, link, right_node), + "Node should be black"); + } + + /* Self. */ + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Left subtree. */ + if (left_node != NULL) + ret += tree_recurse(left_node, black_height, black_depth); + else + ret += (black_depth != black_height); + + /* Right subtree. */ + if (right_node != NULL) + ret += tree_recurse(right_node, black_height, black_depth); + else + ret += (black_depth != black_height); + + return (ret); +} + +static node_t * +tree_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *i = (unsigned *)data; + node_t *search_node; + + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Test rb_search(). */ + search_node = tree_search(tree, node); + assert_ptr_eq(search_node, node, + "tree_search() returned unexpected node"); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_nsearch() returned unexpected node"); + + /* Test rb_psearch(). */ + search_node = tree_psearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_psearch() returned unexpected node"); + + (*i)++; + + return (NULL); +} + +static unsigned +tree_iterate(tree_t *tree) +{ + unsigned i; + + i = 0; + tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return (i); +} + +static unsigned +tree_iterate_reverse(tree_t *tree) +{ + unsigned i; + + i = 0; + tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return (i); +} + +static void +node_remove(tree_t *tree, node_t *node, unsigned nnodes) +{ + node_t *search_node; + unsigned black_height, imbalances; + + tree_remove(tree, node); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + if (search_node != NULL) { + assert_u64_ge(search_node->key, node->key, + "Key ordering error"); + } + + /* Test rb_psearch(). 
*/ + search_node = tree_psearch(tree, node); + if (search_node != NULL) { + assert_u64_le(search_node->key, node->key, + "Key ordering error"); + } + + node->magic = 0; + + rbtn_black_height(node_t, link, tree, black_height); + imbalances = tree_recurse(tree->rbt_root, black_height, 0); + assert_u_eq(imbalances, 0, "Tree is unbalanced"); + assert_u_eq(tree_iterate(tree), nnodes-1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(tree), nnodes-1, + "Unexpected node iteration count"); +} + +static node_t * +remove_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_next(tree, node); + + node_remove(tree, node, *nnodes); + + return (ret); +} + +static node_t * +remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_prev(tree, node); + + node_remove(tree, node, *nnodes); + + return (ret); +} + +static void +destroy_cb(node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + + assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); + (*nnodes)--; +} + +TEST_BEGIN(test_rb_random) +{ +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + tree_t tree; + node_t nodes[NNODES]; + unsigned i, j, k, black_height, imbalances; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) + bag[j] = j; + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) + bag[j] = NNODES - j - 1; + break; + default: + for (j = 0; j < NNODES; j++) + bag[j] = gen_rand64_range(sfmt, NNODES); + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize tree and nodes. */ + tree_new(&tree); + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + tree_insert(&tree, &nodes[k]); + + rbtn_black_height(node_t, link, &tree, + black_height); + imbalances = tree_recurse(tree.rbt_root, + black_height, 0); + assert_u_eq(imbalances, 0, + "Tree is unbalanced"); + + assert_u_eq(tree_iterate(&tree), k+1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(&tree), k+1, + "Unexpected node iteration count"); + + assert_false(tree_empty(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_first(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_last(&tree), + "Tree should not be empty"); + + tree_next(&tree, &nodes[k]); + tree_prev(&tree, &nodes[k]); + } + + /* Remove nodes. 
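+ * Five removal strategies are cycled by bag index: forward order,
+ * reverse order, forward iteration, reverse iteration, and bulk
+ * tree_destroy().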
*/ + switch (i % 5) { + case 0: + for (k = 0; k < j; k++) + node_remove(&tree, &nodes[k], j - k); + break; + case 1: + for (k = j; k > 0; k--) + node_remove(&tree, &nodes[k-1], k); + break; + case 2: { + node_t *start; + unsigned nnodes = j; + + start = NULL; + do { + start = tree_iter(&tree, start, + remove_iterate_cb, (void *)&nnodes); + nnodes--; + } while (start != NULL); + assert_u_eq(nnodes, 0, + "Removal terminated early"); + break; + } case 3: { + node_t *start; + unsigned nnodes = j; + + start = NULL; + do { + start = tree_reverse_iter(&tree, start, + remove_reverse_iterate_cb, + (void *)&nnodes); + nnodes--; + } while (start != NULL); + assert_u_eq(nnodes, 0, + "Removal terminated early"); + break; + } case 4: { + unsigned nnodes = j; + tree_destroy(&tree, destroy_cb, &nnodes); + assert_u_eq(nnodes, 0, + "Destruction terminated early"); + break; + } default: + not_reached(); + } + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef NBAGS +#undef SEED +} +TEST_END + +int +main(void) +{ + + return (test( + test_rb_empty, + test_rb_random)); +} diff --git a/memory/jemalloc/src/test/unit/rtree.c b/memory/jemalloc/src/test/unit/rtree.c new file mode 100644 index 000000000..b54b3e86f --- /dev/null +++ b/memory/jemalloc/src/test/unit/rtree.c @@ -0,0 +1,151 @@ +#include "test/jemalloc_test.h" + +static rtree_node_elm_t * +node_alloc(size_t nelms) +{ + + return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t))); +} + +static void +node_dalloc(rtree_node_elm_t *node) +{ + + free(node); +} + +TEST_BEGIN(test_rtree_get_empty) +{ + unsigned i; + + for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { + rtree_t rtree; + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + assert_ptr_null(rtree_get(&rtree, 0, false), + "rtree_get() should return NULL for empty tree"); + rtree_delete(&rtree); + } +} +TEST_END + +TEST_BEGIN(test_rtree_extrema) +{ + unsigned i; + extent_node_t node_a, node_b; + + for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { + rtree_t rtree; + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + + assert_false(rtree_set(&rtree, 0, &node_a), + "Unexpected rtree_set() failure"); + assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a, + "rtree_get() should return previously set value"); + + assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b), + "Unexpected rtree_set() failure"); + assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b, + "rtree_get() should return previously set value"); + + rtree_delete(&rtree); + } +} +TEST_END + +TEST_BEGIN(test_rtree_bits) +{ + unsigned i, j, k; + + for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { + uintptr_t keys[] = {0, 1, + (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; + extent_node_t node; + rtree_t rtree; + + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + + for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { + assert_false(rtree_set(&rtree, keys[j], &node), + "Unexpected rtree_set() failure"); + for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { + assert_ptr_eq(rtree_get(&rtree, keys[k], true), + &node, "rtree_get() should return " + "previously set value and ignore " + "insignificant key bits; i=%u, j=%u, k=%u, " + "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, + j, k, keys[j], keys[k]); + } + assert_ptr_null(rtree_get(&rtree, + (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), + "Only leftmost rtree leaf should be set; " + "i=%u, j=%u", i, j); + 
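/*
+ * Clear this key's mapping so that the next key in the set is
+ * inserted into an empty tree.
+ */
+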
assert_false(rtree_set(&rtree, keys[j], NULL),
+ "Unexpected rtree_set() failure");
+ }
+
+ rtree_delete(&rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_random)
+{
+ unsigned i;
+ sfmt_t *sfmt;
+#define NSET 16
+#define SEED 42
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ uintptr_t keys[NSET];
+ extent_node_t node;
+ unsigned j;
+ rtree_t rtree;
+
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
+
+ for (j = 0; j < NSET; j++) {
+ keys[j] = (uintptr_t)gen_rand64(sfmt);
+ assert_false(rtree_set(&rtree, keys[j], &node),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
+ "rtree_get() should return previously set value");
+ }
+ for (j = 0; j < NSET; j++) {
+ assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
+ "rtree_get() should return previously set value");
+ }
+
+ for (j = 0; j < NSET; j++) {
+ assert_false(rtree_set(&rtree, keys[j], NULL),
+ "Unexpected rtree_set() failure");
+ assert_ptr_null(rtree_get(&rtree, keys[j], true),
+ "rtree_get() should return NULL after the key is cleared");
+ }
+ for (j = 0; j < NSET; j++) {
+ assert_ptr_null(rtree_get(&rtree, keys[j], true),
+ "rtree_get() should return NULL after the key is cleared");
+ }
+
+ rtree_delete(&rtree);
+ }
+ fini_gen_rand(sfmt);
+#undef NSET
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_rtree_get_empty,
+ test_rtree_extrema,
+ test_rtree_bits,
+ test_rtree_random));
+}
diff --git a/memory/jemalloc/src/test/unit/run_quantize.c b/memory/jemalloc/src/test/unit/run_quantize.c
new file mode 100644
index 000000000..b1ca6356d
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/run_quantize.c
@@ -0,0 +1,149 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_small_run_size)
+{
+ unsigned nbins, i;
+ size_t sz, run_size;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all small size classes, get their run sizes, and verify
+ * that the quantized size is the same as the run size.
+ */
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nbins; i++) {
+ mib[2] = i;
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0),
+ 0, "Unexpected mallctlbymib failure");
+ assert_zu_eq(run_size, run_quantize_floor(run_size),
+ "Small run quantization should be a no-op (run_size=%zu)",
+ run_size);
+ assert_zu_eq(run_size, run_quantize_ceil(run_size),
+ "Small run quantization should be a no-op (run_size=%zu)",
+ run_size);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_large_run_size)
+{
+ bool cache_oblivious;
+ unsigned nlruns, i;
+ size_t sz, run_size_prev, ceil_prev;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all large size classes, get their run sizes, and verify
+ * that the quantized size is the same as the run size.
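+ * (When cache-oblivious allocation is enabled, each large run
+ * carries one extra page so the allocation can be placed at a
+ * random offset, hence the PAGE adjustment below.)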
+ */ + + sz = sizeof(bool); + assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + for (i = 0; i < nlruns; i++) { + size_t lrun_size, run_size, floor, ceil; + + mib[2] = i; + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0), + 0, "Unexpected mallctlbymib failure"); + run_size = cache_oblivious ? lrun_size + PAGE : lrun_size; + floor = run_quantize_floor(run_size); + ceil = run_quantize_ceil(run_size); + + assert_zu_eq(run_size, floor, + "Large run quantization should be a no-op for precise " + "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size); + assert_zu_eq(run_size, ceil, + "Large run quantization should be a no-op for precise " + "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size); + + if (i > 0) { + assert_zu_eq(run_size_prev, run_quantize_floor(run_size + - PAGE), "Floor should be a precise size"); + if (run_size_prev < ceil_prev) { + assert_zu_eq(ceil_prev, run_size, + "Ceiling should be a precise size " + "(run_size_prev=%zu, ceil_prev=%zu, " + "run_size=%zu)", run_size_prev, ceil_prev, + run_size); + } + } + run_size_prev = floor; + ceil_prev = run_quantize_ceil(run_size + PAGE); + } +} +TEST_END + +TEST_BEGIN(test_monotonic) +{ + unsigned nbins, nlruns, i; + size_t sz, floor_prev, ceil_prev; + + /* + * Iterate over all run sizes and verify that + * run_quantize_{floor,ceil}() are monotonic. + */ + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + floor_prev = 0; + ceil_prev = 0; + for (i = 1; i <= chunksize >> LG_PAGE; i++) { + size_t run_size, floor, ceil; + + run_size = i << LG_PAGE; + floor = run_quantize_floor(run_size); + ceil = run_quantize_ceil(run_size); + + assert_zu_le(floor, run_size, + "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)", + floor, run_size, ceil); + assert_zu_ge(ceil, run_size, + "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)", + floor, run_size, ceil); + + assert_zu_le(floor_prev, floor, "Floor should be monotonic " + "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)", + floor_prev, floor, run_size, ceil); + assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " + "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)", + floor, run_size, ceil_prev, ceil); + + floor_prev = floor; + ceil_prev = ceil; + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_small_run_size, + test_large_run_size, + test_monotonic)); +} diff --git a/memory/jemalloc/src/test/unit/size_classes.c b/memory/jemalloc/src/test/unit/size_classes.c new file mode 100644 index 000000000..4e1e0ce4f --- /dev/null +++ b/memory/jemalloc/src/test/unit/size_classes.c @@ -0,0 +1,184 @@ +#include "test/jemalloc_test.h" + +static size_t +get_max_size_class(void) +{ + unsigned nhchunks; + size_t mib[4]; + size_t sz, miblen, max_size_class; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() 
error"); + mib[2] = nhchunks - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() error"); + + return (max_size_class); +} + +TEST_BEGIN(test_size_classes) +{ + size_t size_class, max_size_class; + szind_t index, max_index; + + max_size_class = get_max_size_class(); + max_index = size2index(max_size_class); + + for (index = 0, size_class = index2size(index); index < max_index || + size_class < max_size_class; index++, size_class = + index2size(index)) { + assert_true(index < max_index, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + assert_true(size_class < max_size_class, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + + assert_u_eq(index, size2index(size_class), + "size2index() does not reverse index2size(): index=%u -->" + " size_class=%zu --> index=%u --> size_class=%zu", index, + size_class, size2index(size_class), + index2size(size2index(size_class))); + assert_zu_eq(size_class, index2size(size2index(size_class)), + "index2size() does not reverse size2index(): index=%u -->" + " size_class=%zu --> index=%u --> size_class=%zu", index, + size_class, size2index(size_class), + index2size(size2index(size_class))); + + assert_u_eq(index+1, size2index(size_class+1), + "Next size_class does not round up properly"); + + assert_zu_eq(size_class, (index > 0) ? + s2u(index2size(index-1)+1) : s2u(1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class-1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class), + "s2u() does not compute same size class"); + assert_zu_eq(s2u(size_class+1), index2size(index+1), + "s2u() does not round up to next size class"); + } + + assert_u_eq(index, size2index(index2size(index)), + "size2index() does not reverse index2size()"); + assert_zu_eq(max_size_class, index2size(size2index(max_size_class)), + "index2size() does not reverse size2index()"); + + assert_zu_eq(size_class, s2u(index2size(index-1)+1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class-1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class), + "s2u() does not compute same size class"); +} +TEST_END + +TEST_BEGIN(test_psize_classes) +{ + size_t size_class, max_size_class; + pszind_t pind, max_pind; + + max_size_class = get_max_size_class(); + max_pind = psz2ind(max_size_class); + + for (pind = 0, size_class = pind2sz(pind); pind < max_pind || + size_class < max_size_class; pind++, size_class = + pind2sz(pind)) { + assert_true(pind < max_pind, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + assert_true(size_class < max_size_class, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + + assert_u_eq(pind, psz2ind(size_class), + "psz2ind() does not reverse pind2sz(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, psz2ind(size_class), + pind2sz(psz2ind(size_class))); + assert_zu_eq(size_class, pind2sz(psz2ind(size_class)), + "pind2sz() does not reverse psz2ind(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, psz2ind(size_class), + pind2sz(psz2ind(size_class))); + + assert_u_eq(pind+1, psz2ind(size_class+1), + "Next size_class does not round up properly"); + + 
assert_zu_eq(size_class, (pind > 0) ? + psz2u(pind2sz(pind-1)+1) : psz2u(1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class-1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class), + "psz2u() does not compute same size class"); + assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1), + "psz2u() does not round up to next size class"); + } + + assert_u_eq(pind, psz2ind(pind2sz(pind)), + "psz2ind() does not reverse pind2sz()"); + assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)), + "pind2sz() does not reverse psz2ind()"); + + assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class-1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class), + "psz2u() does not compute same size class"); +} +TEST_END + +TEST_BEGIN(test_overflow) +{ + size_t max_size_class; + + max_size_class = get_max_size_class(); + + assert_u_eq(size2index(max_size_class+1), NSIZES, + "size2index() should return NSIZES on overflow"); + assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES, + "size2index() should return NSIZES on overflow"); + assert_u_eq(size2index(SIZE_T_MAX), NSIZES, + "size2index() should return NSIZES on overflow"); + + assert_zu_eq(s2u(max_size_class+1), 0, + "s2u() should return 0 for unsupported size"); + assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0, + "s2u() should return 0 for unsupported size"); + assert_zu_eq(s2u(SIZE_T_MAX), 0, + "s2u() should return 0 on overflow"); + + assert_u_eq(psz2ind(max_size_class+1), NPSIZES, + "psz2ind() should return NPSIZES on overflow"); + assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES, + "psz2ind() should return NPSIZES on overflow"); + assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES, + "psz2ind() should return NPSIZES on overflow"); + + assert_zu_eq(psz2u(max_size_class+1), 0, + "psz2u() should return 0 for unsupported size"); + assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0, + "psz2u() should return 0 for unsupported size"); + assert_zu_eq(psz2u(SIZE_T_MAX), 0, + "psz2u() should return 0 on overflow"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_size_classes, + test_psize_classes, + test_overflow)); +} diff --git a/memory/jemalloc/src/test/unit/smoothstep.c b/memory/jemalloc/src/test/unit/smoothstep.c new file mode 100644 index 000000000..4cfb21343 --- /dev/null +++ b/memory/jemalloc/src/test/unit/smoothstep.c @@ -0,0 +1,106 @@ +#include "test/jemalloc_test.h" + +static const uint64_t smoothstep_tab[] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP +}; + +TEST_BEGIN(test_smoothstep_integral) +{ + uint64_t sum, min, max; + unsigned i; + + /* + * The integral of smoothstep in the [0..1] range equals 1/2. Verify + * that the fixed point representation's integral is no more than + * rounding error distant from 1/2. Regarding rounding, each table + * element is rounded down to the nearest fixed point value, so the + * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. 
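+ * (By the symmetry h(x) + h(1-x) = 1, the exact sum of the table
+ * entries h(1/n)..h(n/n), n = SMOOTHSTEP_NSTEPS, is (n+1)/2; that
+ * is the "max" bound computed below, and truncation can only make
+ * the sum smaller.)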
+ */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) + sum += smoothstep_tab[i]; + + max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); + min = max - SMOOTHSTEP_NSTEPS; + + assert_u64_ge(sum, min, + "Integral too small, even accounting for truncation"); + assert_u64_le(sum, max, "Integral exceeds 1/2"); + if (false) { + malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", + max - sum, SMOOTHSTEP_NSTEPS); + } +} +TEST_END + +TEST_BEGIN(test_smoothstep_monotonic) +{ + uint64_t prev_h; + unsigned i; + + /* + * The smoothstep function is monotonic in [0..1], i.e. its slope is + * non-negative. In practice we want to parametrize table generation + * such that piecewise slope is greater than zero, but do not require + * that here. + */ + prev_h = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + uint64_t h = smoothstep_tab[i]; + assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); + prev_h = h; + } + assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], + (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1"); +} +TEST_END + +TEST_BEGIN(test_smoothstep_slope) +{ + uint64_t prev_h, prev_delta; + unsigned i; + + /* + * The smoothstep slope strictly increases until x=0.5, and then + * strictly decreases until x=1.0. Verify the slightly weaker + * requirement of monotonicity, so that inadequate table precision does + * not cause false test failures. + */ + prev_h = 0; + prev_delta = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) { + uint64_t h = smoothstep_tab[i]; + uint64_t delta = h - prev_h; + assert_u64_ge(delta, prev_delta, + "Slope must monotonically increase in 0.0 <= x <= 0.5, " + "i=%u", i); + prev_h = h; + prev_delta = delta; + } + + prev_h = KQU(1) << SMOOTHSTEP_BFP; + prev_delta = 0; + for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { + uint64_t h = smoothstep_tab[i]; + uint64_t delta = prev_h - h; + assert_u64_ge(delta, prev_delta, + "Slope must monotonically decrease in 0.5 <= x <= 1.0, " + "i=%u", i); + prev_h = h; + prev_delta = delta; + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_smoothstep_integral, + test_smoothstep_monotonic, + test_smoothstep_slope)); +} diff --git a/memory/jemalloc/src/test/unit/stats.c b/memory/jemalloc/src/test/unit/stats.c new file mode 100644 index 000000000..a9a3981fb --- /dev/null +++ b/memory/jemalloc/src/test/unit/stats.c @@ -0,0 +1,447 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_stats_summary) +{ + size_t *cactive; + size_t sz, allocated, active, resident, mapped; + int expected = config_stats ? 
0 : ENOENT;
+
+ sz = sizeof(cactive);
+ assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_le(active, *cactive,
+ "active should be no larger than cactive");
+ assert_zu_le(allocated, active,
+ "allocated should be no larger than active");
+ assert_zu_lt(active, resident,
+ "active should be less than resident");
+ assert_zu_lt(active, mapped,
+ "active should be less than mapped");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_huge)
+{
+ void *p;
+ uint64_t epoch;
+ size_t allocated;
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx(large_maxclass+1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_le(nmalloc, nrequests,
+ "nmalloc should be no larger than nrequests");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_summary)
+{
+ unsigned arena;
+ void *little, *large, *huge;
+ uint64_t epoch;
+ size_t sz;
+ int expected = config_stats ?
0 : ENOENT;
+ size_t mapped;
+ uint64_t npurge, nmadvise, purged;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ little = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(little, "Unexpected mallocx() failure");
+ large = mallocx(large_maxclass, 0);
+ assert_ptr_not_null(large, "Unexpected mallocx() failure");
+ huge = mallocx(chunksize, 0);
+ assert_ptr_not_null(huge, "Unexpected mallocx() failure");
+
+ dallocx(little, 0);
+ dallocx(large, 0);
+ dallocx(huge, 0);
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(npurge, 0,
+ "At least one purge should have occurred");
+ assert_u64_le(nmadvise, purged,
+ "nmadvise should be no greater than purged");
+ }
+}
+TEST_END
+
+void *
+thd_start(void *arg)
+{
+
+ return (NULL);
+}
+
+static void
+no_lazy_lock(void)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+
+TEST_BEGIN(test_stats_arenas_small)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_large)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ?
0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(large_maxclass, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_huge) +{ + unsigned arena; + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc; + int expected = config_stats ? 0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(chunksize, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_bins) +{ + unsigned arena; + void *p; + size_t sz, curruns, curregs; + uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nruns, nreruns; + int expected = config_stats ? 0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(arena_bin_info[0].reg_size, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + config_tcache ? 
0 : ENOENT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
+ "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ assert_zu_gt(curregs, 0,
+ "curregs should be greater than zero");
+ if (config_tcache) {
+ assert_u64_gt(nfills, 0,
+ "At least one fill should have occurred");
+ assert_u64_gt(nflushes, 0,
+ "At least one flush should have occurred");
+ }
+ assert_u64_gt(nruns, 0,
+ "At least one run should have been allocated");
+ assert_zu_gt(curruns, 0,
+ "At least one run should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_lruns)
+{
+ unsigned arena;
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ size_t curruns, sz;
+ int expected = config_stats ?
0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(LARGE_MINCLASS, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + assert_u64_gt(curruns, 0, + "At least one run should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_hchunks) +{ + unsigned arena; + void *p; + uint64_t epoch, nmalloc, ndalloc; + size_t curhchunks, sz; + int expected = config_stats ? 0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(chunksize, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(curhchunks, 0, + "At least one chunk should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_stats_summary, + test_stats_huge, + test_stats_arenas_summary, + test_stats_arenas_small, + test_stats_arenas_large, + test_stats_arenas_huge, + test_stats_arenas_bins, + test_stats_arenas_lruns, + test_stats_arenas_hchunks)); +} diff --git a/memory/jemalloc/src/test/unit/ticker.c b/memory/jemalloc/src/test/unit/ticker.c new file mode 100644 index 000000000..e737020ab --- /dev/null +++ b/memory/jemalloc/src/test/unit/ticker.c @@ -0,0 +1,76 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_ticker_tick) +{ +#define NREPS 2 +#define NTICKS 3 + ticker_t ticker; + int32_t i, j; + + ticker_init(&ticker, NTICKS); + for (i = 0; i < NREPS; i++) { + for (j = 0; j < NTICKS; j++) { + assert_u_eq(ticker_read(&ticker), NTICKS - j, + "Unexpected ticker value (i=%d, j=%d)", i, j); + assert_false(ticker_tick(&ticker), + "Unexpected ticker fire (i=%d, j=%d)", i, j); + } + assert_u32_eq(ticker_read(&ticker), 0, + "Expected ticker depletion"); + assert_true(ticker_tick(&ticker), + "Expected ticker 
fire (i=%d)", i); + assert_u32_eq(ticker_read(&ticker), NTICKS, + "Expected ticker reset"); + } +#undef NTICKS +} +TEST_END + +TEST_BEGIN(test_ticker_ticks) +{ +#define NTICKS 3 + ticker_t ticker; + + ticker_init(&ticker, NTICKS); + + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); + assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); + assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); + assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); + + assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); +#undef NTICKS +} +TEST_END + +TEST_BEGIN(test_ticker_copy) +{ +#define NTICKS 3 + ticker_t ta, tb; + + ticker_init(&ta, NTICKS); + ticker_copy(&tb, &ta); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); + assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); + + ticker_tick(&ta); + ticker_copy(&tb, &ta); + assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); + assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); +#undef NTICKS +} +TEST_END + +int +main(void) +{ + + return (test( + test_ticker_tick, + test_ticker_ticks, + test_ticker_copy)); +} diff --git a/memory/jemalloc/src/test/unit/tsd.c b/memory/jemalloc/src/test/unit/tsd.c new file mode 100644 index 000000000..4e2622a34 --- /dev/null +++ b/memory/jemalloc/src/test/unit/tsd.c @@ -0,0 +1,112 @@ +#include "test/jemalloc_test.h" + +#define THREAD_DATA 0x72b65c10 + +typedef unsigned int data_t; + +static bool data_cleanup_executed; + +malloc_tsd_types(data_, data_t) +malloc_tsd_protos(, data_, data_t) + +void +data_cleanup(void *arg) +{ + data_t *data = (data_t *)arg; + + if (!data_cleanup_executed) { + assert_x_eq(*data, THREAD_DATA, + "Argument passed into cleanup function should match tsd " + "value"); + } + data_cleanup_executed = true; + + /* + * Allocate during cleanup for two rounds, in order to assure that + * jemalloc's internal tsd reinitialization happens. 
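+ * (Each data_tsd_set() performed inside the cleanup re-registers
+ * the value, causing the cleanup to run again on the next
+ * teardown pass; the THREAD_DATA -> 1 -> 2 chain ends the
+ * recursion after two allocating rounds.)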
+ */
+ switch (*data) {
+ case THREAD_DATA:
+ *data = 1;
+ data_tsd_set(data);
+ break;
+ case 1:
+ *data = 2;
+ data_tsd_set(data);
+ break;
+ case 2:
+ return;
+ default:
+ not_reached();
+ }
+
+ {
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+ }
+}
+
+malloc_tsd_externs(data_, data_t)
+#define DATA_INIT 0x12345678
+malloc_tsd_data(, data_, data_t, DATA_INIT)
+malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup)
+
+static void *
+thd_start(void *arg)
+{
+ data_t d = (data_t)(uintptr_t)arg;
+ void *p;
+
+ assert_x_eq(*data_tsd_get(true), DATA_INIT,
+ "Initial tsd get should return initialization value");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+ data_tsd_set(&d);
+ assert_x_eq(*data_tsd_get(true), d,
+ "After tsd set, tsd get should return value that was set");
+
+ d = 0;
+ assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
+ "Resetting local data should have no effect on tsd");
+
+ free(p);
+ return (NULL);
+}
+
+TEST_BEGIN(test_tsd_main_thread)
+{
+
+ thd_start((void *) 0xa5f3e329);
+}
+TEST_END
+
+TEST_BEGIN(test_tsd_sub_thread)
+{
+ thd_t thd;
+
+ data_cleanup_executed = false;
+ thd_create(&thd, thd_start, (void *)THREAD_DATA);
+ thd_join(thd, NULL);
+ assert_true(data_cleanup_executed,
+ "Cleanup function should have executed");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return (test_status_fail);
+ }
+ data_tsd_boot();
+
+ return (test(
+ test_tsd_main_thread,
+ test_tsd_sub_thread));
+}
diff --git a/memory/jemalloc/src/test/unit/util.c b/memory/jemalloc/src/test/unit/util.c
new file mode 100644
index 000000000..c958dc0fb
--- /dev/null
+++ b/memory/jemalloc/src/test/unit/util.c
@@ -0,0 +1,317 @@
+#include "test/jemalloc_test.h"
+
+#define TEST_POW2_CEIL(t, suf, pri) do { \
+ unsigned i, pow2; \
+ t x; \
+ \
+ assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
+ \
+ for (i = 0; i < sizeof(t) * 8; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
+ << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 2; i < sizeof(t) * 8; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
+ ((t)1) << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
+ ((t)1) << (i+1), "Unexpected result"); \
+ } \
+ \
+ for (pow2 = 1; pow2 < 25; pow2++) { \
+ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
+ x++) { \
+ assert_##suf##_eq(pow2_ceil_##suf(x), \
+ ((t)1) << pow2, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+} while (0)
+
+TEST_BEGIN(test_pow2_ceil_u64)
+{
+
+ TEST_POW2_CEIL(uint64_t, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_u32)
+{
+
+ TEST_POW2_CEIL(uint32_t, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_zu)
+{
+
+ TEST_POW2_CEIL(size_t, zu, "zu");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax_no_endptr)
+{
+ int err;
+
+ set_errno(0);
+ assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
+ err = get_errno();
+ assert_d_eq(err, 0, "Unexpected failure");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax)
+{
+ struct test_s {
+ const char *input;
+ const char *expected_remainder;
+ int base;
+ int expected_errno;
+ const char *expected_errno_name;
+ uintmax_t expected_x;
+ };
+#define ERR(e) e, #e
+#define KUMAX(x) ((uintmax_t)x##ULL)
+ struct test_s tests[] = {
+ {"0", "0",
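+ /* base must be 0 or in [2, 36]; -1, 1, and 37 must all fail: */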
-1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, + + {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, + {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, + {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, + {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {"+42", "", 0, ERR(0), KUMAX(42)}, + {"-42", "", 0, ERR(0), KUMAX(-42)}, + {"042", "", 0, ERR(0), KUMAX(042)}, + {"+042", "", 0, ERR(0), KUMAX(042)}, + {"-042", "", 0, ERR(0), KUMAX(-042)}, + {"0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"-0x42", "", 0, ERR(0), KUMAX(-0x42)}, + + {"0", "", 0, ERR(0), KUMAX(0)}, + {"1", "", 0, ERR(0), KUMAX(1)}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {" 42", "", 0, ERR(0), KUMAX(42)}, + {"42 ", " ", 0, ERR(0), KUMAX(42)}, + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"42x", "x", 0, ERR(0), KUMAX(42)}, + + {"07", "", 0, ERR(0), KUMAX(7)}, + {"010", "", 0, ERR(0), KUMAX(8)}, + {"08", "8", 0, ERR(0), KUMAX(0)}, + {"0_", "_", 0, ERR(0), KUMAX(0)}, + + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"0X", "X", 0, ERR(0), KUMAX(0)}, + {"0xg", "xg", 0, ERR(0), KUMAX(0)}, + {"0XA", "", 0, ERR(0), KUMAX(10)}, + + {"010", "", 10, ERR(0), KUMAX(10)}, + {"0x3", "x3", 10, ERR(0), KUMAX(0)}, + + {"12", "2", 2, ERR(0), KUMAX(1)}, + {"78", "8", 8, ERR(0), KUMAX(7)}, + {"9a", "a", 10, ERR(0), KUMAX(9)}, + {"9A", "A", 10, ERR(0), KUMAX(9)}, + {"fg", "g", 16, ERR(0), KUMAX(15)}, + {"FG", "G", 16, ERR(0), KUMAX(15)}, + {"0xfg", "g", 16, ERR(0), KUMAX(15)}, + {"0XFG", "G", 16, ERR(0), KUMAX(15)}, + {"z_", "_", 36, ERR(0), KUMAX(35)}, + {"Z_", "_", 36, ERR(0), KUMAX(35)} + }; +#undef ERR +#undef KUMAX + unsigned i; + + for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { + struct test_s *test = &tests[i]; + int err; + uintmax_t result; + char *remainder; + + set_errno(0); + result = malloc_strtoumax(test->input, &remainder, test->base); + err = get_errno(); + assert_d_eq(err, test->expected_errno, + "Expected errno %s for \"%s\", base %d", + test->expected_errno_name, test->input, test->base); + assert_str_eq(remainder, test->expected_remainder, + "Unexpected remainder for \"%s\", base %d", + test->input, test->base); + if (err == 0) { + assert_ju_eq(result, test->expected_x, + "Unexpected result for \"%s\", base %d", + test->input, test->base); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf_truncated) +{ +#define BUFLEN 15 + char buf[BUFLEN]; + size_t result; + size_t len; +#define TEST(expected_str_untruncated, ...) do { \ + result = malloc_snprintf(buf, len, __VA_ARGS__); \ + assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ + "Unexpected string inequality (\"%s\" vs \"%s\")", \ + buf, expected_str_untruncated); \ + assert_zu_eq(result, strlen(expected_str_untruncated), \ + "Unexpected result"); \ +} while (0) + + for (len = 1; len < BUFLEN; len++) { + TEST("012346789", "012346789"); + TEST("a0123b", "a%sb", "0123"); + TEST("a01234567", "a%s%s", "0123", "4567"); + TEST("a0123 ", "a%-6s", "0123"); + TEST("a 0123", "a%6s", "0123"); + TEST("a 012", "a%6.3s", "0123"); + TEST("a 012", "a%*.*s", 6, 3, "0123"); + TEST("a 123b", "a% db", 123); + TEST("a123b", "a%-db", 123); + TEST("a-123b", "a%-db", -123); + TEST("a+123b", "a%+db", 123); + } +#undef BUFLEN +#undef TEST +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf) +{ +#define BUFLEN 128 + char buf[BUFLEN]; + size_t result; +#define TEST(expected_str, ...) 
do { \ + result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ + assert_str_eq(buf, expected_str, "Unexpected output"); \ + assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ +} while (0) + + TEST("hello", "hello"); + + TEST("50%, 100%", "50%%, %d%%", 100); + + TEST("a0123b", "a%sb", "0123"); + + TEST("a 0123b", "a%5sb", "0123"); + TEST("a 0123b", "a%*sb", 5, "0123"); + + TEST("a0123 b", "a%-5sb", "0123"); + TEST("a0123b", "a%*sb", -1, "0123"); + TEST("a0123 b", "a%*sb", -5, "0123"); + TEST("a0123 b", "a%-*sb", -5, "0123"); + + TEST("a012b", "a%.3sb", "0123"); + TEST("a012b", "a%.*sb", 3, "0123"); + TEST("a0123b", "a%.*sb", -3, "0123"); + + TEST("a 012b", "a%5.3sb", "0123"); + TEST("a 012b", "a%5.*sb", 3, "0123"); + TEST("a 012b", "a%*.3sb", 5, "0123"); + TEST("a 012b", "a%*.*sb", 5, 3, "0123"); + TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); + + TEST("_abcd_", "_%x_", 0xabcd); + TEST("_0xabcd_", "_%#x_", 0xabcd); + TEST("_1234_", "_%o_", 01234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + + TEST("_1234_", "_%d_", 1234); + TEST("_ 1234_", "_% d_", 1234); + TEST("_+1234_", "_%+d_", 1234); + TEST("_-1234_", "_%d_", -1234); + TEST("_-1234_", "_% d_", -1234); + TEST("_-1234_", "_%+d_", -1234); + + TEST("_-1234_", "_%d_", -1234); + TEST("_1234_", "_%d_", 1234); + TEST("_-1234_", "_%i_", -1234); + TEST("_1234_", "_%i_", 1234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + TEST("_0x1234abc_", "_%#x_", 0x1234abc); + TEST("_0X1234ABC_", "_%#X_", 0x1234abc); + TEST("_c_", "_%c_", 'c'); + TEST("_string_", "_%s_", "string"); + TEST("_0x42_", "_%p_", ((void *)0x42)); + + TEST("_-1234_", "_%ld_", ((long)-1234)); + TEST("_1234_", "_%ld_", ((long)1234)); + TEST("_-1234_", "_%li_", ((long)-1234)); + TEST("_1234_", "_%li_", ((long)1234)); + TEST("_01234_", "_%#lo_", ((long)01234)); + TEST("_1234_", "_%lu_", ((long)1234)); + TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); + TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); + + TEST("_-1234_", "_%lld_", ((long long)-1234)); + TEST("_1234_", "_%lld_", ((long long)1234)); + TEST("_-1234_", "_%lli_", ((long long)-1234)); + TEST("_1234_", "_%lli_", ((long long)1234)); + TEST("_01234_", "_%#llo_", ((long long)01234)); + TEST("_1234_", "_%llu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%qd_", ((long long)-1234)); + TEST("_1234_", "_%qd_", ((long long)1234)); + TEST("_-1234_", "_%qi_", ((long long)-1234)); + TEST("_1234_", "_%qi_", ((long long)1234)); + TEST("_01234_", "_%#qo_", ((long long)01234)); + TEST("_1234_", "_%qu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); + TEST("_1234_", "_%jd_", ((intmax_t)1234)); + TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); + TEST("_1234_", "_%ji_", ((intmax_t)1234)); + TEST("_01234_", "_%#jo_", ((intmax_t)01234)); + TEST("_1234_", "_%ju_", ((intmax_t)1234)); + TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); + + TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); + TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); + + TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); + TEST("_1234_", "_%zd_", ((ssize_t)1234)); + TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); + TEST("_1234_", 
"_%zi_", ((ssize_t)1234)); + TEST("_01234_", "_%#zo_", ((ssize_t)01234)); + TEST("_1234_", "_%zu_", ((ssize_t)1234)); + TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); +#undef BUFLEN +} +TEST_END + +int +main(void) +{ + + return (test( + test_pow2_ceil_u64, + test_pow2_ceil_u32, + test_pow2_ceil_zu, + test_malloc_strtoumax_no_endptr, + test_malloc_strtoumax, + test_malloc_snprintf_truncated, + test_malloc_snprintf)); +} diff --git a/memory/jemalloc/src/test/unit/witness.c b/memory/jemalloc/src/test/unit/witness.c new file mode 100644 index 000000000..ed172753c --- /dev/null +++ b/memory/jemalloc/src/test/unit/witness.c @@ -0,0 +1,278 @@ +#include "test/jemalloc_test.h" + +static witness_lock_error_t *witness_lock_error_orig; +static witness_owner_error_t *witness_owner_error_orig; +static witness_not_owner_error_t *witness_not_owner_error_orig; +static witness_lockless_error_t *witness_lockless_error_orig; + +static bool saw_lock_error; +static bool saw_owner_error; +static bool saw_not_owner_error; +static bool saw_lockless_error; + +static void +witness_lock_error_intercept(const witness_list_t *witnesses, + const witness_t *witness) +{ + + saw_lock_error = true; +} + +static void +witness_owner_error_intercept(const witness_t *witness) +{ + + saw_owner_error = true; +} + +static void +witness_not_owner_error_intercept(const witness_t *witness) +{ + + saw_not_owner_error = true; +} + +static void +witness_lockless_error_intercept(const witness_list_t *witnesses) +{ + + saw_lockless_error = true; +} + +static int +witness_comp(const witness_t *a, const witness_t *b) +{ + + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + return (strcmp(a->name, b->name)); +} + +static int +witness_comp_reverse(const witness_t *a, const witness_t *b) +{ + + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + return (-strcmp(a->name, b->name)); +} + +TEST_BEGIN(test_witness) +{ + witness_t a, b; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, NULL); + witness_assert_not_owner(tsdn, &a); + witness_lock(tsdn, &a); + witness_assert_owner(tsdn, &a); + + witness_init(&b, "b", 2, NULL); + witness_assert_not_owner(tsdn, &b); + witness_lock(tsdn, &b); + witness_assert_owner(tsdn, &b); + + witness_unlock(tsdn, &a); + witness_unlock(tsdn, &b); + + witness_assert_lockless(tsdn); +} +TEST_END + +TEST_BEGIN(test_witness_comp) +{ + witness_t a, b, c, d; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, witness_comp); + witness_assert_not_owner(tsdn, &a); + witness_lock(tsdn, &a); + witness_assert_owner(tsdn, &a); + + witness_init(&b, "b", 1, witness_comp); + witness_assert_not_owner(tsdn, &b); + witness_lock(tsdn, &b); + witness_assert_owner(tsdn, &b); + witness_unlock(tsdn, &b); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_init(&c, "c", 1, witness_comp_reverse); + witness_assert_not_owner(tsdn, &c); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(tsdn, &c); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(tsdn, &c); + + saw_lock_error = false; + + witness_init(&d, "d", 1, NULL); + witness_assert_not_owner(tsdn, &d); + assert_false(saw_lock_error, "Unexpected witness lock error"); + 
witness_lock(tsdn, &d);
+ assert_true(saw_lock_error, "Expected witness lock error");
+ witness_unlock(tsdn, &d);
+
+ witness_unlock(tsdn, &a);
+
+ witness_assert_lockless(tsdn);
+
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_reversal)
+{
+ witness_t a, b;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+ witness_init(&b, "b", 2, NULL);
+
+ witness_lock(tsdn, &b);
+ assert_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(tsdn, &a);
+ assert_true(saw_lock_error, "Expected witness lock error");
+
+ witness_unlock(tsdn, &a);
+ witness_unlock(tsdn, &b);
+
+ witness_assert_lockless(tsdn);
+
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_recursive)
+{
+ witness_t a;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_not_owner_error_orig = witness_not_owner_error;
+ witness_not_owner_error = witness_not_owner_error_intercept;
+ saw_not_owner_error = false;
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+
+ witness_lock(tsdn, &a);
+ assert_false(saw_lock_error, "Unexpected witness lock error");
+ assert_false(saw_not_owner_error, "Unexpected witness not owner error");
+ witness_lock(tsdn, &a);
+ assert_true(saw_lock_error, "Expected witness lock error");
+ assert_true(saw_not_owner_error, "Expected witness not owner error");
+
+ witness_unlock(tsdn, &a);
+
+ witness_assert_lockless(tsdn);
+
+ witness_not_owner_error = witness_not_owner_error_orig;
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_unlock_not_owned)
+{
+ witness_t a;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_owner_error_orig = witness_owner_error;
+ witness_owner_error = witness_owner_error_intercept;
+ saw_owner_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+
+ assert_false(saw_owner_error, "Unexpected owner error");
+ witness_unlock(tsdn, &a);
+ assert_true(saw_owner_error, "Expected owner error");
+
+ witness_assert_lockless(tsdn);
+
+ witness_owner_error = witness_owner_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_lockful)
+{
+ witness_t a;
+ tsdn_t *tsdn;
+
+ test_skip_if(!config_debug);
+
+ witness_lockless_error_orig = witness_lockless_error;
+ witness_lockless_error = witness_lockless_error_intercept;
+ saw_lockless_error = false;
+
+ tsdn = tsdn_fetch();
+
+ witness_assert_lockless(tsdn);
+
+ witness_init(&a, "a", 1, NULL);
+
+ assert_false(saw_lockless_error, "Unexpected lockless error");
+ witness_assert_lockless(tsdn);
+
+ witness_lock(tsdn, &a);
+ witness_assert_lockless(tsdn);
+ assert_true(saw_lockless_error, "Expected lockless error");
+
+ witness_unlock(tsdn, &a);
+
+ witness_assert_lockless(tsdn);
+
+ witness_lockless_error = witness_lockless_error_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_witness,
+ test_witness_comp,
+ test_witness_reversal,
+ test_witness_recursive,
+ test_witness_unlock_not_owned,
+ test_witness_lockful));
+}
diff --git a/memory/jemalloc/src/test/unit/zero.c b/memory/jemalloc/src/test/unit/zero.c
new file mode 100644
index
000000000..30ebe37a4 --- /dev/null +++ b/memory/jemalloc/src/test/unit/zero.c @@ -0,0 +1,80 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = + "abort:false,junk:false,zero:true,redzone:false,quarantine:0"; +#endif + +static void +test_zero(size_t sz_min, size_t sz_max) +{ + uint8_t *s; + size_t sz_prev, sz, i; +#define MAGIC ((uint8_t)0x61) + + sz_prev = 0; + s = (uint8_t *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_u_eq(s[0], MAGIC, + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_u_eq(s[sz_prev-1], MAGIC, + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + assert_u_eq(s[i], 0x0, + "Newly allocated byte %zu/%zu isn't zero-filled", + i, sz); + s[i] = MAGIC; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + s = (uint8_t *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)s, + "Unexpected rallocx() failure"); + } + } + + dallocx(s, 0); +#undef MAGIC +} + +TEST_BEGIN(test_zero_small) +{ + + test_skip_if(!config_fill); + test_zero(1, SMALL_MAXCLASS-1); +} +TEST_END + +TEST_BEGIN(test_zero_large) +{ + + test_skip_if(!config_fill); + test_zero(SMALL_MAXCLASS+1, large_maxclass); +} +TEST_END + +TEST_BEGIN(test_zero_huge) +{ + + test_skip_if(!config_fill); + test_zero(large_maxclass+1, chunksize*2); +} +TEST_END + +int +main(void) +{ + + return (test( + test_zero_small, + test_zero_large, + test_zero_huge)); +} |