author      Matt A. Tobin <mattatobin@localhost.localdomain>    2018-02-02 04:16:08 -0500
committer   Matt A. Tobin <mattatobin@localhost.localdomain>    2018-02-02 04:16:08 -0500
commit      5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree        10027f336435511475e392454359edea8e25895d /media/ffvpx/libavutil/cpu.c
parent      49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
download    UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar (.tar.gz, .tar.lz, .tar.xz, .zip)
Add m-esr52 at 52.6.0
Diffstat (limited to 'media/ffvpx/libavutil/cpu.c')
-rw-r--r--    media/ffvpx/libavutil/cpu.c    296
1 file changed, 296 insertions, 0 deletions
diff --git a/media/ffvpx/libavutil/cpu.c b/media/ffvpx/libavutil/cpu.c
new file mode 100644
index 000000000..f5785fc13
--- /dev/null
+++ b/media/ffvpx/libavutil/cpu.c
@@ -0,0 +1,296 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "cpu.h"
+#include "cpu_internal.h"
+#include "config.h"
+#include "opt.h"
+#include "common.h"
+
+#if HAVE_SCHED_GETAFFINITY
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+#include <sched.h>
+#endif
+#if HAVE_GETPROCESSAFFINITYMASK || HAVE_WINRT
+#include <windows.h>
+#endif
+#if HAVE_SYSCTL
+#if HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static int flags, checked;
+
+void av_force_cpu_flags(int arg){
+    if (   (arg & ( AV_CPU_FLAG_3DNOW    |
+                    AV_CPU_FLAG_3DNOWEXT |
+                    AV_CPU_FLAG_MMXEXT   |
+                    AV_CPU_FLAG_SSE      |
+                    AV_CPU_FLAG_SSE2     |
+                    AV_CPU_FLAG_SSE2SLOW |
+                    AV_CPU_FLAG_SSE3     |
+                    AV_CPU_FLAG_SSE3SLOW |
+                    AV_CPU_FLAG_SSSE3    |
+                    AV_CPU_FLAG_SSE4     |
+                    AV_CPU_FLAG_SSE42    |
+                    AV_CPU_FLAG_AVX      |
+                    AV_CPU_FLAG_AVXSLOW  |
+                    AV_CPU_FLAG_XOP      |
+                    AV_CPU_FLAG_FMA3     |
+                    AV_CPU_FLAG_FMA4     |
+                    AV_CPU_FLAG_AVX2     ))
+        && !(arg & AV_CPU_FLAG_MMX)) {
+        av_log(NULL, AV_LOG_WARNING, "MMX implied by specified flags\n");
+        arg |= AV_CPU_FLAG_MMX;
+    }
+
+    flags   = arg;
+    checked = arg != -1;
+}
+
+int av_get_cpu_flags(void)
+{
+    if (checked)
+        return flags;
+
+    if (ARCH_AARCH64)
+        flags = ff_get_cpu_flags_aarch64();
+    if (ARCH_ARM)
+        flags = ff_get_cpu_flags_arm();
+    if (ARCH_PPC)
+        flags = ff_get_cpu_flags_ppc();
+    if (ARCH_X86)
+        flags = ff_get_cpu_flags_x86();
+
+    checked = 1;
+    return flags;
+}
+
+void av_set_cpu_flags_mask(int mask)
+{
+    checked = 0;
+    flags   = av_get_cpu_flags() & mask;
+    checked = 1;
+}
+
+int av_parse_cpu_flags(const char *s)
+{
+#define CPUFLAG_MMXEXT   (AV_CPU_FLAG_MMX      | AV_CPU_FLAG_MMXEXT | AV_CPU_FLAG_CMOV)
+#define CPUFLAG_3DNOW    (AV_CPU_FLAG_3DNOW    | AV_CPU_FLAG_MMX)
+#define CPUFLAG_3DNOWEXT (AV_CPU_FLAG_3DNOWEXT | CPUFLAG_3DNOW)
+#define CPUFLAG_SSE      (AV_CPU_FLAG_SSE      | CPUFLAG_MMXEXT)
+#define CPUFLAG_SSE2     (AV_CPU_FLAG_SSE2     | CPUFLAG_SSE)
+#define CPUFLAG_SSE2SLOW (AV_CPU_FLAG_SSE2SLOW | CPUFLAG_SSE2)
+#define CPUFLAG_SSE3     (AV_CPU_FLAG_SSE3     | CPUFLAG_SSE2)
+#define CPUFLAG_SSE3SLOW (AV_CPU_FLAG_SSE3SLOW | CPUFLAG_SSE3)
+#define CPUFLAG_SSSE3    (AV_CPU_FLAG_SSSE3    | CPUFLAG_SSE3)
+#define CPUFLAG_SSE4     (AV_CPU_FLAG_SSE4     | CPUFLAG_SSSE3)
+#define CPUFLAG_SSE42    (AV_CPU_FLAG_SSE42    | CPUFLAG_SSE4)
+#define CPUFLAG_AVX      (AV_CPU_FLAG_AVX      | CPUFLAG_SSE42)
+#define CPUFLAG_AVXSLOW  (AV_CPU_FLAG_AVXSLOW  | CPUFLAG_AVX)
+#define CPUFLAG_XOP      (AV_CPU_FLAG_XOP      | CPUFLAG_AVX)
+#define CPUFLAG_FMA3     (AV_CPU_FLAG_FMA3     | CPUFLAG_AVX)
+#define CPUFLAG_FMA4     (AV_CPU_FLAG_FMA4     | CPUFLAG_AVX)
+#define CPUFLAG_AVX2     (AV_CPU_FLAG_AVX2     | CPUFLAG_AVX)
+#define CPUFLAG_BMI2     (AV_CPU_FLAG_BMI2     | AV_CPU_FLAG_BMI1)
+#define CPUFLAG_AESNI    (AV_CPU_FLAG_AESNI    | CPUFLAG_SSE42)
+    static const AVOption cpuflags_opts[] = {
+        { "flags"   , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+#if   ARCH_PPC
+        { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC  },    .unit = "flags" },
+#elif ARCH_X86
+        { "mmx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
+        { "mmxext"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_MMXEXT       },    .unit = "flags" },
+        { "sse"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE          },    .unit = "flags" },
+        { "sse2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2         },    .unit = "flags" },
+        { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2SLOW     },    .unit = "flags" },
+        { "sse3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3         },    .unit = "flags" },
+        { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3SLOW     },    .unit = "flags" },
+        { "ssse3"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSSE3        },    .unit = "flags" },
+        { "atom"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM     },    .unit = "flags" },
+        { "sse4.1"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE4         },    .unit = "flags" },
+        { "sse4.2"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE42        },    .unit = "flags" },
+        { "avx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX          },    .unit = "flags" },
+        { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVXSLOW      },    .unit = "flags" },
+        { "xop"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_XOP          },    .unit = "flags" },
+        { "fma3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA3         },    .unit = "flags" },
+        { "fma4"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA4         },    .unit = "flags" },
+        { "avx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX2         },    .unit = "flags" },
+        { "bmi1"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1     },    .unit = "flags" },
+        { "bmi2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_BMI2         },    .unit = "flags" },
+        { "3dnow"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOW        },    .unit = "flags" },
+        { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOWEXT     },    .unit = "flags" },
+        { "cmov",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV     },    .unit = "flags" },
+        { "aesni"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AESNI        },    .unit = "flags" },
+#elif ARCH_ARM
+        { "armv5te",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE  },    .unit = "flags" },
+        { "armv6",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6    },    .unit = "flags" },
+        { "armv6t2",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2  },    .unit = "flags" },
+        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
+        { "vfp_vm",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM   },    .unit = "flags" },
+        { "vfpv3",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3    },    .unit = "flags" },
+        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
+#elif ARCH_AARCH64
+        { "armv8",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8    },    .unit = "flags" },
+        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
+        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
+#endif
+        { NULL },
+    };
+    static const AVClass class = {
+        .class_name = "cpuflags",
+        .item_name  = av_default_item_name,
+        .option     = cpuflags_opts,
+        .version    = LIBAVUTIL_VERSION_INT,
+    };
+
+    int flags = 0, ret;
+    const AVClass *pclass = &class;
+
+    if ((ret = av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, &flags)) < 0)
+        return ret;
+
+    return flags & INT_MAX;
+}
+
+int av_parse_cpu_caps(unsigned *flags, const char *s)
+{
+    static const AVOption cpuflags_opts[] = {
+        { "flags"   , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+#if   ARCH_PPC
+        { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC  },    .unit = "flags" },
+#elif ARCH_X86
+        { "mmx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
+        { "mmx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2     },    .unit = "flags" },
+        { "mmxext"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX2     },    .unit = "flags" },
+        { "sse"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE      },    .unit = "flags" },
+        { "sse2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2     },    .unit = "flags" },
+        { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE2SLOW },    .unit = "flags" },
+        { "sse3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3     },    .unit = "flags" },
+        { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE3SLOW },    .unit = "flags" },
+        { "ssse3"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSSE3    },    .unit = "flags" },
+        { "atom"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM     },    .unit = "flags" },
+        { "sse4.1"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE4     },    .unit = "flags" },
+        { "sse4.2"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SSE42    },    .unit = "flags" },
+        { "avx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX      },    .unit = "flags" },
+        { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVXSLOW  },    .unit = "flags" },
+        { "xop"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_XOP      },    .unit = "flags" },
+        { "fma3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA3     },    .unit = "flags" },
+        { "fma4"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_FMA4     },    .unit = "flags" },
+        { "avx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX2     },    .unit = "flags" },
+        { "bmi1"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1     },    .unit = "flags" },
+        { "bmi2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI2     },    .unit = "flags" },
+        { "3dnow"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOW    },    .unit = "flags" },
+        { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_3DNOWEXT },    .unit = "flags" },
+        { "cmov",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV     },    .unit = "flags" },
+        { "aesni",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AESNI    },    .unit = "flags" },
+
+#define CPU_FLAG_P2 AV_CPU_FLAG_CMOV | AV_CPU_FLAG_MMX
+#define CPU_FLAG_P3 CPU_FLAG_P2 | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE
+#define CPU_FLAG_P4 CPU_FLAG_P3| AV_CPU_FLAG_SSE2
+        { "pentium2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P2          },    .unit = "flags" },
+        { "pentium3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P3          },    .unit = "flags" },
+        { "pentium4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_P4          },    .unit = "flags" },
+
+#define CPU_FLAG_K62 AV_CPU_FLAG_MMX | AV_CPU_FLAG_3DNOW
+#define CPU_FLAG_ATHLON   CPU_FLAG_K62 | AV_CPU_FLAG_CMOV | AV_CPU_FLAG_3DNOWEXT | AV_CPU_FLAG_MMX2
+#define CPU_FLAG_ATHLONXP CPU_FLAG_ATHLON | AV_CPU_FLAG_SSE
+#define CPU_FLAG_K8  CPU_FLAG_ATHLONXP | AV_CPU_FLAG_SSE2
+        { "k6",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
+        { "k62",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K62         },    .unit = "flags" },
+        { "athlon",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLON      },    .unit = "flags" },
+        { "athlonxp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_ATHLONXP    },    .unit = "flags" },
+        { "k8",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPU_FLAG_K8          },    .unit = "flags" },
+#elif ARCH_ARM
+        { "armv5te",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE  },    .unit = "flags" },
+        { "armv6",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6    },    .unit = "flags" },
+        { "armv6t2",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2  },    .unit = "flags" },
+        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
+        { "vfp_vm",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM   },    .unit = "flags" },
+        { "vfpv3",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3    },    .unit = "flags" },
+        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
+        { "setend",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SETEND   },    .unit = "flags" },
+#elif ARCH_AARCH64
+        { "armv8",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8    },    .unit = "flags" },
+        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
+        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
+#endif
+        { NULL },
+    };
+    static const AVClass class = {
+        .class_name = "cpuflags",
+        .item_name  = av_default_item_name,
+        .option     = cpuflags_opts,
+        .version    = LIBAVUTIL_VERSION_INT,
+    };
+    const AVClass *pclass = &class;
+
+    return av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, flags);
+}
+
+int av_cpu_count(void)
+{
+    static volatile int printed;
+
+    int nb_cpus = 1;
+#if HAVE_WINRT
+    SYSTEM_INFO sysinfo;
+#endif
+#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
+    cpu_set_t cpuset;
+
+    CPU_ZERO(&cpuset);
+
+    if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
+        nb_cpus = CPU_COUNT(&cpuset);
+#elif HAVE_GETPROCESSAFFINITYMASK
+    DWORD_PTR proc_aff, sys_aff;
+    if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
+        nb_cpus = av_popcount64(proc_aff);
+#elif HAVE_SYSCTL && defined(HW_NCPU)
+    int mib[2] = { CTL_HW, HW_NCPU };
+    size_t len = sizeof(nb_cpus);
+
+    if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
+        nb_cpus = 0;
+#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
+    nb_cpus = sysconf(_SC_NPROC_ONLN);
+#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
+    nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+#elif HAVE_WINRT
+    GetNativeSystemInfo(&sysinfo);
+    nb_cpus = sysinfo.dwNumberOfProcessors;
+#endif
+
+    if (!printed) {
+        av_log(NULL, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
+        printed = 1;
+    }
+
+    return nb_cpus;
+}
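For context, a minimal usage sketch of the public entry points this file defines (av_get_cpu_flags(), av_parse_cpu_flags(), av_force_cpu_flags(), av_cpu_count()). The sketch is not part of the commit; it assumes the standard FFmpeg public header path <libavutil/cpu.h>, whereas this vendored ffvpx copy includes "cpu.h" directly, and the flag string "avx2" is just an example input.

/* Illustrative sketch only -- not part of this commit.
 * Assumes linking against libavutil and the usual <libavutil/cpu.h> header;
 * inside the ffvpx tree the equivalent include would be "cpu.h". */
#include <stdio.h>
#include <libavutil/cpu.h>

int main(void)
{
    /* Runtime-detected CPU feature flags; cached after the first call. */
    int detected = av_get_cpu_flags();
    printf("cpu flags: 0x%x, logical cores: %d\n", detected, av_cpu_count());

    /* Parse a flag string; each x86 name implies its prerequisites
     * (e.g. "avx2" also sets AVX, SSE4.2, ... down to MMX). */
    int parsed = av_parse_cpu_flags("avx2");
    if (parsed >= 0)
        av_force_cpu_flags(parsed);   /* override detection, e.g. for testing */

    /* Passing -1 clears the override so the next query re-detects. */
    av_force_cpu_flags(-1);
    return 0;
}

The override path mirrors what av_force_cpu_flags() does above: any forced value other than -1 marks the cached flags as checked, so later av_get_cpu_flags() calls return the forced set instead of probing the CPU again.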