| author | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
| committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
| commit | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch) | |
| tree | 10027f336435511475e392454359edea8e25895d /js/src/jit/arm64 | |
| parent | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff) | |
| download | UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip |
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/arm64')
54 files changed, 50666 insertions, 0 deletions
diff --git a/js/src/jit/arm64/Architecture-arm64.cpp b/js/src/jit/arm64/Architecture-arm64.cpp new file mode 100644 index 000000000..a5e62fb61 --- /dev/null +++ b/js/src/jit/arm64/Architecture-arm64.cpp @@ -0,0 +1,75 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/arm64/Architecture-arm64.h" + +#include <cstring> + +#include "jit/RegisterSets.h" + +namespace js { +namespace jit { + +Registers::Code +Registers::FromName(const char* name) +{ + // Check for some register aliases first. + if (strcmp(name, "ip0") == 0) + return ip0; + if (strcmp(name, "ip1") == 0) + return ip1; + if (strcmp(name, "fp") == 0) + return fp; + + for (uint32_t i = 0; i < Total; i++) { + if (strcmp(GetName(Code(i)), name) == 0) + return Code(i); + } + + return invalid_reg; +} + +FloatRegisters::Code +FloatRegisters::FromName(const char* name) +{ + for (size_t i = 0; i < Total; i++) { + if (strcmp(GetName(Code(i)), name) == 0) + return Code(i); + } + + return invalid_fpreg; +} + +FloatRegisterSet +FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) +{ + LiveFloatRegisterSet ret; + for (FloatRegisterIterator iter(s); iter.more(); ++iter) + ret.addUnchecked(FromCode((*iter).encoding())); + return ret.set(); +} + +uint32_t +FloatRegister::GetSizeInBytes(const FloatRegisterSet& s) +{ + return s.size() * sizeof(double); +} + +uint32_t +FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) +{ + return s.size() * sizeof(double); +} + +uint32_t +FloatRegister::getRegisterDumpOffsetInBytes() +{ + // Although registers are 128-bits wide, only the first 64 need saving per ABI. + return encoding() * sizeof(double); +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h new file mode 100644 index 000000000..e74340f13 --- /dev/null +++ b/js/src/jit/arm64/Architecture-arm64.h @@ -0,0 +1,462 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_Architecture_arm64_h +#define jit_arm64_Architecture_arm64_h + +#include "mozilla/Assertions.h" +#include "mozilla/MathAlgorithms.h" + +#include "js/Utility.h" + +namespace js { +namespace jit { + +// AArch64 has 32 64-bit integer registers, x0 though x31. +// x31 is special and functions as both the stack pointer and a zero register. +// The bottom 32 bits of each of the X registers is accessible as w0 through w31. +// The program counter is no longer accessible as a register. +// SIMD and scalar floating-point registers share a register bank. +// 32 bit float registers are s0 through s31. +// 64 bit double registers are d0 through d31. +// 128 bit SIMD registers are v0 through v31. +// e.g., s0 is the bottom 32 bits of d0, which is the bottom 64 bits of v0. 
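// (Illustrative sketch, not part of the original patch.) The aliasing described
// above means a single architectural register is visible at several widths:
//   v0 (128-bit SIMD) contains d0 (its low 64 bits), which contains s0 (its low 32 bits)
//   x0 (64-bit GPR)   contains w0 (its low 32 bits)
// Consequently a FloatRegister built for s0 and one built for d0 refer to the
// same physical register and are treated as aliases of each other below.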
+ +// AArch64 Calling Convention: +// x0 - x7: arguments and return value +// x8: indirect result (struct) location +// x9 - x15: temporary registers +// x16 - x17: intra-call-use registers (PLT, linker) +// x18: platform specific use (TLS) +// x19 - x28: callee-saved registers +// x29: frame pointer +// x30: link register + +// AArch64 Calling Convention for Floats: +// d0 - d7: arguments and return value +// d8 - d15: callee-saved registers +// Bits 64:128 are not saved for v8-v15. +// d16 - d31: temporary registers + +// AArch64 does not have soft float. + +class Registers { + public: + enum RegisterID { + w0 = 0, x0 = 0, + w1 = 1, x1 = 1, + w2 = 2, x2 = 2, + w3 = 3, x3 = 3, + w4 = 4, x4 = 4, + w5 = 5, x5 = 5, + w6 = 6, x6 = 6, + w7 = 7, x7 = 7, + w8 = 8, x8 = 8, + w9 = 9, x9 = 9, + w10 = 10, x10 = 10, + w11 = 11, x11 = 11, + w12 = 12, x12 = 12, + w13 = 13, x13 = 13, + w14 = 14, x14 = 14, + w15 = 15, x15 = 15, + w16 = 16, x16 = 16, ip0 = 16, // MacroAssembler scratch register 1. + w17 = 17, x17 = 17, ip1 = 17, // MacroAssembler scratch register 2. + w18 = 18, x18 = 18, tls = 18, // Platform-specific use (TLS). + w19 = 19, x19 = 19, + w20 = 20, x20 = 20, + w21 = 21, x21 = 21, + w22 = 22, x22 = 22, + w23 = 23, x23 = 23, + w24 = 24, x24 = 24, + w25 = 25, x25 = 25, + w26 = 26, x26 = 26, + w27 = 27, x27 = 27, + w28 = 28, x28 = 28, + w29 = 29, x29 = 29, fp = 29, + w30 = 30, x30 = 30, lr = 30, + w31 = 31, x31 = 31, wzr = 31, xzr = 31, sp = 31, // Special: both stack pointer and a zero register. + invalid_reg + }; + typedef uint8_t Code; + typedef uint32_t Encoding; + typedef uint32_t SetType; + + union RegisterContent { + uintptr_t r; + }; + + static uint32_t SetSize(SetType x) { + static_assert(sizeof(SetType) == 4, "SetType must be 32 bits"); + return mozilla::CountPopulation32(x); + } + static uint32_t FirstBit(SetType x) { + return mozilla::CountTrailingZeroes32(x); + } + static uint32_t LastBit(SetType x) { + return 31 - mozilla::CountLeadingZeroes32(x); + } + + static const char* GetName(Code code) { + static const char* const Names[] = + { "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", + "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", + "lr", "sp", "invalid" }; + return Names[code]; + } + static const char* GetName(uint32_t i) { + MOZ_ASSERT(i < Total); + return GetName(Code(i)); + } + + static Code FromName(const char* name); + + // If SP is used as the base register for a memory load or store, then the value + // of the stack pointer prior to adding any offset must be quadword (16 byte) aligned, + // or else a stack aligment exception will be generated. + static const Code StackPointer = sp; + + static const Code Invalid = invalid_reg; + + static const uint32_t Total = 32; + static const uint32_t TotalPhys = 32; + static const uint32_t Allocatable = 27; // No named special-function registers. 
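    // (Illustrative sketch, not part of the original patch.) SetType is a
    // 32-bit bitmask with one bit per register, so a set such as { x0, x3 }
    // is written (1 << x0) | (1 << x3) == 0x9, for which SetSize() == 2,
    // FirstBit() == 0 and LastBit() == 3. The masks below are built the same way.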
+ + static const SetType AllMask = 0xFFFFFFFF; + + static const SetType ArgRegMask = + (1 << Registers::x0) | (1 << Registers::x1) | + (1 << Registers::x2) | (1 << Registers::x3) | + (1 << Registers::x4) | (1 << Registers::x5) | + (1 << Registers::x6) | (1 << Registers::x7) | + (1 << Registers::x8); + + static const SetType VolatileMask = + (1 << Registers::x0) | (1 << Registers::x1) | + (1 << Registers::x2) | (1 << Registers::x3) | + (1 << Registers::x4) | (1 << Registers::x5) | + (1 << Registers::x6) | (1 << Registers::x7) | + (1 << Registers::x8) | (1 << Registers::x9) | + (1 << Registers::x10) | (1 << Registers::x11) | + (1 << Registers::x11) | (1 << Registers::x12) | + (1 << Registers::x13) | (1 << Registers::x14) | + (1 << Registers::x14) | (1 << Registers::x15) | + (1 << Registers::x16) | (1 << Registers::x17) | + (1 << Registers::x18); + + static const SetType NonVolatileMask = + (1 << Registers::x19) | (1 << Registers::x20) | + (1 << Registers::x21) | (1 << Registers::x22) | + (1 << Registers::x23) | (1 << Registers::x24) | + (1 << Registers::x25) | (1 << Registers::x26) | + (1 << Registers::x27) | (1 << Registers::x28) | + (1 << Registers::x29) | (1 << Registers::x30); + + static const SetType SingleByteRegs = VolatileMask | NonVolatileMask; + + static const SetType NonAllocatableMask = + (1 << Registers::x28) | // PseudoStackPointer. + (1 << Registers::ip0) | // First scratch register. + (1 << Registers::ip1) | // Second scratch register. + (1 << Registers::tls) | + (1 << Registers::lr) | + (1 << Registers::sp); + + // Registers that can be allocated without being saved, generally. + static const SetType TempMask = VolatileMask & ~NonAllocatableMask; + + static const SetType WrapperMask = VolatileMask; + + // Registers returned from a JS -> JS call. + static const SetType JSCallMask = (1 << Registers::x2); + + // Registers returned from a JS -> C call. + static const SetType CallMask = (1 << Registers::x0); + + static const SetType AllocatableMask = AllMask & ~NonAllocatableMask; +}; + +// Smallest integer type that can hold a register bitmask. +typedef uint32_t PackedRegisterMask; + +template <typename T> +class TypedRegisterSet; + +class FloatRegisters +{ + public: + enum FPRegisterID { + s0 = 0, d0 = 0, v0 = 0, + s1 = 1, d1 = 1, v1 = 1, + s2 = 2, d2 = 2, v2 = 2, + s3 = 3, d3 = 3, v3 = 3, + s4 = 4, d4 = 4, v4 = 4, + s5 = 5, d5 = 5, v5 = 5, + s6 = 6, d6 = 6, v6 = 6, + s7 = 7, d7 = 7, v7 = 7, + s8 = 8, d8 = 8, v8 = 8, + s9 = 9, d9 = 9, v9 = 9, + s10 = 10, d10 = 10, v10 = 10, + s11 = 11, d11 = 11, v11 = 11, + s12 = 12, d12 = 12, v12 = 12, + s13 = 13, d13 = 13, v13 = 13, + s14 = 14, d14 = 14, v14 = 14, + s15 = 15, d15 = 15, v15 = 15, + s16 = 16, d16 = 16, v16 = 16, + s17 = 17, d17 = 17, v17 = 17, + s18 = 18, d18 = 18, v18 = 18, + s19 = 19, d19 = 19, v19 = 19, + s20 = 20, d20 = 20, v20 = 20, + s21 = 21, d21 = 21, v21 = 21, + s22 = 22, d22 = 22, v22 = 22, + s23 = 23, d23 = 23, v23 = 23, + s24 = 24, d24 = 24, v24 = 24, + s25 = 25, d25 = 25, v25 = 25, + s26 = 26, d26 = 26, v26 = 26, + s27 = 27, d27 = 27, v27 = 27, + s28 = 28, d28 = 28, v28 = 28, + s29 = 29, d29 = 29, v29 = 29, + s30 = 30, d30 = 30, v30 = 30, + s31 = 31, d31 = 31, v31 = 31, // Scratch register. 
+ invalid_fpreg + }; + typedef uint8_t Code; + typedef FPRegisterID Encoding; + typedef uint64_t SetType; + + static const char* GetName(Code code) { + static const char* const Names[] = + { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", + "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", + "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", + "d30", "d31", "invalid" }; + return Names[code]; + } + + static const char* GetName(uint32_t i) { + MOZ_ASSERT(i < TotalPhys); + return GetName(Code(i)); + } + + static Code FromName(const char* name); + + static const Code Invalid = invalid_fpreg; + + static const uint32_t Total = 64; + static const uint32_t TotalPhys = 32; + static const SetType AllMask = 0xFFFFFFFFFFFFFFFFULL; + static const SetType AllPhysMask = 0xFFFFFFFFULL; + static const SetType SpreadCoefficient = 0x100000001ULL; + + static const uint32_t Allocatable = 31; // Without d31, the scratch register. + + // d31 is the ScratchFloatReg. + static const SetType NonVolatileMask = + SetType((1 << FloatRegisters::d8) | (1 << FloatRegisters::d9) | + (1 << FloatRegisters::d10) | (1 << FloatRegisters::d11) | + (1 << FloatRegisters::d12) | (1 << FloatRegisters::d13) | + (1 << FloatRegisters::d14) | (1 << FloatRegisters::d15) | + (1 << FloatRegisters::d16) | (1 << FloatRegisters::d17) | + (1 << FloatRegisters::d18) | (1 << FloatRegisters::d19) | + (1 << FloatRegisters::d20) | (1 << FloatRegisters::d21) | + (1 << FloatRegisters::d22) | (1 << FloatRegisters::d23) | + (1 << FloatRegisters::d24) | (1 << FloatRegisters::d25) | + (1 << FloatRegisters::d26) | (1 << FloatRegisters::d27) | + (1 << FloatRegisters::d28) | (1 << FloatRegisters::d29) | + (1 << FloatRegisters::d30)) * SpreadCoefficient; + + static const SetType VolatileMask = AllMask & ~NonVolatileMask; + static const SetType AllDoubleMask = AllMask; + static const SetType AllSingleMask = AllMask; + + static const SetType WrapperMask = VolatileMask; + + // d31 is the ScratchFloatReg. + static const SetType NonAllocatableMask = (SetType(1) << FloatRegisters::d31) * SpreadCoefficient; + + // Registers that can be allocated without being saved, generally. + static const SetType TempMask = VolatileMask & ~NonAllocatableMask; + + static const SetType AllocatableMask = AllMask & ~NonAllocatableMask; + union RegisterContent { + float s; + double d; + }; + enum Kind { + Double, + Single + }; +}; + +// In bytes: slots needed for potential memory->memory move spills. +// +8 for cycles +// +8 for gpr spills +// +8 for double spills +static const uint32_t ION_FRAME_SLACK_SIZE = 24; + +static const uint32_t ShadowStackSpace = 0; + +// TODO: +// This constant needs to be updated to account for whatever near/far branching +// strategy is used by ARM64. +static const uint32_t JumpImmediateRange = UINT32_MAX; + +static const uint32_t ABIStackAlignment = 16; +static const uint32_t CodeAlignment = 16; +static const bool StackKeptAligned = false; + +// Although sp is only usable if 16-byte alignment is kept, +// the Pseudo-StackPointer enables use of 8-byte alignment. 
+static const uint32_t StackAlignment = 8; +static const uint32_t NativeFrameSize = 8; + +struct FloatRegister +{ + typedef FloatRegisters Codes; + typedef Codes::Code Code; + typedef Codes::Encoding Encoding; + typedef Codes::SetType SetType; + + union RegisterContent { + float s; + double d; + }; + + constexpr FloatRegister(uint32_t code, FloatRegisters::Kind k) + : code_(FloatRegisters::Code(code & 31)), + k_(k) + { } + + explicit constexpr FloatRegister(uint32_t code) + : code_(FloatRegisters::Code(code & 31)), + k_(FloatRegisters::Kind(code >> 5)) + { } + + constexpr FloatRegister() + : code_(FloatRegisters::Code(-1)), + k_(FloatRegisters::Double) + { } + + static uint32_t SetSize(SetType x) { + static_assert(sizeof(SetType) == 8, "SetType must be 64 bits"); + x |= x >> FloatRegisters::TotalPhys; + x &= FloatRegisters::AllPhysMask; + return mozilla::CountPopulation32(x); + } + + static FloatRegister FromCode(uint32_t i) { + MOZ_ASSERT(i < FloatRegisters::Total); + FloatRegister r(i); + return r; + } + Code code() const { + MOZ_ASSERT((uint32_t)code_ < FloatRegisters::Total); + return Code(code_ | (k_ << 5)); + } + Encoding encoding() const { + return Encoding(code_); + } + + const char* name() const { + return FloatRegisters::GetName(code()); + } + bool volatile_() const { + return !!((SetType(1) << code()) & FloatRegisters::VolatileMask); + } + bool operator!=(FloatRegister other) const { + return other.code_ != code_ || other.k_ != k_; + } + bool operator==(FloatRegister other) const { + return other.code_ == code_ && other.k_ == k_; + } + bool aliases(FloatRegister other) const { + return other.code_ == code_; + } + uint32_t numAliased() const { + return 2; + } + static FloatRegisters::Kind otherkind(FloatRegisters::Kind k) { + if (k == FloatRegisters::Double) + return FloatRegisters::Single; + return FloatRegisters::Double; + } + void aliased(uint32_t aliasIdx, FloatRegister* ret) { + if (aliasIdx == 0) + *ret = *this; + else + *ret = FloatRegister(code_, otherkind(k_)); + } + // This function mostly exists for the ARM backend. It is to ensure that two + // floating point registers' types are equivalent. e.g. S0 is not equivalent + // to D16, since S0 holds a float32, and D16 holds a Double. + // Since all floating point registers on x86 and x64 are equivalent, it is + // reasonable for this function to do the same. + bool equiv(FloatRegister other) const { + return k_ == other.k_; + } + constexpr uint32_t size() const { + return k_ == FloatRegisters::Double ? 
sizeof(double) : sizeof(float); + } + uint32_t numAlignedAliased() { + return numAliased(); + } + void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) { + MOZ_ASSERT(aliasIdx == 0); + aliased(aliasIdx, ret); + } + SetType alignedOrDominatedAliasedSet() const { + return Codes::SpreadCoefficient << code_; + } + + bool isSingle() const { + return k_ == FloatRegisters::Single; + } + bool isDouble() const { + return k_ == FloatRegisters::Double; + } + bool isSimd128() const { + return false; + } + + static uint32_t FirstBit(SetType x) { + JS_STATIC_ASSERT(sizeof(SetType) == 8); + return mozilla::CountTrailingZeroes64(x); + } + static uint32_t LastBit(SetType x) { + JS_STATIC_ASSERT(sizeof(SetType) == 8); + return 63 - mozilla::CountLeadingZeroes64(x); + } + + static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s); + static uint32_t GetSizeInBytes(const TypedRegisterSet<FloatRegister>& s); + static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s); + uint32_t getRegisterDumpOffsetInBytes(); + + public: + Code code_ : 8; + FloatRegisters::Kind k_ : 1; +}; + +// ARM/D32 has double registers that cannot be treated as float32. +// Luckily, ARMv8 doesn't have the same misfortune. +inline bool +hasUnaliasedDouble() +{ + return false; +} + +// ARM prior to ARMv8 also has doubles that alias multiple floats. +// Again, ARMv8 is in the clear. +inline bool +hasMultiAlias() +{ + return false; +} + +} // namespace jit +} // namespace js + +#endif // jit_arm64_Architecture_arm64_h diff --git a/js/src/jit/arm64/Assembler-arm64.cpp b/js/src/jit/arm64/Assembler-arm64.cpp new file mode 100644 index 000000000..3d032cebd --- /dev/null +++ b/js/src/jit/arm64/Assembler-arm64.cpp @@ -0,0 +1,670 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/arm64/Assembler-arm64.h" + +#include "mozilla/DebugOnly.h" +#include "mozilla/MathAlgorithms.h" + +#include "jscompartment.h" +#include "jsutil.h" + +#include "gc/Marking.h" + +#include "jit/arm64/Architecture-arm64.h" +#include "jit/arm64/MacroAssembler-arm64.h" +#include "jit/ExecutableAllocator.h" +#include "jit/JitCompartment.h" + +#include "gc/StoreBuffer-inl.h" + +using namespace js; +using namespace js::jit; + +using mozilla::CountLeadingZeroes32; +using mozilla::DebugOnly; + +// Note this is used for inter-wasm calls and may pass arguments and results +// in floating point registers even if the system ABI does not. + +ABIArg +ABIArgGenerator::next(MIRType type) +{ + switch (type) { + case MIRType::Int32: + case MIRType::Pointer: + if (intRegIndex_ == NumIntArgRegs) { + current_ = ABIArg(stackOffset_); + stackOffset_ += sizeof(uintptr_t); + break; + } + current_ = ABIArg(Register::FromCode(intRegIndex_)); + intRegIndex_++; + break; + + case MIRType::Float32: + case MIRType::Double: + if (floatRegIndex_ == NumFloatArgRegs) { + current_ = ABIArg(stackOffset_); + stackOffset_ += sizeof(double); + break; + } + current_ = ABIArg(FloatRegister(floatRegIndex_, + type == MIRType::Double ? 
FloatRegisters::Double + : FloatRegisters::Single)); + floatRegIndex_++; + break; + + default: + MOZ_CRASH("Unexpected argument type"); + } + return current_; +} + +namespace js { +namespace jit { + +void +Assembler::finish() +{ + armbuffer_.flushPool(); + + // The extended jump table is part of the code buffer. + ExtendedJumpTable_ = emitExtendedJumpTable(); + Assembler::FinalizeCode(); + + // The jump relocation table starts with a fixed-width integer pointing + // to the start of the extended jump table. + // Space for this integer is allocated by Assembler::addJumpRelocation() + // before writing the first entry. + // Don't touch memory if we saw an OOM error. + if (jumpRelocations_.length() && !oom()) { + MOZ_ASSERT(jumpRelocations_.length() >= sizeof(uint32_t)); + *(uint32_t*)jumpRelocations_.buffer() = ExtendedJumpTable_.getOffset(); + } +} + +BufferOffset +Assembler::emitExtendedJumpTable() +{ + if (!pendingJumps_.length() || oom()) + return BufferOffset(); + + armbuffer_.flushPool(); + armbuffer_.align(SizeOfJumpTableEntry); + + BufferOffset tableOffset = armbuffer_.nextOffset(); + + for (size_t i = 0; i < pendingJumps_.length(); i++) { + // Each JumpTableEntry is of the form: + // LDR ip0 [PC, 8] + // BR ip0 + // [Patchable 8-byte constant low bits] + // [Patchable 8-byte constant high bits] + DebugOnly<size_t> preOffset = size_t(armbuffer_.nextOffset().getOffset()); + + ldr(vixl::ip0, ptrdiff_t(8 / vixl::kInstructionSize)); + br(vixl::ip0); + + DebugOnly<size_t> prePointer = size_t(armbuffer_.nextOffset().getOffset()); + MOZ_ASSERT_IF(!oom(), prePointer - preOffset == OffsetOfJumpTableEntryPointer); + + brk(0x0); + brk(0x0); + + DebugOnly<size_t> postOffset = size_t(armbuffer_.nextOffset().getOffset()); + + MOZ_ASSERT_IF(!oom(), postOffset - preOffset == SizeOfJumpTableEntry); + } + + if (oom()) + return BufferOffset(); + + return tableOffset; +} + +void +Assembler::executableCopy(uint8_t* buffer) +{ + // Copy the code and all constant pools into the output buffer. + armbuffer_.executableCopy(buffer); + + // Patch any relative jumps that target code outside the buffer. + // The extended jump table may be used for distant jumps. + for (size_t i = 0; i < pendingJumps_.length(); i++) { + RelativePatch& rp = pendingJumps_[i]; + + if (!rp.target) { + // The patch target is nullptr for jumps that have been linked to + // a label within the same code block, but may be repatched later + // to jump to a different code block. + continue; + } + + Instruction* target = (Instruction*)rp.target; + Instruction* branch = (Instruction*)(buffer + rp.offset.getOffset()); + JumpTableEntry* extendedJumpTable = + reinterpret_cast<JumpTableEntry*>(buffer + ExtendedJumpTable_.getOffset()); + if (branch->BranchType() != vixl::UnknownBranchType) { + if (branch->IsTargetReachable(target)) { + branch->SetImmPCOffsetTarget(target); + } else { + JumpTableEntry* entry = &extendedJumpTable[i]; + branch->SetImmPCOffsetTarget(entry->getLdr()); + entry->data = target; + } + } else { + // Currently a two-instruction call, it should be possible to optimize this + // into a single instruction call + nop in some instances, but this will work. 
+ } + } +} + +BufferOffset +Assembler::immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, ARMBuffer::PoolEntry* pe) +{ + uint32_t inst = op | Rt(dest); + const size_t numInst = 1; + const unsigned sizeOfPoolEntryInBytes = 4; + const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes; + return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe); +} + +BufferOffset +Assembler::immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe) +{ + return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, pe); +} + +BufferOffset +Assembler::immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c) +{ + MOZ_CRASH("immPool64Branch"); +} + +BufferOffset +Assembler::fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op) +{ + uint32_t inst = op | Rt(dest); + const size_t numInst = 1; + const unsigned sizeOfPoolEntryInBits = 32; + const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits; + return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value); +} + +BufferOffset +Assembler::fImmPool64(ARMFPRegister dest, double value) +{ + return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit); +} +BufferOffset +Assembler::fImmPool32(ARMFPRegister dest, float value) +{ + return fImmPool(dest, (uint8_t*)&value, vixl::LDR_s_lit); +} + +void +Assembler::bind(Label* label, BufferOffset targetOffset) +{ + // Nothing has seen the label yet: just mark the location. + // If we've run out of memory, don't attempt to modify the buffer which may + // not be there. Just mark the label as bound to the (possibly bogus) + // targetOffset. + if (!label->used() || oom()) { + label->bind(targetOffset.getOffset()); + return; + } + + // Get the most recent instruction that used the label, as stored in the label. + // This instruction is the head of an implicit linked list of label uses. + BufferOffset branchOffset(label); + + while (branchOffset.assigned()) { + // Before overwriting the offset in this instruction, get the offset of + // the next link in the implicit branch list. + BufferOffset nextOffset = NextLink(branchOffset); + + // Linking against the actual (Instruction*) would be invalid, + // since that Instruction could be anywhere in memory. + // Instead, just link against the correct relative offset, assuming + // no constant pools, which will be taken into consideration + // during finalization. + ptrdiff_t relativeByteOffset = targetOffset.getOffset() - branchOffset.getOffset(); + Instruction* link = getInstructionAt(branchOffset); + + // This branch may still be registered for callbacks. Stop tracking it. + vixl::ImmBranchType branchType = link->BranchType(); + vixl::ImmBranchRangeType branchRange = Instruction::ImmBranchTypeToRange(branchType); + if (branchRange < vixl::NumShortBranchRangeTypes) { + BufferOffset deadline(branchOffset.getOffset() + + Instruction::ImmBranchMaxForwardOffset(branchRange)); + armbuffer_.unregisterBranchDeadline(branchRange, deadline); + } + + // Is link able to reach the label? + if (link->IsPCRelAddressing() || link->IsTargetReachable(link + relativeByteOffset)) { + // Write a new relative offset into the instruction. + link->SetImmPCOffsetTarget(link + relativeByteOffset); + } else { + // This is a short-range branch, and it can't reach the label directly. + // Verify that it branches to a veneer: an unconditional branch. 
+ MOZ_ASSERT(getInstructionAt(nextOffset)->BranchType() == vixl::UncondBranchType); + } + + branchOffset = nextOffset; + } + + // Bind the label, so that future uses may encode the offset immediately. + label->bind(targetOffset.getOffset()); +} + +void +Assembler::bind(RepatchLabel* label) +{ + // Nothing has seen the label yet: just mark the location. + // If we've run out of memory, don't attempt to modify the buffer which may + // not be there. Just mark the label as bound to nextOffset(). + if (!label->used() || oom()) { + label->bind(nextOffset().getOffset()); + return; + } + int branchOffset = label->offset(); + Instruction* inst = getInstructionAt(BufferOffset(branchOffset)); + inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset); +} + +void +Assembler::trace(JSTracer* trc) +{ + for (size_t i = 0; i < pendingJumps_.length(); i++) { + RelativePatch& rp = pendingJumps_[i]; + if (rp.kind == Relocation::JITCODE) { + JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target); + TraceManuallyBarrieredEdge(trc, &code, "masmrel32"); + MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target)); + } + } + + // TODO: Trace. +#if 0 + if (tmpDataRelocations_.length()) + ::TraceDataRelocations(trc, &armbuffer_, &tmpDataRelocations_); +#endif +} + +void +Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc) +{ + // Only JITCODE relocations are patchable at runtime. + MOZ_ASSERT(reloc == Relocation::JITCODE); + + // The jump relocation table starts with a fixed-width integer pointing + // to the start of the extended jump table. But, we don't know the + // actual extended jump table offset yet, so write a 0 which we'll + // patch later in Assembler::finish(). + if (!jumpRelocations_.length()) + jumpRelocations_.writeFixedUint32_t(0); + + // Each entry in the table is an (offset, extendedTableIndex) pair. + jumpRelocations_.writeUnsigned(src.getOffset()); + jumpRelocations_.writeUnsigned(pendingJumps_.length()); +} + +void +Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc) +{ + MOZ_ASSERT(target.value != nullptr); + + if (reloc == Relocation::JITCODE) + addJumpRelocation(src, reloc); + + // This jump is not patchable at runtime. Extended jump table entry requirements + // cannot be known until finalization, so to be safe, give each jump and entry. + // This also causes GC tracing of the target. 
+ enoughMemory_ &= pendingJumps_.append(RelativePatch(src, target.value, reloc)); +} + +size_t +Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc) +{ + MOZ_CRASH("TODO: This is currently unused (and untested)"); + if (reloc == Relocation::JITCODE) + addJumpRelocation(src, reloc); + + size_t extendedTableIndex = pendingJumps_.length(); + enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc)); + return extendedTableIndex; +} + +void +PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect) +{ + MOZ_CRASH("PatchJump"); +} + +void +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + PatchedImmPtr expected) +{ + Instruction* i = (Instruction*)label.raw(); + void** pValue = i->LiteralAddress<void**>(); + MOZ_ASSERT(*pValue == expected.value); + *pValue = newValue.value; +} + +void +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expected) +{ + PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expected.value)); +} + +void +Assembler::ToggleToJmp(CodeLocationLabel inst_) +{ + Instruction* i = (Instruction*)inst_.raw(); + MOZ_ASSERT(i->IsAddSubImmediate()); + + // Refer to instruction layout in ToggleToCmp(). + int imm19 = (int)i->Bits(23, 5); + MOZ_ASSERT(vixl::is_int19(imm19)); + + b(i, imm19, Always); +} + +void +Assembler::ToggleToCmp(CodeLocationLabel inst_) +{ + Instruction* i = (Instruction*)inst_.raw(); + MOZ_ASSERT(i->IsCondB()); + + int imm19 = i->ImmCondBranch(); + // bit 23 is reserved, and the simulator throws an assertion when this happens + // It'll be messy to decode, but we can steal bit 30 or bit 31. + MOZ_ASSERT(vixl::is_int18(imm19)); + + // 31 - 64-bit if set, 32-bit if unset. (OK!) + // 30 - sub if set, add if unset. (OK!) + // 29 - SetFlagsBit. Must be set. + // 22:23 - ShiftAddSub. (OK!) + // 10:21 - ImmAddSub. (OK!) + // 5:9 - First source register (Rn). (OK!) + // 0:4 - Destination Register. Must be xzr. + + // From the above, there is a safe 19-bit contiguous region from 5:23. + Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) | + Rd(vixl::xzr) | (imm19 << vixl::Rn_offset)); +} + +void +Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) +{ + const Instruction* first = reinterpret_cast<Instruction*>(inst_.raw()); + Instruction* load; + Instruction* call; + + // There might be a constant pool at the very first instruction. + first = first->skipPool(); + + // Skip the stack pointer restore instruction. + if (first->IsStackPtrSync()) + first = first->InstructionAtOffset(vixl::kInstructionSize)->skipPool(); + + load = const_cast<Instruction*>(first); + + // The call instruction follows the load, but there may be an injected + // constant pool. 
+ call = const_cast<Instruction*>(load->InstructionAtOffset(vixl::kInstructionSize)->skipPool()); + + if (call->IsBLR() == enabled) + return; + + if (call->IsBLR()) { + // If the second instruction is blr(), then wehave: + // ldr x17, [pc, offset] + // blr x17 + MOZ_ASSERT(load->IsLDR()); + // We want to transform this to: + // adr xzr, [pc, offset] + // nop + int32_t offset = load->ImmLLiteral(); + adr(load, xzr, int32_t(offset)); + nop(call); + } else { + // We have: + // adr xzr, [pc, offset] (or ldr x17, [pc, offset]) + // nop + MOZ_ASSERT(load->IsADR() || load->IsLDR()); + MOZ_ASSERT(call->IsNOP()); + // Transform this to: + // ldr x17, [pc, offset] + // blr x17 + int32_t offset = (int)load->ImmPCRawOffset(); + MOZ_ASSERT(vixl::is_int19(offset)); + ldr(load, ScratchReg2_64, int32_t(offset)); + blr(call, ScratchReg2_64); + } +} + +class RelocationIterator +{ + CompactBufferReader reader_; + uint32_t tableStart_; + uint32_t offset_; + uint32_t extOffset_; + + public: + explicit RelocationIterator(CompactBufferReader& reader) + : reader_(reader) + { + // The first uint32_t stores the extended table offset. + tableStart_ = reader_.readFixedUint32_t(); + } + + bool read() { + if (!reader_.more()) + return false; + offset_ = reader_.readUnsigned(); + extOffset_ = reader_.readUnsigned(); + return true; + } + + uint32_t offset() const { + return offset_; + } + uint32_t extendedOffset() const { + return extOffset_; + } +}; + +static JitCode* +CodeFromJump(JitCode* code, uint8_t* jump) +{ + const Instruction* inst = (const Instruction*)jump; + uint8_t* target; + + // We're expecting a call created by MacroAssembler::call(JitCode*). + // It looks like: + // + // ldr scratch, [pc, offset] + // blr scratch + // + // If the call has been toggled by ToggleCall(), it looks like: + // + // adr xzr, [pc, offset] + // nop + // + // There might be a constant pool at the very first instruction. + // See also ToggleCall(). + inst = inst->skipPool(); + + // Skip the stack pointer restore instruction. + if (inst->IsStackPtrSync()) + inst = inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool(); + + if (inst->BranchType() != vixl::UnknownBranchType) { + // This is an immediate branch. + target = (uint8_t*)inst->ImmPCOffsetTarget(); + } else if (inst->IsLDR()) { + // This is an ldr+blr call that is enabled. See ToggleCall(). + mozilla::DebugOnly<const Instruction*> nextInst = + inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool(); + MOZ_ASSERT(nextInst->IsNOP() || nextInst->IsBLR()); + target = (uint8_t*)inst->Literal64(); + } else if (inst->IsADR()) { + // This is a disabled call: adr+nop. See ToggleCall(). + mozilla::DebugOnly<const Instruction*> nextInst = + inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool(); + MOZ_ASSERT(nextInst->IsNOP()); + ptrdiff_t offset = inst->ImmPCRawOffset() << vixl::kLiteralEntrySizeLog2; + // This is what Literal64 would do with the corresponding ldr. + memcpy(&target, inst + offset, sizeof(target)); + } else { + MOZ_CRASH("Unrecognized jump instruction."); + } + + // If the jump is within the code buffer, it uses the extended jump table. 
+ if (target >= code->raw() && target < code->raw() + code->instructionsSize()) { + MOZ_ASSERT(target + Assembler::SizeOfJumpTableEntry <= code->raw() + code->instructionsSize()); + + uint8_t** patchablePtr = (uint8_t**)(target + Assembler::OffsetOfJumpTableEntryPointer); + target = *patchablePtr; + } + + return JitCode::FromExecutable(target); +} + +void +Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader) +{ + RelocationIterator iter(reader); + while (iter.read()) { + JitCode* child = CodeFromJump(code, code->raw() + iter.offset()); + TraceManuallyBarrieredEdge(trc, &child, "rel32"); + MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset())); + } +} + +static void +TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader) +{ + while (reader.more()) { + size_t offset = reader.readUnsigned(); + Instruction* load = (Instruction*)&buffer[offset]; + + // The only valid traceable operation is a 64-bit load to an ARMRegister. + // Refer to movePatchablePtr() for generation. + MOZ_ASSERT(load->Mask(vixl::LoadLiteralMask) == vixl::LDR_x_lit); + + uintptr_t* literalAddr = load->LiteralAddress<uintptr_t*>(); + uintptr_t literal = *literalAddr; + + // All pointers on AArch64 will have the top bits cleared. + // If those bits are not cleared, this must be a Value. + if (literal >> JSVAL_TAG_SHIFT) { + Value v = Value::fromRawBits(literal); + TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value"); + if (*literalAddr != v.asRawBits()) { + // Only update the code if the value changed, because the code + // is not writable if we're not moving objects. + *literalAddr = v.asRawBits(); + } + + // TODO: When we can, flush caches here if a pointer was moved. + continue; + } + + // No barriers needed since the pointers are constants. + TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(literalAddr), + "ion-masm-ptr"); + + // TODO: Flush caches at end? + } +} + +void +Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader) +{ + ::TraceDataRelocations(trc, code->raw(), reader); +} + +void +Assembler::FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader, + const ObjectVector& nurseryObjects) +{ + + MOZ_ASSERT(!nurseryObjects.empty()); + + uint8_t* buffer = code->raw(); + bool hasNurseryPointers = false; + + while (reader.more()) { + size_t offset = reader.readUnsigned(); + Instruction* ins = (Instruction*)&buffer[offset]; + + uintptr_t* literalAddr = ins->LiteralAddress<uintptr_t*>(); + uintptr_t literal = *literalAddr; + + if (literal >> JSVAL_TAG_SHIFT) + continue; // This is a Value. + + if (!(literal & 0x1)) + continue; + + uint32_t index = literal >> 1; + JSObject* obj = nurseryObjects[index]; + *literalAddr = uintptr_t(obj); + + // Either all objects are still in the nursery, or all objects are tenured. + MOZ_ASSERT_IF(hasNurseryPointers, IsInsideNursery(obj)); + + if (!hasNurseryPointers && IsInsideNursery(obj)) + hasNurseryPointers = true; + } + + if (hasNurseryPointers) + cx->runtime()->gc.storeBuffer.putWholeCell(code); +} + +void +Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm) +{ + MOZ_CRASH("PatchInstructionImmediate()"); +} + +void +Assembler::retarget(Label* label, Label* target) +{ + if (label->used()) { + if (target->bound()) { + bind(label, BufferOffset(target)); + } else if (target->used()) { + // The target is not bound but used. Prepend label's branch list + // onto target's. 
+ BufferOffset labelBranchOffset(label); + + // Find the head of the use chain for label. + BufferOffset next = NextLink(labelBranchOffset); + while (next.assigned()) { + labelBranchOffset = next; + next = NextLink(next); + } + + // Then patch the head of label's use chain to the tail of target's + // use chain, prepending the entire use chain of target. + SetNextLink(labelBranchOffset, BufferOffset(target)); + target->use(label->offset()); + } else { + // The target is unbound and unused. We can just take the head of + // the list hanging off of label, and dump that into target. + DebugOnly<uint32_t> prev = target->use(label->offset()); + MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET); + } + } + label->reset(); +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/Assembler-arm64.h b/js/src/jit/arm64/Assembler-arm64.h new file mode 100644 index 000000000..287ab23b3 --- /dev/null +++ b/js/src/jit/arm64/Assembler-arm64.h @@ -0,0 +1,557 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef A64_ASSEMBLER_A64_H_ +#define A64_ASSEMBLER_A64_H_ + +#include "jit/arm64/vixl/Assembler-vixl.h" + +#include "jit/JitCompartment.h" + +namespace js { +namespace jit { + +// VIXL imports. +typedef vixl::Register ARMRegister; +typedef vixl::FPRegister ARMFPRegister; +using vixl::ARMBuffer; +using vixl::Instruction; + +static const uint32_t AlignmentAtPrologue = 0; +static const uint32_t AlignmentMidPrologue = 8; +static const Scale ScalePointer = TimesEight; + +// The MacroAssembler uses scratch registers extensively and unexpectedly. +// For safety, scratch registers should always be acquired using +// vixl::UseScratchRegisterScope. 
+static constexpr Register ScratchReg = { Registers::ip0 }; +static constexpr ARMRegister ScratchReg64 = { ScratchReg, 64 }; + +static constexpr Register ScratchReg2 = { Registers::ip1 }; +static constexpr ARMRegister ScratchReg2_64 = { ScratchReg2, 64 }; + +static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::d31, FloatRegisters::Double }; +static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::d0, FloatRegisters::Double }; + +static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::s0, FloatRegisters::Single }; +static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::s31, FloatRegisters::Single }; + +static constexpr Register InvalidReg = { Registers::invalid_reg }; +static constexpr FloatRegister InvalidFloatReg = { FloatRegisters::invalid_fpreg, FloatRegisters::Single }; + +static constexpr Register OsrFrameReg = { Registers::x3 }; +static constexpr Register ArgumentsRectifierReg = { Registers::x8 }; +static constexpr Register CallTempReg0 = { Registers::x9 }; +static constexpr Register CallTempReg1 = { Registers::x10 }; +static constexpr Register CallTempReg2 = { Registers::x11 }; +static constexpr Register CallTempReg3 = { Registers::x12 }; +static constexpr Register CallTempReg4 = { Registers::x13 }; +static constexpr Register CallTempReg5 = { Registers::x14 }; + +static constexpr Register PreBarrierReg = { Registers::x1 }; + +static constexpr Register ReturnReg = { Registers::x0 }; +static constexpr Register64 ReturnReg64(ReturnReg); +static constexpr Register JSReturnReg = { Registers::x2 }; +static constexpr Register FramePointer = { Registers::fp }; +static constexpr Register ZeroRegister = { Registers::sp }; +static constexpr ARMRegister ZeroRegister64 = { Registers::sp, 64 }; +static constexpr ARMRegister ZeroRegister32 = { Registers::sp, 32 }; + +static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg; +static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg; + +// StackPointer is intentionally undefined on ARM64 to prevent misuse: +// using sp as a base register is only valid if sp % 16 == 0. +static constexpr Register RealStackPointer = { Registers::sp }; + +static constexpr Register PseudoStackPointer = { Registers::x28 }; +static constexpr ARMRegister PseudoStackPointer64 = { Registers::x28, 64 }; +static constexpr ARMRegister PseudoStackPointer32 = { Registers::x28, 32 }; + +// StackPointer for use by irregexp. +static constexpr Register RegExpStackPointer = PseudoStackPointer; + +static constexpr Register IntArgReg0 = { Registers::x0 }; +static constexpr Register IntArgReg1 = { Registers::x1 }; +static constexpr Register IntArgReg2 = { Registers::x2 }; +static constexpr Register IntArgReg3 = { Registers::x3 }; +static constexpr Register IntArgReg4 = { Registers::x4 }; +static constexpr Register IntArgReg5 = { Registers::x5 }; +static constexpr Register IntArgReg6 = { Registers::x6 }; +static constexpr Register IntArgReg7 = { Registers::x7 }; +static constexpr Register GlobalReg = { Registers::x20 }; +static constexpr Register HeapReg = { Registers::x21 }; +static constexpr Register HeapLenReg = { Registers::x22 }; + +// Define unsized Registers. 
+#define DEFINE_UNSIZED_REGISTERS(N) \ +static constexpr Register r##N = { Registers::x##N }; +REGISTER_CODE_LIST(DEFINE_UNSIZED_REGISTERS) +#undef DEFINE_UNSIZED_REGISTERS +static constexpr Register ip0 = { Registers::x16 }; +static constexpr Register ip1 = { Registers::x16 }; +static constexpr Register fp = { Registers::x30 }; +static constexpr Register lr = { Registers::x30 }; +static constexpr Register rzr = { Registers::xzr }; + +// Import VIXL registers into the js::jit namespace. +#define IMPORT_VIXL_REGISTERS(N) \ +static constexpr ARMRegister w##N = vixl::w##N; \ +static constexpr ARMRegister x##N = vixl::x##N; +REGISTER_CODE_LIST(IMPORT_VIXL_REGISTERS) +#undef IMPORT_VIXL_REGISTERS +static constexpr ARMRegister wzr = vixl::wzr; +static constexpr ARMRegister xzr = vixl::xzr; +static constexpr ARMRegister wsp = vixl::wsp; +static constexpr ARMRegister sp = vixl::sp; + +// Import VIXL VRegisters into the js::jit namespace. +#define IMPORT_VIXL_VREGISTERS(N) \ +static constexpr ARMFPRegister s##N = vixl::s##N; \ +static constexpr ARMFPRegister d##N = vixl::d##N; +REGISTER_CODE_LIST(IMPORT_VIXL_VREGISTERS) +#undef IMPORT_VIXL_VREGISTERS + +static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg); + +// Registers used in the GenerateFFIIonExit Enable Activation block. +static constexpr Register WasmIonExitRegCallee = r8; +static constexpr Register WasmIonExitRegE0 = r0; +static constexpr Register WasmIonExitRegE1 = r1; + +// Registers used in the GenerateFFIIonExit Disable Activation block. +// None of these may be the second scratch register. +static constexpr Register WasmIonExitRegReturnData = r2; +static constexpr Register WasmIonExitRegReturnType = r3; +static constexpr Register WasmIonExitRegD0 = r0; +static constexpr Register WasmIonExitRegD1 = r1; +static constexpr Register WasmIonExitRegD2 = r4; + +// Registerd used in RegExpMatcher instruction (do not use JSReturnOperand). +static constexpr Register RegExpMatcherRegExpReg = CallTempReg0; +static constexpr Register RegExpMatcherStringReg = CallTempReg1; +static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2; + +// Registerd used in RegExpTester instruction (do not use ReturnReg). +static constexpr Register RegExpTesterRegExpReg = CallTempReg0; +static constexpr Register RegExpTesterStringReg = CallTempReg1; +static constexpr Register RegExpTesterLastIndexReg = CallTempReg2; + +static constexpr Register JSReturnReg_Type = r3; +static constexpr Register JSReturnReg_Data = r2; + +static constexpr FloatRegister NANReg = { FloatRegisters::d14, FloatRegisters::Single }; +// N.B. r8 isn't listed as an aapcs temp register, but we can use it as such because we never +// use return-structs. +static constexpr Register CallTempNonArgRegs[] = { r8, r9, r10, r11, r12, r13, r14, r15 }; +static const uint32_t NumCallTempNonArgRegs = + mozilla::ArrayLength(CallTempNonArgRegs); + +static constexpr uint32_t JitStackAlignment = 16; + +static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value); +static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1, + "Stack alignment should be a non-zero multiple of sizeof(Value)"); + +// This boolean indicates whether we support SIMD instructions flavoured for +// this architecture or not. Rather than a method in the LIRGenerator, it is +// here such that it is accessible from the entire codebase. Once full support +// for SIMD is reached on all tier-1 platforms, this constant can be deleted. 
+static constexpr bool SupportsSimd = false; +static constexpr uint32_t SimdMemoryAlignment = 16; + +static_assert(CodeAlignment % SimdMemoryAlignment == 0, + "Code alignment should be larger than any of the alignments which are used for " + "the constant sections of the code buffer. Thus it should be larger than the " + "alignment for SIMD constants."); + +static const uint32_t WasmStackAlignment = SimdMemoryAlignment; +static const int32_t WasmGlobalRegBias = 1024; + +// Does this architecture support SIMD conversions between Uint32x4 and Float32x4? +static constexpr bool SupportsUint32x4FloatConversions = false; + +// Does this architecture support comparisons of unsigned integer vectors? +static constexpr bool SupportsUint8x16Compares = false; +static constexpr bool SupportsUint16x8Compares = false; +static constexpr bool SupportsUint32x4Compares = false; + +class Assembler : public vixl::Assembler +{ + public: + Assembler() + : vixl::Assembler() + { } + + typedef vixl::Condition Condition; + + void finish(); + bool asmMergeWith(const Assembler& other) { + MOZ_CRASH("NYI"); + } + void trace(JSTracer* trc); + + // Emit the jump table, returning the BufferOffset to the first entry in the table. + BufferOffset emitExtendedJumpTable(); + BufferOffset ExtendedJumpTable_; + void executableCopy(uint8_t* buffer); + + BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, + ARMBuffer::PoolEntry* pe = nullptr); + BufferOffset immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe = nullptr); + BufferOffset immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, vixl::Condition c); + BufferOffset fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op); + BufferOffset fImmPool64(ARMFPRegister dest, double value); + BufferOffset fImmPool32(ARMFPRegister dest, float value); + + void bind(Label* label) { bind(label, nextOffset()); } + void bind(Label* label, BufferOffset boff); + void bind(RepatchLabel* label); + void bindLater(Label* label, wasm::TrapDesc target) { + MOZ_CRASH("NYI"); + } + + bool oom() const { + return AssemblerShared::oom() || + armbuffer_.oom() || + jumpRelocations_.oom() || + dataRelocations_.oom() || + preBarriers_.oom(); + } + + void copyJumpRelocationTable(uint8_t* dest) const { + if (jumpRelocations_.length()) + memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length()); + } + void copyDataRelocationTable(uint8_t* dest) const { + if (dataRelocations_.length()) + memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length()); + } + void copyPreBarrierTable(uint8_t* dest) const { + if (preBarriers_.length()) + memcpy(dest, preBarriers_.buffer(), preBarriers_.length()); + } + + size_t jumpRelocationTableBytes() const { + return jumpRelocations_.length(); + } + size_t dataRelocationTableBytes() const { + return dataRelocations_.length(); + } + size_t preBarrierTableBytes() const { + return preBarriers_.length(); + } + size_t bytesNeeded() const { + return SizeOfCodeGenerated() + + jumpRelocationTableBytes() + + dataRelocationTableBytes() + + preBarrierTableBytes(); + } + + void processCodeLabels(uint8_t* rawCode) { + for (size_t i = 0; i < codeLabels_.length(); i++) { + CodeLabel label = codeLabels_[i]; + Bind(rawCode, label.patchAt(), rawCode + label.target()->offset()); + } + } + + void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) { + *reinterpret_cast<const void**>(rawCode + label->offset()) = address; + } + + void retarget(Label* cur, Label* next); + + // The buffer is about to be linked. 
Ensure any constant pools or + // excess bookkeeping has been flushed to the instruction stream. + void flush() { + armbuffer_.flushPool(); + } + + void comment(const char* msg) { + // This is not implemented because setPrinter() is not implemented. + // TODO spew("; %s", msg); + } + + int actualIndex(int curOffset) { + ARMBuffer::PoolEntry pe(curOffset); + return armbuffer_.poolEntryOffset(pe); + } + size_t labelToPatchOffset(CodeOffset label) { + return label.offset(); + } + static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) { + return code->raw() + index; + } + void setPrinter(Sprinter* sp) { + } + + static bool SupportsFloatingPoint() { return true; } + static bool SupportsUnalignedAccesses() { return true; } + static bool SupportsSimd() { return js::jit::SupportsSimd; } + + // Tracks a jump that is patchable after finalization. + void addJumpRelocation(BufferOffset src, Relocation::Kind reloc); + + protected: + // Add a jump whose target is unknown until finalization. + // The jump may not be patched at runtime. + void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind); + + // Add a jump whose target is unknown until finalization, and may change + // thereafter. The jump is patchable at runtime. + size_t addPatchableJump(BufferOffset src, Relocation::Kind kind); + + public: + static uint32_t PatchWrite_NearCallSize() { + return 4; + } + + static uint32_t NopSize() { + return 4; + } + + static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) { + Instruction* dest = (Instruction*)start.raw(); + //printf("patching %p with call to %p\n", start.raw(), toCall.raw()); + bl(dest, ((Instruction*)toCall.raw() - dest)>>2); + + } + static void PatchDataWithValueCheck(CodeLocationLabel label, + PatchedImmPtr newValue, + PatchedImmPtr expected); + + static void PatchDataWithValueCheck(CodeLocationLabel label, + ImmPtr newValue, + ImmPtr expected); + + static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) { + // Raw is going to be the return address. + uint32_t* raw = (uint32_t*)label.raw(); + // Overwrite the 4 bytes before the return address, which will end up being + // the call instruction. + *(raw - 1) = imm.value; + } + static uint32_t AlignDoubleArg(uint32_t offset) { + MOZ_CRASH("AlignDoubleArg()"); + } + static uintptr_t GetPointer(uint8_t* ptr) { + Instruction* i = reinterpret_cast<Instruction*>(ptr); + uint64_t ret = i->Literal64(); + return ret; + } + + // Toggle a jmp or cmp emitted by toggledJump(). + static void ToggleToJmp(CodeLocationLabel inst_); + static void ToggleToCmp(CodeLocationLabel inst_); + static void ToggleCall(CodeLocationLabel inst_, bool enabled); + + static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); + static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); + + static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm); + + static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader, + const ObjectVector& nurseryObjects); + + public: + // A Jump table entry is 2 instructions, with 8 bytes of raw data + static const size_t SizeOfJumpTableEntry = 16; + + struct JumpTableEntry + { + uint32_t ldr; + uint32_t br; + void* data; + + Instruction* getLdr() { + return reinterpret_cast<Instruction*>(&ldr); + } + }; + + // Offset of the patchable target for the given entry. 
+ static const size_t OffsetOfJumpTableEntryPointer = 8; + + public: + void writeCodePointer(AbsoluteLabel* absoluteLabel) { + MOZ_ASSERT(!absoluteLabel->bound()); + uintptr_t x = LabelBase::INVALID_OFFSET; + BufferOffset off = EmitData(&x, sizeof(uintptr_t)); + + // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list + // of uses of an AbsoluteLabel through the assembly. ARM only uses labels + // for the case statements of switch jump tables. Thus, for simplicity, we + // simply treat the AbsoluteLabel as a label and bind it to the offset of + // the jump table entry that needs to be patched. + LabelBase* label = absoluteLabel; + label->bind(off.getOffset()); + } + + void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, + const Disassembler::HeapAccess& heapAccess) + { + MOZ_CRASH("verifyHeapAccessDisassembly"); + } + + protected: + // Because jumps may be relocated to a target inaccessible by a short jump, + // each relocatable jump must have a unique entry in the extended jump table. + // Valid relocatable targets are of type Relocation::JITCODE. + struct JumpRelocation + { + BufferOffset jump; // Offset to the short jump, from the start of the code buffer. + uint32_t extendedTableIndex; // Unique index within the extended jump table. + + JumpRelocation(BufferOffset jump, uint32_t extendedTableIndex) + : jump(jump), extendedTableIndex(extendedTableIndex) + { } + }; + + // Structure for fixing up pc-relative loads/jumps when the machine + // code gets moved (executable copy, gc, etc.). + struct RelativePatch + { + BufferOffset offset; + void* target; + Relocation::Kind kind; + + RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind) + : offset(offset), target(target), kind(kind) + { } + }; + + // List of jumps for which the target is either unknown until finalization, + // or cannot be known due to GC. Each entry here requires a unique entry + // in the extended jump table, and is patched at finalization. + js::Vector<RelativePatch, 8, SystemAllocPolicy> pendingJumps_; + + // Final output formatters. + CompactBufferWriter jumpRelocations_; + CompactBufferWriter dataRelocations_; + CompactBufferWriter preBarriers_; +}; + +static const uint32_t NumIntArgRegs = 8; +static const uint32_t NumFloatArgRegs = 8; + +class ABIArgGenerator +{ + public: + ABIArgGenerator() + : intRegIndex_(0), + floatRegIndex_(0), + stackOffset_(0), + current_() + { } + + ABIArg next(MIRType argType); + ABIArg& current() { return current_; } + uint32_t stackBytesConsumedSoFar() const { return stackOffset_; } + + protected: + unsigned intRegIndex_; + unsigned floatRegIndex_; + uint32_t stackOffset_; + ABIArg current_; +}; + +static constexpr Register ABINonArgReg0 = r8; +static constexpr Register ABINonArgReg1 = r9; +static constexpr Register ABINonArgReg2 = r10; +static constexpr Register ABINonArgReturnReg0 = r8; +static constexpr Register ABINonArgReturnReg1 = r9; + +// TLS pointer argument register for WebAssembly functions. This must not alias +// any other register used for passing function arguments or return values. +// Preserved by WebAssembly functions. +static constexpr Register WasmTlsReg = { Registers::x17 }; + +// Registers used for wasm table calls. These registers must be disjoint +// from the ABI argument registers, WasmTlsReg and each other. 
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0; +static constexpr Register WasmTableCallSigReg = ABINonArgReg1; +static constexpr Register WasmTableCallIndexReg = ABINonArgReg2; + +static inline bool +GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) +{ + if (usedIntArgs >= NumIntArgRegs) + return false; + *out = Register::FromCode(usedIntArgs); + return true; +} + +static inline bool +GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister* out) +{ + if (usedFloatArgs >= NumFloatArgRegs) + return false; + *out = FloatRegister::FromCode(usedFloatArgs); + return true; +} + +// Get a register in which we plan to put a quantity that will be used as an +// integer argument. This differs from GetIntArgReg in that if we have no more +// actual argument registers to use we will fall back on using whatever +// CallTempReg* don't overlap the argument registers, and only fail once those +// run out too. +static inline bool +GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) +{ + if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) + return true; + // Unfortunately, we have to assume things about the point at which + // GetIntArgReg returns false, because we need to know how many registers it + // can allocate. + usedIntArgs -= NumIntArgRegs; + if (usedIntArgs >= NumCallTempNonArgRegs) + return false; + *out = CallTempNonArgRegs[usedIntArgs]; + return true; +} + +inline Imm32 +Imm64::firstHalf() const +{ + return low(); +} + +inline Imm32 +Imm64::secondHalf() const +{ + return hi(); +} + +void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, + ReprotectCode reprotect = DontReprotect); + +static inline void +PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target) +{ + PatchJump(jump_, label); +} + +// Forbids pool generation during a specified interval. Not nestable. +class AutoForbidPools +{ + Assembler* asm_; + + public: + AutoForbidPools(Assembler* asm_, size_t maxInst) + : asm_(asm_) + { + asm_->enterNoPool(maxInst); + } + + ~AutoForbidPools() { + asm_->leaveNoPool(); + } +}; + +} // namespace jit +} // namespace js + +#endif // A64_ASSEMBLER_A64_H_ diff --git a/js/src/jit/arm64/AtomicOperations-arm64.h b/js/src/jit/arm64/AtomicOperations-arm64.h new file mode 100644 index 000000000..b213b0218 --- /dev/null +++ b/js/src/jit/arm64/AtomicOperations-arm64.h @@ -0,0 +1,156 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +/* For documentation, see jit/AtomicOperations.h */ + +#ifndef jit_arm64_AtomicOperations_arm64_h +#define jit_arm64_AtomicOperations_arm64_h + +#include "mozilla/Assertions.h" +#include "mozilla/Types.h" + +inline bool +js::jit::AtomicOperations::isLockfree8() +{ + MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0)); + MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0)); + MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0)); + return true; +} + +inline void +js::jit::AtomicOperations::fenceSeqCst() +{ + __atomic_thread_fence(__ATOMIC_SEQ_CST); +} + +template<typename T> +inline T +js::jit::AtomicOperations::loadSeqCst(T* addr) +{ + MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); + T v; + __atomic_load(addr, &v, __ATOMIC_SEQ_CST); + return v; +} + +template<typename T> +inline void +js::jit::AtomicOperations::storeSeqCst(T* addr, T val) +{ + MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); + __atomic_store(addr, &val, __ATOMIC_SEQ_CST); +} + +template<typename T> +inline T +js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) +{ + MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); + T v; + __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST); + return v; +} + +template<typename T> +inline T +js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) +{ + MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); + __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return oldval; +} + +template<typename T> +inline T +js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) +{ + static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); + return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST); +} + +template<typename T> +inline T +js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) +{ + static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); + return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST); +} + +template<typename T> +inline T +js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) +{ + static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); + return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST); +} + +template<typename T> +inline T +js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) +{ + static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); + return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST); +} + +template<typename T> +inline T +js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) +{ + static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); + return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST); +} + +template <typename T> +inline T +js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) +{ + return *addr; // FIXME (1208663): not yet safe +} + +template <typename T> +inline void +js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) +{ + *addr = val; // FIXME (1208663): not yet safe +} + +inline void +js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, + size_t nbytes) +{ + memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe +} + +inline void +js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, + size_t nbytes) +{ + memmove(dest, src, nbytes); // FIXME (1208663): not yet safe +} + +template<size_t nbytes> +inline void +js::jit::RegionLock::acquire(void* addr) +{ + uint32_t zero = 0; + uint32_t one = 1; + while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) { + zero = 0; + continue; + } +} + +template<size_t nbytes> 
+inline void +js::jit::RegionLock::release(void* addr) +{ + MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock"); + uint32_t zero = 0; + __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST); +} + +#endif // jit_arm64_AtomicOperations_arm64_h diff --git a/js/src/jit/arm64/Bailouts-arm64.cpp b/js/src/jit/arm64/Bailouts-arm64.cpp new file mode 100644 index 000000000..af764e8f7 --- /dev/null +++ b/js/src/jit/arm64/Bailouts-arm64.cpp @@ -0,0 +1,67 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/Bailouts.h" + +using namespace js; +using namespace js::jit; + +namespace js { +namespace jit { + +class BailoutStack +{ + RegisterDump::FPUArray fpregs_; + RegisterDump::GPRArray regs_; + uintptr_t frameSize_; + uintptr_t snapshotOffset_; + + public: + MachineState machineState() { + return MachineState::FromBailout(regs_, fpregs_); + } + uint32_t snapshotOffset() const { + return snapshotOffset_; + } + uint32_t frameSize() const { + return frameSize_; + } + uint8_t* parentStackPointer() { + return (uint8_t*)this + sizeof(BailoutStack); + } +}; + +} // namespace jit +} // namespace js + +BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations, + BailoutStack* bailout) + : machine_(bailout->machineState()) +{ + uint8_t* sp = bailout->parentStackPointer(); + framePointer_ = sp + bailout->frameSize(); + topFrameSize_ = framePointer_ - sp; + + JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken()); + topIonScript_ = script->ionScript(); + + attachOnJitActivation(activations); + snapshotOffset_ = bailout->snapshotOffset(); +} + +BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations, + InvalidationBailoutStack* bailout) + : machine_(bailout->machine()) +{ + framePointer_ = (uint8_t*) bailout->fp(); + topFrameSize_ = framePointer_ - bailout->sp(); + topIonScript_ = bailout->ionScript(); + attachOnJitActivation(activations); + + uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress(); + const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_); + snapshotOffset_ = osiIndex->snapshotOffset(); +} diff --git a/js/src/jit/arm64/BaselineCompiler-arm64.h b/js/src/jit/arm64/BaselineCompiler-arm64.h new file mode 100644 index 000000000..946099ff1 --- /dev/null +++ b/js/src/jit/arm64/BaselineCompiler-arm64.h @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_arm64_BaselineCompiler_arm64_h +#define jit_arm64_BaselineCompiler_arm64_h + +#include "jit/shared/BaselineCompiler-shared.h" + +namespace js { +namespace jit { + +class BaselineCompilerARM64 : public BaselineCompilerShared +{ + protected: + BaselineCompilerARM64(JSContext* cx, TempAllocator& alloc, JSScript* script) + : BaselineCompilerShared(cx, alloc, script) + { } +}; + +typedef BaselineCompilerARM64 BaselineCompilerSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_BaselineCompiler_arm64_h */ diff --git a/js/src/jit/arm64/BaselineIC-arm64.cpp b/js/src/jit/arm64/BaselineIC-arm64.cpp new file mode 100644 index 000000000..54ac47d5b --- /dev/null +++ b/js/src/jit/arm64/BaselineIC-arm64.cpp @@ -0,0 +1,75 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/SharedIC.h" +#include "jit/SharedICHelpers.h" + +#ifdef JS_SIMULATOR_ARM64 +#include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/BaselineCompiler-arm64.h" +#include "jit/arm64/vixl/Debugger-vixl.h" +#endif + +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace js::jit; + +namespace js { +namespace jit { + +// ICCompare_Int32 + +bool +ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + // Guard that R0 is an integer and R1 is an integer. + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + masm.branchTestInt32(Assembler::NotEqual, R1, &failure); + + // Compare payload regs of R0 and R1. + Assembler::Condition cond = JSOpToCondition(op, /* signed = */true); + masm.cmp32(R0.valueReg(), R1.valueReg()); + masm.Cset(ARMRegister(R0.valueReg(), 32), cond); + + // Result is implicitly boxed already. + masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0); + EmitReturnFromIC(masm); + + // Failure case - jump to next stub. + masm.bind(&failure); + EmitStubGuardFailure(masm); + + return true; +} + +bool +ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure, isNaN; + masm.ensureDouble(R0, FloatReg0, &failure); + masm.ensureDouble(R1, FloatReg1, &failure); + + Register dest = R0.valueReg(); + + Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op); + Assembler::Condition cond = Assembler::ConditionFromDoubleCondition(doubleCond); + + masm.compareDouble(doubleCond, FloatReg0, FloatReg1); + masm.Cset(ARMRegister(dest, 32), cond); + + masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0); + EmitReturnFromIC(masm); + + // Failure case - jump to next stub. + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp new file mode 100644 index 000000000..83330a262 --- /dev/null +++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp @@ -0,0 +1,783 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/arm64/CodeGenerator-arm64.h" + +#include "mozilla/MathAlgorithms.h" + +#include "jscntxt.h" +#include "jscompartment.h" +#include "jsnum.h" + +#include "jit/CodeGenerator.h" +#include "jit/JitCompartment.h" +#include "jit/JitFrames.h" +#include "jit/MIR.h" +#include "jit/MIRGraph.h" +#include "vm/Shape.h" +#include "vm/TraceLogging.h" + +#include "jsscriptinlines.h" + +#include "jit/shared/CodeGenerator-shared-inl.h" + +using namespace js; +using namespace js::jit; + +using mozilla::FloorLog2; +using mozilla::NegativeInfinity; +using JS::GenericNaN; + +// shared +CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm) + : CodeGeneratorShared(gen, graph, masm) +{ +} + +bool +CodeGeneratorARM64::generateOutOfLineCode() +{ + MOZ_CRASH("generateOutOfLineCode"); +} + +void +CodeGeneratorARM64::emitBranch(Assembler::Condition cond, MBasicBlock* mirTrue, MBasicBlock* mirFalse) +{ + MOZ_CRASH("emitBranch"); +} + +void +OutOfLineBailout::accept(CodeGeneratorARM64* codegen) +{ + MOZ_CRASH("accept"); +} + +void +CodeGeneratorARM64::visitTestIAndBranch(LTestIAndBranch* test) +{ + MOZ_CRASH("visitTestIAndBranch"); +} + +void +CodeGeneratorARM64::visitCompare(LCompare* comp) +{ + MOZ_CRASH("visitCompare"); +} + +void +CodeGeneratorARM64::visitCompareAndBranch(LCompareAndBranch* comp) +{ + MOZ_CRASH("visitCompareAndBranch"); +} + +void +CodeGeneratorARM64::bailoutIf(Assembler::Condition condition, LSnapshot* snapshot) +{ + MOZ_CRASH("bailoutIf"); +} + +void +CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot) +{ + MOZ_CRASH("bailoutFrom"); +} + +void +CodeGeneratorARM64::bailout(LSnapshot* snapshot) +{ + MOZ_CRASH("bailout"); +} + +void +CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool) +{ + MOZ_CRASH("visitOutOfLineBailout"); +} + +void +CodeGeneratorARM64::visitMinMaxD(LMinMaxD* ins) +{ + MOZ_CRASH("visitMinMaxD"); +} + +void +CodeGeneratorARM64::visitMinMaxF(LMinMaxF* ins) +{ + MOZ_CRASH("visitMinMaxF"); +} + +void +CodeGeneratorARM64::visitAbsD(LAbsD* ins) +{ + MOZ_CRASH("visitAbsD"); +} + +void +CodeGeneratorARM64::visitAbsF(LAbsF* ins) +{ + MOZ_CRASH("visitAbsF"); +} + +void +CodeGeneratorARM64::visitSqrtD(LSqrtD* ins) +{ + MOZ_CRASH("visitSqrtD"); +} + +void +CodeGeneratorARM64::visitSqrtF(LSqrtF* ins) +{ + MOZ_CRASH("visitSqrtF"); +} + +// FIXME: Uh, is this a static function? It looks like it is... +template <typename T> +ARMRegister +toWRegister(const T* a) +{ + return ARMRegister(ToRegister(a), 32); +} + +// FIXME: Uh, is this a static function? It looks like it is... 
+template <typename T> +ARMRegister +toXRegister(const T* a) +{ + return ARMRegister(ToRegister(a), 64); +} + +js::jit::Operand +toWOperand(const LAllocation* a) +{ + MOZ_CRASH("toWOperand"); +} + +vixl::CPURegister +ToCPURegister(const LAllocation* a, Scalar::Type type) +{ + MOZ_CRASH("ToCPURegister"); +} + +vixl::CPURegister +ToCPURegister(const LDefinition* d, Scalar::Type type) +{ + return ToCPURegister(d->output(), type); +} + +void +CodeGeneratorARM64::visitAddI(LAddI* ins) +{ + MOZ_CRASH("visitAddI"); +} + +void +CodeGeneratorARM64::visitSubI(LSubI* ins) +{ + MOZ_CRASH("visitSubI"); +} + +void +CodeGeneratorARM64::visitMulI(LMulI* ins) +{ + MOZ_CRASH("visitMulI"); +} + + +void +CodeGeneratorARM64::visitDivI(LDivI* ins) +{ + MOZ_CRASH("visitDivI"); +} + +void +CodeGeneratorARM64::visitDivPowTwoI(LDivPowTwoI* ins) +{ + MOZ_CRASH("CodeGeneratorARM64::visitDivPowTwoI"); +} + +void +CodeGeneratorARM64::modICommon(MMod* mir, Register lhs, Register rhs, Register output, + LSnapshot* snapshot, Label& done) +{ + MOZ_CRASH("CodeGeneratorARM64::modICommon"); +} + +void +CodeGeneratorARM64::visitModI(LModI* ins) +{ + MOZ_CRASH("visitModI"); +} + +void +CodeGeneratorARM64::visitModPowTwoI(LModPowTwoI* ins) +{ + MOZ_CRASH("visitModPowTwoI"); +} + +void +CodeGeneratorARM64::visitModMaskI(LModMaskI* ins) +{ + MOZ_CRASH("CodeGeneratorARM64::visitModMaskI"); +} + +void +CodeGeneratorARM64::visitBitNotI(LBitNotI* ins) +{ + MOZ_CRASH("visitBitNotI"); +} + +void +CodeGeneratorARM64::visitBitOpI(LBitOpI* ins) +{ + MOZ_CRASH("visitBitOpI"); +} + +void +CodeGeneratorARM64::visitShiftI(LShiftI* ins) +{ + MOZ_CRASH("visitShiftI"); +} + +void +CodeGeneratorARM64::visitUrshD(LUrshD* ins) +{ + MOZ_CRASH("visitUrshD"); +} + +void +CodeGeneratorARM64::visitPowHalfD(LPowHalfD* ins) +{ + MOZ_CRASH("visitPowHalfD"); +} + +MoveOperand +CodeGeneratorARM64::toMoveOperand(const LAllocation a) const +{ + MOZ_CRASH("toMoveOperand"); +} + +class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM64> +{ + MTableSwitch* mir_; + Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_; + + void accept(CodeGeneratorARM64* codegen) { + codegen->visitOutOfLineTableSwitch(this); + } + + public: + OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir) + : mir_(mir), + codeLabels_(alloc) + { } + + MTableSwitch* mir() const { + return mir_; + } + + bool addCodeLabel(CodeLabel label) { + return codeLabels_.append(label); + } + CodeLabel codeLabel(unsigned i) { + return codeLabels_[i]; + } +}; + +void +CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) +{ + MOZ_CRASH("visitOutOfLineTableSwitch"); +} + +void +CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir, Register index_, Register base_) +{ + MOZ_CRASH("emitTableSwitchDispatch"); +} + +void +CodeGeneratorARM64::visitMathD(LMathD* math) +{ + MOZ_CRASH("visitMathD"); +} + +void +CodeGeneratorARM64::visitMathF(LMathF* math) +{ + MOZ_CRASH("visitMathF"); +} + +void +CodeGeneratorARM64::visitFloor(LFloor* lir) +{ + MOZ_CRASH("visitFloor"); +} + +void +CodeGeneratorARM64::visitFloorF(LFloorF* lir) +{ + MOZ_CRASH("visitFloorF"); +} + +void +CodeGeneratorARM64::visitCeil(LCeil* lir) +{ + MOZ_CRASH("visitCeil"); +} + +void +CodeGeneratorARM64::visitCeilF(LCeilF* lir) +{ + MOZ_CRASH("visitCeilF"); +} + +void +CodeGeneratorARM64::visitRound(LRound* lir) +{ + MOZ_CRASH("visitRound"); +} + +void +CodeGeneratorARM64::visitRoundF(LRoundF* lir) +{ + MOZ_CRASH("visitRoundF"); +} + +void +CodeGeneratorARM64::visitClzI(LClzI* lir) +{ + 
MOZ_CRASH("visitClzI"); +} + +void +CodeGeneratorARM64::visitCtzI(LCtzI* lir) +{ + MOZ_CRASH("visitCtzI"); +} + +void +CodeGeneratorARM64::emitRoundDouble(FloatRegister src, Register dest, Label* fail) +{ + MOZ_CRASH("CodeGeneratorARM64::emitRoundDouble"); +} + +void +CodeGeneratorARM64::visitTruncateDToInt32(LTruncateDToInt32* ins) +{ + MOZ_CRASH("visitTruncateDToInt32"); +} + +void +CodeGeneratorARM64::visitTruncateFToInt32(LTruncateFToInt32* ins) +{ + MOZ_CRASH("visitTruncateFToInt32"); +} + +static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 }; + +FrameSizeClass +FrameSizeClass::FromDepth(uint32_t frameDepth) +{ + return FrameSizeClass::None(); +} + +FrameSizeClass +FrameSizeClass::ClassLimit() +{ + return FrameSizeClass(0); +} + +uint32_t +FrameSizeClass::frameSize() const +{ + MOZ_CRASH("arm64 does not use frame size classes"); +} + +ValueOperand +CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos) +{ + return ValueOperand(ToRegister(ins->getOperand(pos))); +} + +ValueOperand +CodeGeneratorARM64::ToOutValue(LInstruction* ins) +{ + Register payloadReg = ToRegister(ins->getDef(0)); + return ValueOperand(payloadReg); +} + +ValueOperand +CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos) +{ + MOZ_CRASH("CodeGeneratorARM64::ToTempValue"); +} + +void +CodeGeneratorARM64::visitValue(LValue* value) +{ + MOZ_CRASH("visitValue"); +} + +void +CodeGeneratorARM64::visitBox(LBox* box) +{ + MOZ_CRASH("visitBox"); +} + +void +CodeGeneratorARM64::visitUnbox(LUnbox* unbox) +{ + MOZ_CRASH("visitUnbox"); +} + +void +CodeGeneratorARM64::visitDouble(LDouble* ins) +{ + MOZ_CRASH("visitDouble"); +} + +void +CodeGeneratorARM64::visitFloat32(LFloat32* ins) +{ + MOZ_CRASH("visitFloat32"); +} + +Register +CodeGeneratorARM64::splitTagForTest(const ValueOperand& value) +{ + MOZ_CRASH("splitTagForTest"); +} + +void +CodeGeneratorARM64::visitTestDAndBranch(LTestDAndBranch* test) +{ + MOZ_CRASH("visitTestDAndBranch"); +} + +void +CodeGeneratorARM64::visitTestFAndBranch(LTestFAndBranch* test) +{ + MOZ_CRASH("visitTestFAndBranch"); +} + +void +CodeGeneratorARM64::visitCompareD(LCompareD* comp) +{ + MOZ_CRASH("visitCompareD"); +} + +void +CodeGeneratorARM64::visitCompareF(LCompareF* comp) +{ + MOZ_CRASH("visitCompareF"); +} + +void +CodeGeneratorARM64::visitCompareDAndBranch(LCompareDAndBranch* comp) +{ + MOZ_CRASH("visitCompareDAndBranch"); +} + +void +CodeGeneratorARM64::visitCompareFAndBranch(LCompareFAndBranch* comp) +{ + MOZ_CRASH("visitCompareFAndBranch"); +} + +void +CodeGeneratorARM64::visitCompareB(LCompareB* lir) +{ + MOZ_CRASH("visitCompareB"); +} + +void +CodeGeneratorARM64::visitCompareBAndBranch(LCompareBAndBranch* lir) +{ + MOZ_CRASH("visitCompareBAndBranch"); +} + +void +CodeGeneratorARM64::visitCompareBitwise(LCompareBitwise* lir) +{ + MOZ_CRASH("visitCompareBitwise"); +} + +void +CodeGeneratorARM64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir) +{ + MOZ_CRASH("visitCompareBitwiseAndBranch"); +} + +void +CodeGeneratorARM64::visitBitAndAndBranch(LBitAndAndBranch* baab) +{ + MOZ_CRASH("visitBitAndAndBranch"); +} + +void +CodeGeneratorARM64::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) +{ + MOZ_CRASH("visitWasmUint32ToDouble"); +} + +void +CodeGeneratorARM64::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) +{ + MOZ_CRASH("visitWasmUint32ToFloat32"); +} + +void +CodeGeneratorARM64::visitNotI(LNotI* ins) +{ + MOZ_CRASH("visitNotI"); +} + +// NZCV +// NAN -> 0011 +// == -> 0110 +// < -> 1000 +// > -> 0010 +void +CodeGeneratorARM64::visitNotD(LNotD* ins) +{ + 
MOZ_CRASH("visitNotD"); +} + +void +CodeGeneratorARM64::visitNotF(LNotF* ins) +{ + MOZ_CRASH("visitNotF"); +} + +void +CodeGeneratorARM64::visitLoadSlotV(LLoadSlotV* load) +{ + MOZ_CRASH("CodeGeneratorARM64::visitLoadSlotV"); +} + +void +CodeGeneratorARM64::visitLoadSlotT(LLoadSlotT* load) +{ + MOZ_CRASH("CodeGeneratorARM64::visitLoadSlotT"); +} + +void +CodeGeneratorARM64::visitStoreSlotT(LStoreSlotT* store) +{ + MOZ_CRASH("CodeGeneratorARM64::visitStoreSlotT"); +} + +void +CodeGeneratorARM64::visitLoadElementT(LLoadElementT* load) +{ + MOZ_CRASH("CodeGeneratorARM64::visitLoadElementT"); +} + +void +CodeGeneratorARM64::storeElementTyped(const LAllocation* value, MIRType valueType, + MIRType elementType, Register elements, + const LAllocation* index) +{ + MOZ_CRASH("CodeGeneratorARM64::storeElementTyped"); +} + +void +CodeGeneratorARM64::visitGuardShape(LGuardShape* guard) +{ + MOZ_CRASH("visitGuardShape"); +} + +void +CodeGeneratorARM64::visitGuardObjectGroup(LGuardObjectGroup* guard) +{ + MOZ_CRASH("visitGuardObjectGroup"); +} + +void +CodeGeneratorARM64::visitGuardClass(LGuardClass* guard) +{ + MOZ_CRASH("CodeGeneratorARM64::visitGuardClass"); +} + +void +CodeGeneratorARM64::visitInterruptCheck(LInterruptCheck* lir) +{ + MOZ_CRASH("CodeGeneratorARM64::visitInterruptCheck"); +} + +void +CodeGeneratorARM64::generateInvalidateEpilogue() +{ + MOZ_CRASH("generateInvalidateEpilogue"); +} + +template <class U> +Register +getBase(U* mir) +{ + switch (mir->base()) { + case U::Heap: return HeapReg; + case U::Global: return GlobalReg; + } + return InvalidReg; +} + +void +CodeGeneratorARM64::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins) +{ + MOZ_CRASH("CodeGeneratorARM64::visitLoadTypedArrayElementStatic"); +} + +void +CodeGeneratorARM64::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins) +{ + MOZ_CRASH("CodeGeneratorARM64::visitStoreTypedArrayElementStatic"); +} + +void +CodeGeneratorARM64::visitWasmCall(LWasmCall* ins) +{ + MOZ_CRASH("vistWasmCall"); +} + +void +CodeGeneratorARM64::visitWasmCallI64(LWasmCallI64* ins) +{ + MOZ_CRASH("vistWasmCallI64"); +} + +void +CodeGeneratorARM64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) +{ + MOZ_CRASH("visitAsmJSLoadHeap"); +} + +void +CodeGeneratorARM64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) +{ + MOZ_CRASH("visitAsmJSStoreHeap"); +} + +void +CodeGeneratorARM64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins) +{ + MOZ_CRASH("visitAsmJSCompareExchangeHeap"); +} + +void +CodeGeneratorARM64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins) +{ + MOZ_CRASH("visitAsmJSAtomicBinopHeap"); +} + +void +CodeGeneratorARM64::visitWasmStackArg(LWasmStackArg* ins) +{ + MOZ_CRASH("visitWasmStackArg"); +} + +void +CodeGeneratorARM64::visitUDiv(LUDiv* ins) +{ + MOZ_CRASH("visitUDiv"); +} + +void +CodeGeneratorARM64::visitUMod(LUMod* ins) +{ + MOZ_CRASH("visitUMod"); +} + +void +CodeGeneratorARM64::visitEffectiveAddress(LEffectiveAddress* ins) +{ + MOZ_CRASH("visitEffectiveAddress"); +} + +void +CodeGeneratorARM64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins) +{ + MOZ_CRASH("visitWasmLoadGlobalVar"); +} + +void +CodeGeneratorARM64::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins) +{ + MOZ_CRASH("visitWasmStoreGlobalVar"); +} + +void +CodeGeneratorARM64::visitNegI(LNegI* ins) +{ + MOZ_CRASH("visitNegI"); +} + +void +CodeGeneratorARM64::visitNegD(LNegD* ins) +{ + MOZ_CRASH("visitNegD"); +} + +void +CodeGeneratorARM64::visitNegF(LNegF* ins) +{ + MOZ_CRASH("visitNegF"); +} + +void 
+CodeGeneratorARM64::setReturnDoubleRegs(LiveRegisterSet* regs) +{ + MOZ_ASSERT(ReturnFloat32Reg.code_ == FloatRegisters::s0); + MOZ_ASSERT(ReturnDoubleReg.code_ == FloatRegisters::d0); + FloatRegister s1 = {FloatRegisters::s1, FloatRegisters::Single}; + regs->add(ReturnFloat32Reg); + regs->add(s1); + regs->add(ReturnDoubleReg); +} + +void +CodeGeneratorARM64::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir) +{ + Register elements = ToRegister(lir->elements()); + AnyRegister output = ToAnyRegister(lir->output()); + Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp()); + + Register oldval = ToRegister(lir->oldval()); + Register newval = ToRegister(lir->newval()); + + Scalar::Type arrayType = lir->mir()->arrayType(); + int width = Scalar::byteSize(arrayType); + + if (lir->index()->isConstant()) { + Address dest(elements, ToInt32(lir->index()) * width); + masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output); + } else { + BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width)); + masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output); + } +} + +void +CodeGeneratorARM64::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir) +{ + Register elements = ToRegister(lir->elements()); + AnyRegister output = ToAnyRegister(lir->output()); + Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp()); + + Register value = ToRegister(lir->value()); + + Scalar::Type arrayType = lir->mir()->arrayType(); + int width = Scalar::byteSize(arrayType); + + if (lir->index()->isConstant()) { + Address dest(elements, ToInt32(lir->index()) * width); + masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output); + } else { + BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width)); + masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output); + } +} + diff --git a/js/src/jit/arm64/CodeGenerator-arm64.h b/js/src/jit/arm64/CodeGenerator-arm64.h new file mode 100644 index 000000000..63199d1fd --- /dev/null +++ b/js/src/jit/arm64/CodeGenerator-arm64.h @@ -0,0 +1,262 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_arm64_CodeGenerator_arm64_h +#define jit_arm64_CodeGenerator_arm64_h + +#include "jit/arm64/Assembler-arm64.h" +#include "jit/shared/CodeGenerator-shared.h" + +namespace js { +namespace jit { + +class OutOfLineBailout; +class OutOfLineTableSwitch; + +class CodeGeneratorARM64 : public CodeGeneratorShared +{ + friend class MoveResolverARM64; + + CodeGeneratorARM64* thisFromCtor() { return this; } + + public: + CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm); + + protected: + NonAssertingLabel deoptLabel_; + + MoveOperand toMoveOperand(const LAllocation a) const; + + void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot); + void bailoutFrom(Label* label, LSnapshot* snapshot); + void bailout(LSnapshot* snapshot); + + template <typename T1, typename T2> + void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { + masm.cmpPtr(lhs, rhs); + return bailoutIf(c, snapshot); + } + void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) { + masm.testPtr(lhs, rhs); + return bailoutIf(c, snapshot); + } + template <typename T1, typename T2> + void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { + masm.cmp32(lhs, rhs); + return bailoutIf(c, snapshot); + } + template <typename T1, typename T2> + void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { + masm.test32(lhs, rhs); + return bailoutIf(c, snapshot); + } + void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) { + masm.test32(reg, Imm32(0xFF)); + return bailoutIf(Assembler::Zero, snapshot); + } + + protected: + bool generateOutOfLineCode(); + + void emitRoundDouble(FloatRegister src, Register dest, Label* fail); + + // Emits a branch that directs control flow to the true block if |cond| is + // true, and the false block if |cond| is false. + void emitBranch(Assembler::Condition cond, MBasicBlock* ifTrue, MBasicBlock* ifFalse); + + void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + cond = masm.testNull(cond, value); + emitBranch(cond, ifTrue, ifFalse); + } + void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + cond = masm.testUndefined(cond, value); + emitBranch(cond, ifTrue, ifFalse); + } + void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + cond = masm.testObject(cond, value); + emitBranch(cond, ifTrue, ifFalse); + } + void testZeroEmitBranch(Assembler::Condition cond, Register reg, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); + masm.cmpPtr(reg, ImmWord(0)); + emitBranch(cond, ifTrue, ifFalse); + } + + void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base); + + public: + // Instruction visitors. 
+ virtual void visitMinMaxD(LMinMaxD* ins); + virtual void visitMinMaxF(LMinMaxF* math); + virtual void visitAbsD(LAbsD* ins); + virtual void visitAbsF(LAbsF* ins); + virtual void visitSqrtD(LSqrtD* ins); + virtual void visitSqrtF(LSqrtF* ins); + virtual void visitAddI(LAddI* ins); + virtual void visitSubI(LSubI* ins); + virtual void visitBitNotI(LBitNotI* ins); + virtual void visitBitOpI(LBitOpI* ins); + + virtual void visitMulI(LMulI* ins); + + virtual void visitDivI(LDivI* ins); + virtual void visitDivPowTwoI(LDivPowTwoI* ins); + virtual void visitModI(LModI* ins); + virtual void visitModPowTwoI(LModPowTwoI* ins); + virtual void visitModMaskI(LModMaskI* ins); + virtual void visitPowHalfD(LPowHalfD* ins); + virtual void visitShiftI(LShiftI* ins); + virtual void visitUrshD(LUrshD* ins); + + virtual void visitTestIAndBranch(LTestIAndBranch* test); + virtual void visitCompare(LCompare* comp); + virtual void visitCompareAndBranch(LCompareAndBranch* comp); + virtual void visitTestDAndBranch(LTestDAndBranch* test); + virtual void visitTestFAndBranch(LTestFAndBranch* test); + virtual void visitCompareD(LCompareD* comp); + virtual void visitCompareF(LCompareF* comp); + virtual void visitCompareDAndBranch(LCompareDAndBranch* comp); + virtual void visitCompareFAndBranch(LCompareFAndBranch* comp); + virtual void visitCompareB(LCompareB* lir); + virtual void visitCompareBAndBranch(LCompareBAndBranch* lir); + virtual void visitCompareBitwise(LCompareBitwise* lir); + virtual void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir); + virtual void visitBitAndAndBranch(LBitAndAndBranch* baab); + virtual void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir); + virtual void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir); + virtual void visitNotI(LNotI* ins); + virtual void visitNotD(LNotD* ins); + virtual void visitNotF(LNotF* ins); + + virtual void visitMathD(LMathD* math); + virtual void visitMathF(LMathF* math); + virtual void visitFloor(LFloor* lir); + virtual void visitFloorF(LFloorF* lir); + virtual void visitCeil(LCeil* lir); + virtual void visitCeilF(LCeilF* lir); + virtual void visitRound(LRound* lir); + virtual void visitRoundF(LRoundF* lir); + virtual void visitTruncateDToInt32(LTruncateDToInt32* ins); + virtual void visitTruncateFToInt32(LTruncateFToInt32* ins); + + virtual void visitClzI(LClzI* lir); + virtual void visitCtzI(LCtzI* lir); + // Out of line visitors. + void visitOutOfLineBailout(OutOfLineBailout* ool); + void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool); + + protected: + ValueOperand ToValue(LInstruction* ins, size_t pos); + ValueOperand ToOutValue(LInstruction* ins); + ValueOperand ToTempValue(LInstruction* ins, size_t pos); + + // Functions for LTestVAndBranch. 
+ Register splitTagForTest(const ValueOperand& value); + + void storeElementTyped(const LAllocation* value, MIRType valueType, MIRType elementType, + Register elements, const LAllocation* index); + + void divICommon(MDiv* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot, + Label& done); + void modICommon(MMod* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot, + Label& done); + + public: + void visitBox(LBox* box); + void visitUnbox(LUnbox* unbox); + void visitValue(LValue* value); + void visitDouble(LDouble* ins); + void visitFloat32(LFloat32* ins); + + void visitLoadSlotV(LLoadSlotV* load); + void visitLoadSlotT(LLoadSlotT* load); + void visitStoreSlotT(LStoreSlotT* load); + + void visitLoadElementT(LLoadElementT* load); + + void visitGuardShape(LGuardShape* guard); + void visitGuardObjectGroup(LGuardObjectGroup* guard); + void visitGuardClass(LGuardClass* guard); + + void visitInterruptCheck(LInterruptCheck* lir); + + void visitNegI(LNegI* lir); + void visitNegD(LNegD* lir); + void visitNegF(LNegF* lir); + void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins); + void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins); + void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir); + void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir); + void visitWasmCall(LWasmCall* ins); + void visitWasmCallI64(LWasmCallI64* ins); + void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins); + void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins); + void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins); + void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins); + void visitWasmStackArg(LWasmStackArg* ins); + + void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins); + void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins); + + void generateInvalidateEpilogue(); + + void setReturnDoubleRegs(LiveRegisterSet* regs); + + protected: + void postWasmCall(LWasmCall* lir) { + MOZ_CRASH("postWasmCall"); + } + + void visitEffectiveAddress(LEffectiveAddress* ins); + void visitUDiv(LUDiv* ins); + void visitUMod(LUMod* ins); + + public: + // Unimplemented SIMD instructions. + void visitSimdSplatX4(LSimdSplatX4* lir) { MOZ_CRASH("NYI"); } + void visitSimd128Int(LSimd128Int* ins) { MOZ_CRASH("NYI"); } + void visitSimd128Float(LSimd128Float* ins) { MOZ_CRASH("NYI"); } + void visitSimdExtractElementI(LSimdExtractElementI* ins) { MOZ_CRASH("NYI"); } + void visitSimdExtractElementF(LSimdExtractElementF* ins) { MOZ_CRASH("NYI"); } + void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir) { MOZ_CRASH("NYI"); } + void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir) { MOZ_CRASH("NYI"); } + void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir) { MOZ_CRASH("NYI"); } + void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir) { MOZ_CRASH("NYI"); } + void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir) { MOZ_CRASH("NYI"); } +}; + +typedef CodeGeneratorARM64 CodeGeneratorSpecific; + +// An out-of-line bailout thunk. +class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorARM64> +{ + protected: // Silence Clang warning. 
+ LSnapshot* snapshot_; + + public: + OutOfLineBailout(LSnapshot* snapshot) + : snapshot_(snapshot) + { } + + void accept(CodeGeneratorARM64* codegen); + + LSnapshot* snapshot() const { + return snapshot_; + } +}; + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_CodeGenerator_arm64_h */ diff --git a/js/src/jit/arm64/LIR-arm64.h b/js/src/jit/arm64/LIR-arm64.h new file mode 100644 index 000000000..7cfb784c0 --- /dev/null +++ b/js/src/jit/arm64/LIR-arm64.h @@ -0,0 +1,395 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_LIR_arm64_h +#define jit_arm64_LIR_arm64_h + +namespace js { +namespace jit { + +class LUnboxBase : public LInstructionHelper<1, 1, 0> +{ + public: + LUnboxBase(const LAllocation& input) { + setOperand(0, input); + } + + static const size_t Input = 0; + + MUnbox* mir() const { + return mir_->toUnbox(); + } +}; + +class LUnbox : public LUnboxBase +{ + public: + LIR_HEADER(Unbox); + + LUnbox(const LAllocation& input) + : LUnboxBase(input) + { } + + const char* extraName() const { + return StringFromMIRType(mir()->type()); + } +}; + +class LUnboxFloatingPoint : public LUnboxBase +{ + MIRType type_; + + public: + LIR_HEADER(UnboxFloatingPoint); + + LUnboxFloatingPoint(const LAllocation& input, MIRType type) + : LUnboxBase(input), + type_(type) + { } + + MIRType type() const { + return type_; + } + const char* extraName() const { + return StringFromMIRType(type_); + } +}; + +// Convert a 32-bit unsigned integer to a double. +class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> +{ + public: + LIR_HEADER(WasmUint32ToDouble) + + LWasmUint32ToDouble(const LAllocation& input) { + setOperand(0, input); + } +}; + +// Convert a 32-bit unsigned integer to a float32. +class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> +{ + public: + LIR_HEADER(WasmUint32ToFloat32) + + LWasmUint32ToFloat32(const LAllocation& input) { + setOperand(0, input); + } +}; + +class LDivI : public LBinaryMath<1> +{ + public: + LIR_HEADER(DivI); + + LDivI(const LAllocation& lhs, const LAllocation& rhs, + const LDefinition& temp) + { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, temp); + } + + MDiv* mir() const { + return mir_->toDiv(); + } +}; + +// LSoftDivI is a software divide for ARM cores that don't support a hardware +// divide instruction. +// +// It is implemented as a proper C function so it trashes r0, r1, r2 and r3. +// The call also trashes lr, and has the ability to trash ip. The function also +// takes two arguments (dividend in r0, divisor in r1). The LInstruction gets +// encoded such that the divisor and dividend are passed in their apropriate +// registers and end their life at the start of the instruction by the use of +// useFixedAtStart. The result is returned in r0 and the other three registers +// that can be trashed are marked as temps. For the time being, the link +// register is not marked as trashed because we never allocate to the link +// register. The FP registers are not trashed. 
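+// Note that the register names above (r0-r3, lr, ip) describe the 32-bit ARM helper-call
+// convention; this LIR node appears to be carried over from the ARM port.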
+class LSoftDivI : public LBinaryMath<3> +{ + public: + LIR_HEADER(SoftDivI); + + LSoftDivI(const LAllocation& lhs, const LAllocation& rhs, + const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3) { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, temp1); + setTemp(1, temp2); + setTemp(2, temp3); + } + + MDiv* mir() const { + return mir_->toDiv(); + } +}; + +class LDivPowTwoI : public LInstructionHelper<1, 1, 0> +{ + const int32_t shift_; + + public: + LIR_HEADER(DivPowTwoI) + + LDivPowTwoI(const LAllocation& lhs, int32_t shift) + : shift_(shift) + { + setOperand(0, lhs); + } + + const LAllocation* numerator() { + return getOperand(0); + } + + int32_t shift() { + return shift_; + } + + MDiv* mir() const { + return mir_->toDiv(); + } +}; + +class LModI : public LBinaryMath<1> +{ + public: + LIR_HEADER(ModI); + + LModI(const LAllocation& lhs, const LAllocation& rhs, + const LDefinition& callTemp) + { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, callTemp); + } + + const LDefinition* callTemp() { + return getTemp(0); + } + + MMod* mir() const { + return mir_->toMod(); + } +}; + +class LSoftModI : public LBinaryMath<4> +{ + public: + LIR_HEADER(SoftModI); + + LSoftModI(const LAllocation& lhs, const LAllocation& rhs, + const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3, + const LDefinition& callTemp) + { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, temp1); + setTemp(1, temp2); + setTemp(2, temp3); + setTemp(3, callTemp); + } + + const LDefinition* callTemp() { + return getTemp(3); + } + + MMod* mir() const { + return mir_->toMod(); + } +}; + +class LModPowTwoI : public LInstructionHelper<1, 1, 0> +{ + const int32_t shift_; + + public: + LIR_HEADER(ModPowTwoI); + int32_t shift() + { + return shift_; + } + + LModPowTwoI(const LAllocation& lhs, int32_t shift) + : shift_(shift) + { + setOperand(0, lhs); + } + + MMod* mir() const { + return mir_->toMod(); + } +}; + +class LModMaskI : public LInstructionHelper<1, 1, 1> +{ + const int32_t shift_; + + public: + LIR_HEADER(ModMaskI); + + LModMaskI(const LAllocation& lhs, const LDefinition& temp1, int32_t shift) + : shift_(shift) + { + setOperand(0, lhs); + setTemp(0, temp1); + } + + int32_t shift() const { + return shift_; + } + + MMod* mir() const { + return mir_->toMod(); + } +}; + +// Takes a tableswitch with an integer to decide +class LTableSwitch : public LInstructionHelper<0, 1, 1> +{ + public: + LIR_HEADER(TableSwitch); + + LTableSwitch(const LAllocation& in, const LDefinition& inputCopy, MTableSwitch* ins) { + setOperand(0, in); + setTemp(0, inputCopy); + setMir(ins); + } + + MTableSwitch* mir() const { + return mir_->toTableSwitch(); + } + + const LAllocation* index() { + return getOperand(0); + } + const LDefinition* tempInt() { + return getTemp(0); + } + // This is added to share the same CodeGenerator prefixes. 
+ const LDefinition* tempPointer() { + return nullptr; + } +}; + +// Takes a tableswitch with an integer to decide +class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2> +{ + public: + LIR_HEADER(TableSwitchV); + + LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy, + const LDefinition& floatCopy, MTableSwitch* ins) + { + setBoxOperand(InputValue, input); + setTemp(0, inputCopy); + setTemp(1, floatCopy); + setMir(ins); + } + + MTableSwitch* mir() const { + return mir_->toTableSwitch(); + } + + static const size_t InputValue = 0; + + const LDefinition* tempInt() { + return getTemp(0); + } + const LDefinition* tempFloat() { + return getTemp(1); + } + const LDefinition* tempPointer() { + return nullptr; + } +}; + +class LGuardShape : public LInstructionHelper<0, 1, 1> +{ + public: + LIR_HEADER(GuardShape); + + LGuardShape(const LAllocation& in, const LDefinition& temp) { + setOperand(0, in); + setTemp(0, temp); + } + const MGuardShape* mir() const { + return mir_->toGuardShape(); + } + const LDefinition* tempInt() { + return getTemp(0); + } +}; + +class LGuardObjectGroup : public LInstructionHelper<0, 1, 1> +{ + public: + LIR_HEADER(GuardObjectGroup); + + LGuardObjectGroup(const LAllocation& in, const LDefinition& temp) { + setOperand(0, in); + setTemp(0, temp); + } + const MGuardObjectGroup* mir() const { + return mir_->toGuardObjectGroup(); + } + const LDefinition* tempInt() { + return getTemp(0); + } +}; + +class LMulI : public LBinaryMath<0> +{ + public: + LIR_HEADER(MulI); + + MMul* mir() { + return mir_->toMul(); + } +}; + +class LUDiv : public LBinaryMath<0> +{ + public: + LIR_HEADER(UDiv); + + MDiv* mir() { + return mir_->toDiv(); + } +}; + +class LUMod : public LBinaryMath<0> +{ + public: + LIR_HEADER(UMod); + + MMod* mir() { + return mir_->toMod(); + } +}; + +// This class performs a simple x86 'div', yielding either a quotient or remainder depending on +// whether this instruction is defined to output eax (quotient) or edx (remainder). +class LSoftUDivOrMod : public LBinaryMath<3> +{ + public: + LIR_HEADER(SoftUDivOrMod); + + LSoftUDivOrMod(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp1, + const LDefinition& temp2, const LDefinition& temp3) { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, temp1); + setTemp(1, temp2); + setTemp(2, temp3); + } +}; + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_LIR_arm64_h */ diff --git a/js/src/jit/arm64/LOpcodes-arm64.h b/js/src/jit/arm64/LOpcodes-arm64.h new file mode 100644 index 000000000..5d9e05e30 --- /dev/null +++ b/js/src/jit/arm64/LOpcodes-arm64.h @@ -0,0 +1,20 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_arm64_LOpcodes_arm64_h +#define jit_arm64_LOpcodes_arm64_h + +#include "jit/shared/LOpcodes-shared.h" + +#define LIR_CPU_OPCODE_LIST(_) \ + _(SoftDivI) \ + _(SoftModI) \ + _(ModMaskI) \ + _(UDiv) \ + _(UMod) \ + _(SoftUDivOrMod) + +#endif /* jit_arm64_LOpcodes_arm64_h */ diff --git a/js/src/jit/arm64/Lowering-arm64.cpp b/js/src/jit/arm64/Lowering-arm64.cpp new file mode 100644 index 000000000..ca86b450d --- /dev/null +++ b/js/src/jit/arm64/Lowering-arm64.cpp @@ -0,0 +1,369 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/MathAlgorithms.h" + +#include "jit/arm64/Assembler-arm64.h" +#include "jit/Lowering.h" +#include "jit/MIR.h" + +#include "jit/shared/Lowering-shared-inl.h" + +using namespace js; +using namespace js::jit; + +using mozilla::FloorLog2; + +LBoxAllocation +LIRGeneratorARM64::useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart) +{ + MOZ_CRASH("useBoxFixed"); +} + +LAllocation +LIRGeneratorARM64::useByteOpRegister(MDefinition* mir) +{ + MOZ_CRASH("useByteOpRegister"); +} + +LAllocation +LIRGeneratorARM64::useByteOpRegisterAtStart(MDefinition* mir) +{ + MOZ_CRASH("useByteOpRegister"); +} + +LAllocation +LIRGeneratorARM64::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir) +{ + MOZ_CRASH("useByteOpRegisterOrNonDoubleConstant"); +} + +void +LIRGeneratorARM64::visitBox(MBox* box) +{ + MOZ_CRASH("visitBox"); +} + +void +LIRGeneratorARM64::visitUnbox(MUnbox* unbox) +{ + MOZ_CRASH("visitUnbox"); +} + +void +LIRGeneratorARM64::visitReturn(MReturn* ret) +{ + MOZ_CRASH("visitReturn"); +} + +// x = !y +void +LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input) +{ + MOZ_CRASH("lowerForALU"); +} + +// z = x+y +void +LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("lowerForALU"); +} + +void +LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input) +{ + MOZ_CRASH("lowerForFPU"); +} + +template <size_t Temps> +void +LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("lowerForFPU"); +} + +template void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs); +template void LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs); + +void +LIRGeneratorARM64::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins, + MDefinition* mir, MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("NYI"); +} + +void +LIRGeneratorARM64::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("NYI"); +} + +template<size_t Temps> +void +LIRGeneratorARM64::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins, + MDefinition* mir, MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("NYI"); +} + +template void LIRGeneratorARM64::lowerForShiftInt64( + LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs); +template void 
LIRGeneratorARM64::lowerForShiftInt64( + LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs); + +void +LIRGeneratorARM64::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir, + MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("lowerForBitAndAndBranch"); +} + +void +LIRGeneratorARM64::defineUntypedPhi(MPhi* phi, size_t lirIndex) +{ + MOZ_CRASH("defineUntypedPhi"); +} + +void +LIRGeneratorARM64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, + LBlock* block, size_t lirIndex) +{ + MOZ_CRASH("lowerUntypedPhiInput"); +} + +void +LIRGeneratorARM64::lowerForShift(LInstructionHelper<1, 2, 0>* ins, + MDefinition* mir, MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("lowerForShift"); +} + +void +LIRGeneratorARM64::lowerDivI(MDiv* div) +{ + MOZ_CRASH("lowerDivI"); +} + +void +LIRGeneratorARM64::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs) +{ + MOZ_CRASH("lowerMulI"); +} + +void +LIRGeneratorARM64::lowerModI(MMod* mod) +{ + MOZ_CRASH("lowerModI"); +} + +void +LIRGeneratorARM64::lowerDivI64(MDiv* div) +{ + MOZ_CRASH("NYI"); +} + +void +LIRGeneratorARM64::lowerModI64(MMod* mod) +{ + MOZ_CRASH("NYI"); +} + +void +LIRGeneratorARM64::visitPowHalf(MPowHalf* ins) +{ + MOZ_CRASH("visitPowHalf"); +} + +LTableSwitch* +LIRGeneratorARM64::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy, + MTableSwitch* tableswitch) +{ + MOZ_CRASH("newLTableSwitch"); +} + +LTableSwitchV* +LIRGeneratorARM64::newLTableSwitchV(MTableSwitch* tableswitch) +{ + MOZ_CRASH("newLTableSwitchV"); +} + +void +LIRGeneratorARM64::visitGuardShape(MGuardShape* ins) +{ + MOZ_CRASH("visitGuardShape"); +} + +void +LIRGeneratorARM64::visitGuardObjectGroup(MGuardObjectGroup* ins) +{ + MOZ_CRASH("visitGuardObjectGroup"); +} + +void +LIRGeneratorARM64::lowerUrshD(MUrsh* mir) +{ + MOZ_CRASH("lowerUrshD"); +} + +void +LIRGeneratorARM64::visitAsmJSNeg(MAsmJSNeg* ins) +{ + MOZ_CRASH("visitAsmJSNeg"); +} + +void +LIRGeneratorARM64::visitWasmSelect(MWasmSelect* ins) +{ + MOZ_CRASH("visitWasmSelect"); +} + +void +LIRGeneratorARM64::lowerUDiv(MDiv* div) +{ + MOZ_CRASH("lowerUDiv"); +} + +void +LIRGeneratorARM64::lowerUMod(MMod* mod) +{ + MOZ_CRASH("lowerUMod"); +} + +void +LIRGeneratorARM64::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) +{ + MOZ_CRASH("visitWasmUnsignedToDouble"); +} + +void +LIRGeneratorARM64::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) +{ + MOZ_CRASH("visitWasmUnsignedToFloat32"); +} + +void +LIRGeneratorARM64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) +{ + MOZ_CRASH("visitAsmJSLoadHeap"); +} + +void +LIRGeneratorARM64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) +{ + MOZ_CRASH("visitAsmJSStoreHeap"); +} + +void +LIRGeneratorARM64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins) +{ + MOZ_CRASH("visitAsmJSCompareExchangeHeap"); +} + +void +LIRGeneratorARM64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins) +{ + MOZ_CRASH("visitAsmJSAtomicExchangeHeap"); +} + +void +LIRGeneratorARM64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins) +{ + MOZ_CRASH("visitAsmJSAtomicBinopHeap"); +} + +void +LIRGeneratorARM64::lowerTruncateDToInt32(MTruncateToInt32* ins) +{ + MOZ_CRASH("lowerTruncateDToInt32"); +} + +void +LIRGeneratorARM64::lowerTruncateFToInt32(MTruncateToInt32* ins) +{ + MOZ_CRASH("lowerTruncateFToInt32"); +} + +void +LIRGeneratorARM64::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins) +{ + MOZ_CRASH("NYI"); +} + +void 
+LIRGeneratorARM64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins) +{ + MOZ_CRASH("NYI"); +} + +void +LIRGeneratorARM64::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins) +{ + MOZ_CRASH("NYI"); +} + +void +LIRGeneratorARM64::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins) +{ + MOZ_CRASH("NYI"); +} + +void +LIRGeneratorARM64::visitSubstr(MSubstr* ins) +{ + MOZ_CRASH("visitSubstr"); +} + +void +LIRGeneratorARM64::visitRandom(MRandom* ins) +{ + LRandom *lir = new(alloc()) LRandom(temp(), + temp(), + temp()); + defineFixed(lir, ins, LFloatReg(ReturnDoubleReg)); +} + +void +LIRGeneratorARM64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) +{ + MOZ_CRASH("NY"); +} + +void +LIRGeneratorARM64::visitWasmLoad(MWasmLoad* ins) +{ + MOZ_CRASH("NY"); +} + +void +LIRGeneratorARM64::visitWasmStore(MWasmStore* ins) +{ + MOZ_CRASH("NY"); +} + +void +LIRGeneratorARM64::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) +{ + MOZ_CRASH("NY"); +} + +void +LIRGeneratorARM64::visitCopySign(MCopySign* ins) +{ + MOZ_CRASH("NY"); +} + +void +LIRGeneratorARM64::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) +{ + MOZ_CRASH("NYI"); +} diff --git a/js/src/jit/arm64/Lowering-arm64.h b/js/src/jit/arm64/Lowering-arm64.h new file mode 100644 index 000000000..f23627d10 --- /dev/null +++ b/js/src/jit/arm64/Lowering-arm64.h @@ -0,0 +1,132 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_Lowering_arm64_h +#define jit_arm64_Lowering_arm64_h + +#include "jit/shared/Lowering-shared.h" + +namespace js { +namespace jit { + +class LIRGeneratorARM64 : public LIRGeneratorShared +{ + public: + LIRGeneratorARM64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph) + : LIRGeneratorShared(gen, graph, lirGraph) + { } + + protected: + // Returns a box allocation. reg2 is ignored on 64-bit platforms. + LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2, + bool useAtStart = false); + + LAllocation useByteOpRegister(MDefinition* mir); + LAllocation useByteOpRegisterAtStart(MDefinition* mir); + LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir); + + inline LDefinition tempToUnbox() { + return temp(); + } + + bool needTempForPostBarrier() { return true; } + + // ARM64 has a scratch register, so no need for another temp for dispatch ICs. 
+ LDefinition tempForDispatchCache(MIRType outputType = MIRType::None) { + return LDefinition::BogusTemp(); + } + + void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex); + void defineUntypedPhi(MPhi* phi, size_t lirIndex); + void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t) { MOZ_CRASH("NYI"); } + void defineInt64Phi(MPhi*, size_t) { MOZ_CRASH("NYI"); } + void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs, + MDefinition* rhs); + void lowerUrshD(MUrsh* mir); + + void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input); + void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs); + + void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins, + MDefinition* mir, MDefinition* lhs, MDefinition* rhs); + void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs); + template<size_t Temps> + void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins, + MDefinition* mir, MDefinition* lhs, MDefinition* rhs); + + void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input); + + template <size_t Temps> + void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, + MDefinition* lhs, MDefinition* rhs); + + void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir, + MDefinition* lhs, MDefinition* rhs) + { + return lowerForFPU(ins, mir, lhs, rhs); + } + + void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir, + MDefinition* lhs, MDefinition* rhs) + { + return lowerForFPU(ins, mir, lhs, rhs); + } + + void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir, + MDefinition* lhs, MDefinition* rhs); + void lowerTruncateDToInt32(MTruncateToInt32* ins); + void lowerTruncateFToInt32(MTruncateToInt32* ins); + void lowerDivI(MDiv* div); + void lowerModI(MMod* mod); + void lowerDivI64(MDiv* div); + void lowerModI64(MMod* mod); + void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs); + void lowerUDiv(MDiv* div); + void lowerUMod(MMod* mod); + void visitPowHalf(MPowHalf* ins); + void visitAsmJSNeg(MAsmJSNeg* ins); + void visitWasmSelect(MWasmSelect* ins); + + LTableSwitchV* newLTableSwitchV(MTableSwitch* ins); + LTableSwitch* newLTableSwitch(const LAllocation& in, + const LDefinition& inputCopy, + MTableSwitch* ins); + + public: + void visitBox(MBox* box); + void visitUnbox(MUnbox* unbox); + void visitReturn(MReturn* ret); + void lowerPhi(MPhi* phi); + void visitGuardShape(MGuardShape* ins); + void visitGuardObjectGroup(MGuardObjectGroup* ins); + void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins); + void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins); + void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins); + void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins); + void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins); + void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins); + void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins); + void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins); + void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins); + void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins); + void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins); + void visitSubstr(MSubstr* ins); + void visitRandom(MRandom* ins); + void 
visitWasmTruncateToInt64(MWasmTruncateToInt64* ins); + void visitWasmLoad(MWasmLoad* ins); + void visitWasmStore(MWasmStore* ins); + void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins); + void visitCopySign(MCopySign* ins); + void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins); +}; + +typedef LIRGeneratorARM64 LIRGeneratorSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_Lowering_arm64_h */ diff --git a/js/src/jit/arm64/MacroAssembler-arm64-inl.h b/js/src/jit/arm64/MacroAssembler-arm64-inl.h new file mode 100644 index 000000000..4300d90eb --- /dev/null +++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h @@ -0,0 +1,1793 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_MacroAssembler_arm64_inl_h +#define jit_arm64_MacroAssembler_arm64_inl_h + +#include "jit/arm64/MacroAssembler-arm64.h" + +namespace js { +namespace jit { + +//{{{ check_macroassembler_style + +void +MacroAssembler::move64(Register64 src, Register64 dest) +{ + movePtr(src.reg, dest.reg); +} + +void +MacroAssembler::move64(Imm64 imm, Register64 dest) +{ + movePtr(ImmWord(imm.value), dest.reg); +} + +void +MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) +{ + MOZ_CRASH("NYI: moveFloat32ToGPR"); +} + +void +MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) +{ + MOZ_CRASH("NYI: moveGPRToFloat32"); +} + +void +MacroAssembler::move8SignExtend(Register src, Register dest) +{ + MOZ_CRASH("NYI: move8SignExtend"); +} + +void +MacroAssembler::move16SignExtend(Register src, Register dest) +{ + MOZ_CRASH("NYI: move16SignExtend"); +} + +// =============================================================== +// Logical instructions + +void +MacroAssembler::not32(Register reg) +{ + Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32)); +} + +void +MacroAssembler::and32(Register src, Register dest) +{ + And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); +} + +void +MacroAssembler::and32(Imm32 imm, Register dest) +{ + And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); +} + +void +MacroAssembler::and32(Imm32 imm, Register src, Register dest) +{ + And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value)); +} + +void +MacroAssembler::and32(Imm32 imm, const Address& dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + load32(dest, scratch32.asUnsized()); + And(scratch32, scratch32, Operand(imm.value)); + store32(scratch32.asUnsized(), dest); +} + +void +MacroAssembler::and32(const Address& src, Register dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != src.base); + load32(src, scratch32.asUnsized()); + And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32)); +} + +void +MacroAssembler::andPtr(Register src, Register dest) +{ + And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); +} + +void +MacroAssembler::andPtr(Imm32 imm, Register dest) +{ + And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); +} + +void +MacroAssembler::and64(Imm64 imm, Register64 dest) +{ + 
vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + mov(ImmWord(imm.value), scratch); + andPtr(scratch, dest.reg); +} + +void +MacroAssembler::and64(Register64 src, Register64 dest) +{ + MOZ_CRASH("NYI: and64"); +} + +void +MacroAssembler::or64(Imm64 imm, Register64 dest) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + mov(ImmWord(imm.value), scratch); + orPtr(scratch, dest.reg); +} + +void +MacroAssembler::xor64(Imm64 imm, Register64 dest) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + mov(ImmWord(imm.value), scratch); + xorPtr(scratch, dest.reg); +} + +void +MacroAssembler::or32(Imm32 imm, Register dest) +{ + Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); +} + +void +MacroAssembler::or32(Register src, Register dest) +{ + Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); +} + +void +MacroAssembler::or32(Imm32 imm, const Address& dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + load32(dest, scratch32.asUnsized()); + Orr(scratch32, scratch32, Operand(imm.value)); + store32(scratch32.asUnsized(), dest); +} + +void +MacroAssembler::orPtr(Register src, Register dest) +{ + Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); +} + +void +MacroAssembler::orPtr(Imm32 imm, Register dest) +{ + Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); +} + +void +MacroAssembler::or64(Register64 src, Register64 dest) +{ + orPtr(src.reg, dest.reg); +} + +void +MacroAssembler::xor64(Register64 src, Register64 dest) +{ + xorPtr(src.reg, dest.reg); +} + +void +MacroAssembler::xor32(Register src, Register dest) +{ + Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); +} + +void +MacroAssembler::xor32(Imm32 imm, Register dest) +{ + Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); +} + +void +MacroAssembler::xorPtr(Register src, Register dest) +{ + Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); +} + +void +MacroAssembler::xorPtr(Imm32 imm, Register dest) +{ + Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); +} + +// =============================================================== +// Arithmetic functions + +void +MacroAssembler::add32(Register src, Register dest) +{ + Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); +} + +void +MacroAssembler::add32(Imm32 imm, Register dest) +{ + Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); +} + +void +MacroAssembler::add32(Imm32 imm, const Address& dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + + Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Add(scratch32, scratch32, Operand(imm.value)); + Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); +} + +void +MacroAssembler::addPtr(Register src, Register dest) +{ + addPtr(src, dest, dest); +} + +void +MacroAssembler::addPtr(Register src1, Register src2, Register dest) +{ + Add(ARMRegister(dest, 64), ARMRegister(src1, 64), Operand(ARMRegister(src2, 64))); +} + +void +MacroAssembler::addPtr(Imm32 imm, Register dest) +{ + addPtr(imm, dest, dest); +} + 
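The Imm64 and Address-based overloads above share one pattern: an operand that cannot be encoded directly in the instruction is first staged in a scratch register acquired from vixl::UseScratchRegisterScope, after which the plain register form is used. As a standalone C++ sketch of what the load-modify-store variants (for example and32(Imm32, const Address&)) amount to at run time — the helper name and raw-pointer signature here are purely illustrative:

#include <cstdint>

// Read the current 32-bit value, combine it with the immediate in a
// temporary (playing the role of the scratch register), and write it back.
static inline void and32_at(uint32_t* addr, uint32_t imm)
{
    uint32_t scratch = *addr;   // Ldr  scratch32, [base, #offset]
    scratch &= imm;             // And  scratch32, scratch32, #imm
    *addr = scratch;            // Str  scratch32, [base, #offset]
}

The same load-operate-store shape appears in or32, add32, addPtr and subPtr whenever the destination is a memory operand.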
+void +MacroAssembler::addPtr(Imm32 imm, Register src, Register dest) +{ + Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value)); +} + +void +MacroAssembler::addPtr(ImmWord imm, Register dest) +{ + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); +} + +void +MacroAssembler::addPtr(Imm32 imm, const Address& dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != dest.base); + + Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Add(scratch64, scratch64, Operand(imm.value)); + Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); +} + +void +MacroAssembler::addPtr(const Address& src, Register dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + + Ldr(scratch64, MemOperand(ARMRegister(src.base, 64), src.offset)); + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64)); +} + +void +MacroAssembler::add64(Register64 src, Register64 dest) +{ + addPtr(src.reg, dest.reg); +} + +void +MacroAssembler::add64(Imm32 imm, Register64 dest) +{ + Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value)); +} + +void +MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) +{ + fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); +} + +void +MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) +{ + fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32)); +} + +void +MacroAssembler::sub32(Imm32 imm, Register dest) +{ + Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); +} + +void +MacroAssembler::sub32(Register src, Register dest) +{ + Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); +} + +void +MacroAssembler::sub32(const Address& src, Register dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != src.base); + load32(src, scratch32.asUnsized()); + Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32)); +} + +void +MacroAssembler::subPtr(Register src, Register dest) +{ + Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); +} + +void +MacroAssembler::subPtr(Register src, const Address& dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != dest.base); + + Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Sub(scratch64, scratch64, Operand(ARMRegister(src, 64))); + Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); +} + +void +MacroAssembler::subPtr(Imm32 imm, Register dest) +{ + Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); +} + +void +MacroAssembler::subPtr(const Address& addr, Register dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != addr.base); + + Ldr(scratch64, MemOperand(ARMRegister(addr.base, 64), addr.offset)); + Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64)); +} + +void +MacroAssembler::sub64(Register64 src, Register64 dest) +{ + MOZ_CRASH("NYI: sub64"); +} + +void +MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) +{ + fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), 
ARMFPRegister(src, 64)); +} + +void +MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) +{ + fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32)); +} + +void +MacroAssembler::mul32(Register rhs, Register srcDest) +{ + MOZ_CRASH("NYI - mul32"); +} + +void +MacroAssembler::mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) +{ + Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32)); + if (onOver) { + Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW)); + B(onOver, NotEqual); + } + if (onZero) + Cbz(ARMRegister(dest, 32), onZero); + + // Clear upper 32 bits. + Mov(ARMRegister(dest, 32), ARMRegister(dest, 32)); +} + +void +MacroAssembler::mul64(Imm64 imm, const Register64& dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(dest.reg != scratch64.asUnsized()); + mov(ImmWord(imm.value), scratch64.asUnsized()); + Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64); +} + +void +MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp) +{ + MOZ_CRASH("NYI: mul64"); +} + +void +MacroAssembler::mulBy3(Register src, Register dest) +{ + ARMRegister xdest(dest, 64); + ARMRegister xsrc(src, 64); + Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1)); +} + +void +MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) +{ + fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32)); +} + +void +MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) +{ + fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); +} + +void +MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(temp != scratch); + movePtr(imm, scratch); + const ARMFPRegister scratchDouble = temps.AcquireD(); + Ldr(scratchDouble, MemOperand(Address(scratch, 0))); + fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble); +} + +void +MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned) +{ + MOZ_CRASH("NYI - quotient32"); +} + +void +MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned) +{ + MOZ_CRASH("NYI - remainder32"); +} + +void +MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) +{ + fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32), ARMFPRegister(src, 32)); +} + +void +MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) +{ + fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); +} + +void +MacroAssembler::inc64(AbsoluteAddress dest) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratchAddr64 = temps.AcquireX(); + const ARMRegister scratch64 = temps.AcquireX(); + + Mov(scratchAddr64, uint64_t(dest.addr)); + Ldr(scratch64, MemOperand(scratchAddr64, 0)); + Add(scratch64, scratch64, Operand(1)); + Str(scratch64, MemOperand(scratchAddr64, 0)); +} + +void +MacroAssembler::neg32(Register reg) +{ + Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32))); +} + +void +MacroAssembler::negateFloat(FloatRegister reg) +{ + fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32)); +} + +void +MacroAssembler::negateDouble(FloatRegister reg) +{ + fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64)); +} + +void +MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) +{ + MOZ_CRASH("NYI - 
absFloat32"); +} + +void +MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) +{ + MOZ_CRASH("NYI - absDouble"); +} + +void +MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) +{ + MOZ_CRASH("NYI - sqrtFloat32"); +} + +void +MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) +{ + MOZ_CRASH("NYI - sqrtDouble"); +} + +void +MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) +{ + MOZ_CRASH("NYI - minFloat32"); +} + +void +MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) +{ + MOZ_CRASH("NYI - minDouble"); +} + +void +MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) +{ + MOZ_CRASH("NYI - maxFloat32"); +} + +void +MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) +{ + MOZ_CRASH("NYI - maxDouble"); +} + +// =============================================================== +// Shift functions + +void +MacroAssembler::lshiftPtr(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value); +} + +void +MacroAssembler::lshift64(Imm32 imm, Register64 dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + lshiftPtr(imm, dest.reg); +} + +void +MacroAssembler::lshift64(Register shift, Register64 srcDest) +{ + MOZ_CRASH("NYI: lshift64"); +} + +void +MacroAssembler::lshift32(Register shift, Register dest) +{ + Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32)); +} + +void +MacroAssembler::lshift32(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 32); + Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value); +} + +void +MacroAssembler::rshiftPtr(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value); +} + +void +MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value); +} + +void +MacroAssembler::rshift32(Register shift, Register dest) +{ + Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32)); +} + +void +MacroAssembler::rshift32(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 32); + Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value); +} + +void +MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value); +} + +void +MacroAssembler::rshift32Arithmetic(Register shift, Register dest) +{ + Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32)); +} + +void +MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 32); + Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value); +} + +void +MacroAssembler::rshift64(Imm32 imm, Register64 dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + rshiftPtr(imm, dest.reg); +} + +void +MacroAssembler::rshift64(Register shift, Register64 srcDest) +{ + MOZ_CRASH("NYI: rshift64"); +} + +void +MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) +{ + MOZ_CRASH("NYI: rshift64Arithmetic"); +} + +void +MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) +{ + MOZ_CRASH("NYI: rshift64Arithmetic"); +} + +// =============================================================== +// Condition 
functions + +template <typename T1, typename T2> +void +MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) +{ + cmp32(lhs, rhs); + emitSet(cond, dest); +} + +template <typename T1, typename T2> +void +MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) +{ + cmpPtr(lhs, rhs); + emitSet(cond, dest); +} + +// =============================================================== +// Rotation functions + +void +MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) +{ + MOZ_CRASH("NYI: rotateLeft by immediate"); +} + +void +MacroAssembler::rotateLeft(Register count, Register input, Register dest) +{ + MOZ_CRASH("NYI: rotateLeft by register"); +} + +void +MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) +{ + MOZ_CRASH("NYI: rotateRight by immediate"); +} + +void +MacroAssembler::rotateRight(Register count, Register input, Register dest) +{ + MOZ_CRASH("NYI: rotateRight by register"); +} + +void +MacroAssembler::rotateLeft64(Register count, Register64 input, Register64 dest, Register temp) +{ + MOZ_CRASH("NYI: rotateLeft64"); +} + +void +MacroAssembler::rotateRight64(Register count, Register64 input, Register64 dest, Register temp) +{ + MOZ_CRASH("NYI: rotateRight64"); +} + +// =============================================================== +// Bit counting functions + +void +MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) +{ + MOZ_CRASH("NYI: clz32"); +} + +void +MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) +{ + MOZ_CRASH("NYI: ctz32"); +} + +void +MacroAssembler::clz64(Register64 src, Register dest) +{ + MOZ_CRASH("NYI: clz64"); +} + +void +MacroAssembler::ctz64(Register64 src, Register dest) +{ + MOZ_CRASH("NYI: ctz64"); +} + +void +MacroAssembler::popcnt32(Register src, Register dest, Register temp) +{ + MOZ_CRASH("NYI: popcnt32"); +} + +void +MacroAssembler::popcnt64(Register64 src, Register64 dest, Register temp) +{ + MOZ_CRASH("NYI: popcnt64"); +} + +// =============================================================== +// Branch functions + +template <class L> +void +MacroAssembler::branch32(Condition cond, Register lhs, Register rhs, L label) +{ + cmp32(lhs, rhs); + B(label, cond); +} + +template <class L> +void +MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm, L label) +{ + cmp32(lhs, imm); + B(label, cond); +} + +void +MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + MOZ_ASSERT(scratch != rhs); + load32(lhs, scratch); + branch32(cond, scratch, rhs, label); +} + +void +MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + load32(lhs, scratch); + branch32(cond, scratch, imm, label); +} + +void +MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + movePtr(ImmPtr(lhs.addr), scratch); + branch32(cond, Address(scratch, 0), rhs, label); +} + +void +MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + 
movePtr(ImmPtr(lhs.addr), scratch); + branch32(cond, Address(scratch, 0), rhs, label); +} + +void +MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != lhs.base); + MOZ_ASSERT(scratch32.asUnsized() != lhs.index); + doBaseIndex(scratch32, lhs, vixl::LDR_w); + branch32(cond, scratch32.asUnsized(), rhs, label); +} + +void +MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + movePtr(lhs, scratch); + branch32(cond, Address(scratch, 0), rhs, label); +} + +void +MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail) +{ + MOZ_CRASH("NYI: branch64 reg-imm"); +} + +void +MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) +{ + MOZ_ASSERT(cond == Assembler::NotEqual, + "other condition codes not supported"); + + branchPtr(cond, lhs, ImmWord(val.value), label); +} + +void +MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch, + Label* label) +{ + MOZ_ASSERT(cond == Assembler::NotEqual, + "other condition codes not supported"); + MOZ_ASSERT(lhs.base != scratch); + MOZ_ASSERT(rhs.base != scratch); + + loadPtr(rhs, scratch); + branchPtr(cond, lhs, scratch, label); +} + +template <class L> +void +MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs, L label) +{ + Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64)); + B(label, cond); +} + +void +MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) +{ + cmpPtr(lhs, rhs); + B(label, cond); +} + +void +MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) +{ + cmpPtr(lhs, rhs); + B(label, cond); +} + +void +MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs); + movePtr(rhs, scratch); + branchPtr(cond, lhs, scratch, label); +} + +void +MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label) +{ + cmpPtr(lhs, rhs); + B(label, cond); +} + +template <class L> +void +MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs, L label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + MOZ_ASSERT(scratch != rhs); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); +} + +void +MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); +} + +void +MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch1_64 = temps.AcquireX(); + const ARMRegister scratch2_64 = temps.AcquireX(); + MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base); + MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base); + + movePtr(rhs, scratch1_64.asUnsized()); + loadPtr(lhs, scratch2_64.asUnsized()); + branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), 
label); +} + +void +MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); +} + +void +MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != rhs); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); +} + +void +MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); +} + +void +MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != rhs); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); +} + +template <typename T> +CodeOffsetJump +MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label) +{ + cmpPtr(lhs, rhs); + return jumpWithPatch(label, cond); +} + +template <typename T> +CodeOffsetJump +MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label) +{ + // The scratch register is unused after the condition codes are set. + { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + cmpPtr(scratch, rhs); + } + return jumpWithPatch(label, cond); +} + +void +MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + if (rhs != scratch) + movePtr(rhs, scratch); + // Instead of unboxing lhs, box rhs and do direct comparison with lhs. 
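+    // Private pointers are stored shifted right by one (loadPrivate() undoes
+    // this by shifting left), so rhs >> 1 is the boxed form that can be
+    // compared against the in-memory lhs directly.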
+ rshiftPtr(Imm32(1), scratch); + branchPtr(cond, lhs, scratch, label); +} + +void +MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, + Label* label) +{ + compareFloat(cond, lhs, rhs); + switch (cond) { + case DoubleNotEqual: { + Label unordered; + // not equal *and* ordered + branch(Overflow, &unordered); + branch(NotEqual, label); + bind(&unordered); + break; + } + case DoubleEqualOrUnordered: + branch(Overflow, label); + branch(Equal, label); + break; + default: + branch(Condition(cond), label); + } +} + +void +MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + + ARMFPRegister src32(src, 32); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch64.Is(dest64)); + + Fcvtzs(dest64, src32); + Add(scratch64, dest64, Operand(0x7fffffffffffffff)); + Cmn(scratch64, 3); + B(fail, Assembler::Above); + And(dest64, dest64, Operand(0xffffffff)); +} + +void +MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail) +{ + convertFloat32ToInt32(src, dest, fail); +} + +void +MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, + Label* label) +{ + compareDouble(cond, lhs, rhs); + switch (cond) { + case DoubleNotEqual: { + Label unordered; + // not equal *and* ordered + branch(Overflow, &unordered); + branch(NotEqual, label); + bind(&unordered); + break; + } + case DoubleEqualOrUnordered: + branch(Overflow, label); + branch(Equal, label); + break; + default: + branch(Condition(cond), label); + } +} + +void +MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + + // An out of range integer will be saturated to the destination size. + ARMFPRegister src64(src, 64); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch64.Is(dest64)); + + Fcvtzs(dest64, src64); + Add(scratch64, dest64, Operand(0x7fffffffffffffff)); + Cmn(scratch64, 3); + B(fail, Assembler::Above); + And(dest64, dest64, Operand(0xffffffff)); +} + +void +MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail) +{ + convertDoubleToInt32(src, dest, fail); +} + +template <typename T, typename L> +void +MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label) +{ + adds32(src, dest); + B(label, cond); +} + +template <typename T> +void +MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label) +{ + subs32(src, dest); + branch(cond, label); +} + +void +MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) +{ + Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(rhs.value)); + B(cond, label); +} + +template <class L> +void +MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label) +{ + MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned); + // x86 prefers |test foo, foo| to |cmp foo, #0|. + // Convert the former to the latter for ARM. 
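+    // (|cmp x, #0| and |tst x, x| set Z identically, so limiting the rewrite
+    // to the Zero/NonZero conditions keeps the behaviour unchanged.)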
+ if (lhs == rhs && (cond == Zero || cond == NonZero)) + cmp32(lhs, Imm32(0)); + else + test32(lhs, rhs); + B(label, cond); +} + +template <class L> +void +MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label) +{ + MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned); + test32(lhs, rhs); + B(label, cond); +} + +void +MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + load32(lhs, scratch); + branchTest32(cond, scratch, rhs, label); +} + +void +MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + load32(lhs, scratch); + branchTest32(cond, scratch, rhs, label); +} + +template <class L> +void +MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs, L label) +{ + Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64))); + B(label, cond); +} + +void +MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) +{ + Tst(ARMRegister(lhs, 64), Operand(rhs.value)); + B(label, cond); +} + +void +MacroAssembler::branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + branchTestPtr(cond, scratch, rhs, label); +} + +template <class L> +void +MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp, + L label) +{ + branchTestPtr(cond, lhs.reg, rhs.reg, label); +} + +void +MacroAssembler::branchTestUndefined(Condition cond, Register tag, Label* label) +{ + branchTestUndefinedImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestUndefined(Condition cond, const Address& address, Label* label) +{ + branchTestUndefinedImpl(cond, address, label); +} + +void +MacroAssembler::branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestUndefinedImpl(cond, address, label); +} + +void +MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestUndefinedImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testUndefined(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestInt32(Condition cond, Register tag, Label* label) +{ + branchTestInt32Impl(cond, tag, label); +} + +void +MacroAssembler::branchTestInt32(Condition cond, const Address& address, Label* label) +{ + branchTestInt32Impl(cond, address, label); +} + +void +MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestInt32Impl(cond, address, label); +} + +void +MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestInt32Impl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestInt32Impl(Condition cond, const T& t, Label* label) +{ + Condition c = testInt32(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label) +{ + Condition c = testInt32Truthy(truthy, value); + B(label, c); +} + +void 
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label) +{ + branchTestDoubleImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestDouble(Condition cond, const Address& address, Label* label) +{ + branchTestDoubleImpl(cond, address, label); +} + +void +MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestDoubleImpl(cond, address, label); +} + +void +MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestDoubleImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testDouble(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) +{ + Fcmp(ARMFPRegister(reg, 64), 0.0); + if (!truthy) { + // falsy values are zero, and NaN. + branch(Zero, label); + branch(Overflow, label); + } else { + // truthy values are non-zero and not nan. + // If it is overflow + Label onFalse; + branch(Zero, &onFalse); + branch(Overflow, &onFalse); + B(label); + bind(&onFalse); + } +} + +void +MacroAssembler::branchTestNumber(Condition cond, Register tag, Label* label) +{ + branchTestNumberImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestNumberImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestNumberImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testNumber(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestBoolean(Condition cond, Register tag, Label* label) +{ + branchTestBooleanImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestBoolean(Condition cond, const Address& address, Label* label) +{ + branchTestBooleanImpl(cond, address, label); +} + +void +MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestBooleanImpl(cond, address, label); +} + +void +MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestBooleanImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestBooleanImpl(Condition cond, const T& tag, Label* label) +{ + Condition c = testBoolean(cond, tag); + B(label, c); +} + +void +MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label) +{ + Condition c = testBooleanTruthy(truthy, value); + B(label, c); +} + +void +MacroAssembler::branchTestString(Condition cond, Register tag, Label* label) +{ + branchTestStringImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestString(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestStringImpl(cond, address, label); +} + +void +MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestStringImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestStringImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testString(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label) +{ + Condition c = testStringTruthy(truthy, value); + B(label, c); +} + +void +MacroAssembler::branchTestSymbol(Condition cond, Register tag, Label* label) +{ + branchTestSymbolImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address, 
Label* label) +{ + branchTestSymbolImpl(cond, address, label); +} + +void +MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestSymbolImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testSymbol(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestNull(Condition cond, Register tag, Label* label) +{ + branchTestNullImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestNull(Condition cond, const Address& address, Label* label) +{ + branchTestNullImpl(cond, address, label); +} + +void +MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestNullImpl(cond, address, label); +} + +void +MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestNullImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestNullImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testNull(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestObject(Condition cond, Register tag, Label* label) +{ + branchTestObjectImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestObject(Condition cond, const Address& address, Label* label) +{ + branchTestObjectImpl(cond, address, label); +} + +void +MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestObjectImpl(cond, address, label); +} + +void +MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestObjectImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestObjectImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testObject(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestGCThing(Condition cond, const Address& address, Label* label) +{ + branchTestGCThingImpl(cond, address, label); +} + +void +MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestGCThingImpl(cond, address, label); +} + +template <typename T> +void +MacroAssembler::branchTestGCThingImpl(Condition cond, const T& src, Label* label) +{ + Condition c = testGCThing(cond, src); + B(label, c); +} + +void +MacroAssembler::branchTestPrimitive(Condition cond, Register tag, Label* label) +{ + branchTestPrimitiveImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label) +{ + branchTestPrimitiveImpl(cond, value, label); +} + +template <typename T> +void +MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t, Label* label) +{ + Condition c = testPrimitive(cond, t); + B(label, c); +} + +void +MacroAssembler::branchTestMagic(Condition cond, Register tag, Label* label) +{ + branchTestMagicImpl(cond, tag, label); +} + +void +MacroAssembler::branchTestMagic(Condition cond, const Address& address, Label* label) +{ + branchTestMagicImpl(cond, address, label); +} + +void +MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address, Label* label) +{ + branchTestMagicImpl(cond, address, label); +} + +template <class L> +void +MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label) +{ + branchTestMagicImpl(cond, value, label); +} + +template <typename T, class L> +void +MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label) +{ + Condition c = testMagic(cond, t); + 
B(label, c); +} + +void +MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label) +{ + uint64_t magic = MagicValue(why).asRawBits(); + cmpPtr(valaddr, ImmWord(magic)); + B(label, cond); +} + +// ======================================================================== +// Memory access primitives. +void +MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& dest) +{ + Str(ARMFPRegister(src, 64), MemOperand(ARMRegister(dest.base, 64), dest.offset)); +} +void +MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest) +{ + doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d); +} + +void +MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr) +{ + Str(ARMFPRegister(src, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset)); +} +void +MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr) +{ + doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s); +} + +void +MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest) +{ + MOZ_CRASH("NYI"); +} +void +MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) +{ + MOZ_CRASH("NYI"); +} + +// =============================================================== +// Clamping functions. + +void +MacroAssembler::clampIntToUint8(Register reg) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + const ARMRegister reg32(reg, 32); + MOZ_ASSERT(!scratch32.Is(reg32)); + + Cmp(reg32, Operand(reg32, vixl::UXTB)); + Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual); + Mov(scratch32, Operand(0xff)); + Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual); +} + +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + MOZ_CRASH("NYI"); +} + +//}}} check_macroassembler_style +// =============================================================== + +template <typename T> +void +MacroAssemblerCompat::addToStackPtr(T t) +{ + asMasm().addPtr(t, getStackPointer()); +} + +template <typename T> +void +MacroAssemblerCompat::addStackPtrTo(T t) +{ + asMasm().addPtr(getStackPointer(), t); +} + +template <typename T> +void +MacroAssemblerCompat::subFromStackPtr(T t) +{ + asMasm().subPtr(t, getStackPointer()); syncStackPtr(); +} + +template <typename T> +void +MacroAssemblerCompat::subStackPtrFrom(T t) +{ + asMasm().subPtr(getStackPointer(), t); +} + +template <typename T> +void +MacroAssemblerCompat::andToStackPtr(T t) +{ + asMasm().andPtr(t, getStackPointer()); + syncStackPtr(); +} + +template <typename T> +void +MacroAssemblerCompat::andStackPtrTo(T t) +{ + asMasm().andPtr(getStackPointer(), t); +} + +template <typename T> +void +MacroAssemblerCompat::branchStackPtr(Condition cond, T rhs, Label* label) +{ + asMasm().branchPtr(cond, getStackPointer(), rhs, label); +} + +template <typename T> +void +MacroAssemblerCompat::branchStackPtrRhs(Condition cond, T lhs, Label* label) +{ + asMasm().branchPtr(cond, lhs, getStackPointer(), label); +} + +template <typename T> +void +MacroAssemblerCompat::branchTestStackPtr(Condition cond, T t, Label* label) +{ + asMasm().branchTestPtr(cond, getStackPointer(), t, 
label); +} + +// If source is a double, load into dest. +// If source is int32, convert to double and store in dest. +// Else, branch to failure. +void +MacroAssemblerCompat::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure) +{ + Label isDouble, done; + + // TODO: splitTagForTest really should not leak a scratch register. + Register tag = splitTagForTest(source); + { + vixl::UseScratchRegisterScope temps(this); + temps.Exclude(ARMRegister(tag, 64)); + + asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble); + asMasm().branchTestInt32(Assembler::NotEqual, tag, failure); + } + + convertInt32ToDouble(source.valueReg(), dest); + jump(&done); + + bind(&isDouble); + unboxDouble(source, dest); + + bind(&done); +} + +void +MacroAssemblerCompat::unboxValue(const ValueOperand& src, AnyRegister dest) +{ + if (dest.isFloat()) { + Label notInt32, end; + asMasm().branchTestInt32(Assembler::NotEqual, src, ¬Int32); + convertInt32ToDouble(src.valueReg(), dest.fpu()); + jump(&end); + bind(¬Int32); + unboxDouble(src, dest.fpu()); + bind(&end); + } else { + unboxNonDouble(src, dest.gpr()); + } +} + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_MacroAssembler_arm64_inl_h */ diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp new file mode 100644 index 000000000..d3d3cc210 --- /dev/null +++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp @@ -0,0 +1,838 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/arm64/MacroAssembler-arm64.h" + +#include "jit/arm64/MoveEmitter-arm64.h" +#include "jit/arm64/SharedICRegisters-arm64.h" +#include "jit/Bailouts.h" +#include "jit/BaselineFrame.h" +#include "jit/MacroAssembler.h" + +#include "jit/MacroAssembler-inl.h" + +namespace js { +namespace jit { + +void +MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) +{ + ARMRegister dest(output, 32); + Fcvtns(dest, ARMFPRegister(input, 64)); + + { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + + Mov(scratch32, Operand(0xff)); + Cmp(dest, scratch32); + Csel(dest, dest, scratch32, LessThan); + } + + Cmp(dest, Operand(0)); + Csel(dest, dest, wzr, GreaterThan); +} + +void +MacroAssembler::alignFrameForICArguments(MacroAssembler::AfterICSaveLive& aic) +{ + // Exists for MIPS compatibility. +} + +void +MacroAssembler::restoreFrameAlignmentForICArguments(MacroAssembler::AfterICSaveLive& aic) +{ + // Exists for MIPS compatibility. +} + +js::jit::MacroAssembler& +MacroAssemblerCompat::asMasm() +{ + return *static_cast<js::jit::MacroAssembler*>(this); +} + +const js::jit::MacroAssembler& +MacroAssemblerCompat::asMasm() const +{ + return *static_cast<const js::jit::MacroAssembler*>(this); +} + +vixl::MacroAssembler& +MacroAssemblerCompat::asVIXL() +{ + return *static_cast<vixl::MacroAssembler*>(this); +} + +const vixl::MacroAssembler& +MacroAssemblerCompat::asVIXL() const +{ + return *static_cast<const vixl::MacroAssembler*>(this); +} + +BufferOffset +MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest) +{ + const size_t numInst = 1; // Inserting one load instruction. + const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes. 
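+    // Two 4-byte entries are needed because the literal being stored is a
+    // full 64-bit pointer.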
+ uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const. + + // Scratch space for generating the load instruction. + // + // allocEntry() will use InsertIndexIntoTag() to store a temporary + // index to the corresponding PoolEntry in the instruction itself. + // + // That index will be fixed up later when finishPool() + // walks over all marked loads and calls PatchConstantPoolLoad(). + uint32_t instructionScratch = 0; + + // Emit the instruction mask in the scratch space. + // The offset doesn't matter: it will be fixed up later. + vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0); + + // Add the entry to the pool, fix up the LDR imm19 offset, + // and add the completed instruction to the buffer. + return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch, + literalAddr); +} + +BufferOffset +MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest) +{ + const size_t numInst = 1; // Inserting one load instruction. + const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes. + uint8_t* literalAddr = (uint8_t*)(&ptr.value); + + // Scratch space for generating the load instruction. + // + // allocEntry() will use InsertIndexIntoTag() to store a temporary + // index to the corresponding PoolEntry in the instruction itself. + // + // That index will be fixed up later when finishPool() + // walks over all marked loads and calls PatchConstantPoolLoad(). + uint32_t instructionScratch = 0; + + // Emit the instruction mask in the scratch space. + // The offset doesn't matter: it will be fixed up later. + vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0); + + // Add the entry to the pool, fix up the LDR imm19 offset, + // and add the completed instruction to the buffer. + return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch, + literalAddr); +} + +void +MacroAssemblerCompat::loadPrivate(const Address& src, Register dest) +{ + loadPtr(src, dest); + asMasm().lshiftPtr(Imm32(1), dest); +} + +void +MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler) +{ + // Reserve space for exception information. + int64_t size = (sizeof(ResumeFromException) + 7) & ~7; + Sub(GetStackPointer64(), GetStackPointer64(), Operand(size)); + if (!GetStackPointer64().Is(sp)) + Mov(sp, GetStackPointer64()); + + Mov(x0, GetStackPointer64()); + + // Call the handler. + asMasm().setupUnalignedABICall(r1); + asMasm().passABIArg(r0); + asMasm().callWithABI(handler); + + Label entryFrame; + Label catch_; + Label finally; + Label return_; + Label bailout; + + MOZ_ASSERT(GetStackPointer64().Is(x28)); // Lets the code below be a little cleaner. + + loadPtr(Address(r28, offsetof(ResumeFromException, kind)), r0); + asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), + &entryFrame); + asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_); + asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally); + asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), + &return_); + asMasm().branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout); + + breakpoint(); // Invalid kind. + + // No exception handler. Load the error value, load the new stack pointer, + // and return from the entry frame. 
+ bind(&entryFrame); + moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand); + loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28); + retn(Imm32(1 * sizeof(void*))); // Pop from stack and return. + + // If we found a catch handler, this must be a baseline frame. Restore state + // and jump to the catch block. + bind(&catch_); + loadPtr(Address(r28, offsetof(ResumeFromException, target)), r0); + loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg); + loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28); + syncStackPtr(); + Br(x0); + + // If we found a finally block, this must be a baseline frame. + // Push two values expected by JSOP_RETSUB: BooleanValue(true) + // and the exception. + bind(&finally); + ARMRegister exception = x1; + Ldr(exception, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, exception))); + Ldr(x0, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target))); + Ldr(ARMRegister(BaselineFrameReg, 64), + MemOperand(GetStackPointer64(), offsetof(ResumeFromException, framePointer))); + Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), offsetof(ResumeFromException, stackPointer))); + syncStackPtr(); + pushValue(BooleanValue(true)); + push(exception); + Br(x0); + + // Only used in debug mode. Return BaselineFrame->returnValue() to the caller. + bind(&return_); + loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg); + loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28); + loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()), + JSReturnOperand); + movePtr(BaselineFrameReg, r28); + vixl::MacroAssembler::Pop(ARMRegister(BaselineFrameReg, 64), vixl::lr); + syncStackPtr(); + vixl::MacroAssembler::Ret(vixl::lr); + + // If we are bailing out to baseline to handle an exception, + // jump to the bailout tail stub. + bind(&bailout); + Ldr(x2, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, bailoutInfo))); + Ldr(x1, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target))); + Mov(x0, BAILOUT_RETURN_OK); + Br(x1); +} + +void +MacroAssemblerCompat::breakpoint() +{ + static int code = 0xA77; + Brk((code++) & 0xffff); +} + +template<typename T> +void +MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, + Register oldval, Register newval, + Register temp, AnyRegister output) +{ + switch (arrayType) { + case Scalar::Int8: + compareExchange8SignExtend(mem, oldval, newval, output.gpr()); + break; + case Scalar::Uint8: + compareExchange8ZeroExtend(mem, oldval, newval, output.gpr()); + break; + case Scalar::Int16: + compareExchange16SignExtend(mem, oldval, newval, output.gpr()); + break; + case Scalar::Uint16: + compareExchange16ZeroExtend(mem, oldval, newval, output.gpr()); + break; + case Scalar::Int32: + compareExchange32(mem, oldval, newval, output.gpr()); + break; + case Scalar::Uint32: + // At the moment, the code in MCallOptimize.cpp requires the output + // type to be double for uint32 arrays. See bug 1077305. 
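+        // A uint32 result may exceed INT32_MAX, so it is handed back as a
+        // double via convertUInt32ToDouble() below.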
+ MOZ_ASSERT(output.isFloat()); + compareExchange32(mem, oldval, newval, temp); + convertUInt32ToDouble(temp, output.fpu()); + break; + default: + MOZ_CRASH("Invalid typed array type"); + } +} + +template void +MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem, + Register oldval, Register newval, Register temp, + AnyRegister output); +template void +MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem, + Register oldval, Register newval, Register temp, + AnyRegister output); + +template<typename T> +void +MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, + Register value, Register temp, AnyRegister output) +{ + switch (arrayType) { + case Scalar::Int8: + atomicExchange8SignExtend(mem, value, output.gpr()); + break; + case Scalar::Uint8: + atomicExchange8ZeroExtend(mem, value, output.gpr()); + break; + case Scalar::Int16: + atomicExchange16SignExtend(mem, value, output.gpr()); + break; + case Scalar::Uint16: + atomicExchange16ZeroExtend(mem, value, output.gpr()); + break; + case Scalar::Int32: + atomicExchange32(mem, value, output.gpr()); + break; + case Scalar::Uint32: + // At the moment, the code in MCallOptimize.cpp requires the output + // type to be double for uint32 arrays. See bug 1077305. + MOZ_ASSERT(output.isFloat()); + atomicExchange32(mem, value, temp); + convertUInt32ToDouble(temp, output.fpu()); + break; + default: + MOZ_CRASH("Invalid typed array type"); + } +} + +template void +MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem, + Register value, Register temp, AnyRegister output); +template void +MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem, + Register value, Register temp, AnyRegister output); + +void +MacroAssembler::reserveStack(uint32_t amount) +{ + // TODO: This bumps |sp| every time we reserve using a second register. + // It would save some instructions if we had a fixed frame size. + vixl::MacroAssembler::Claim(Operand(amount)); + adjustFrame(amount); +} + +//{{{ check_macroassembler_style +// =============================================================== +// MacroAssembler high-level usage. + +void +MacroAssembler::flush() +{ + Assembler::flush(); +} + +// =============================================================== +// Stack manipulation functions. + +void +MacroAssembler::PushRegsInMask(LiveRegisterSet set) +{ + for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ) { + vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg }; + + for (size_t i = 0; i < 4 && iter.more(); i++) { + src[i] = ARMRegister(*iter, 64); + ++iter; + adjustFrame(8); + } + vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]); + } + + for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) { + vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg }; + + for (size_t i = 0; i < 4 && iter.more(); i++) { + src[i] = ARMFPRegister(*iter, 64); + ++iter; + adjustFrame(8); + } + vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]); + } +} + +void +MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore) +{ + // The offset of the data from the stack pointer. 
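+    // Registers are reloaded in pairs with Ldp where possible; slots for
+    // ignored registers are skipped, but the offset still advances past them.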
+ uint32_t offset = 0; + + for (FloatRegisterIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) { + vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg }; + uint32_t nextOffset = offset; + + for (size_t i = 0; i < 2 && iter.more(); i++) { + if (!ignore.has(*iter)) + dest[i] = ARMFPRegister(*iter, 64); + ++iter; + nextOffset += sizeof(double); + } + + if (!dest[0].IsNone() && !dest[1].IsNone()) + Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset)); + else if (!dest[0].IsNone()) + Ldr(dest[0], MemOperand(GetStackPointer64(), offset)); + else if (!dest[1].IsNone()) + Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(double))); + + offset = nextOffset; + } + + MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes()); + + for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ) { + vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg }; + uint32_t nextOffset = offset; + + for (size_t i = 0; i < 2 && iter.more(); i++) { + if (!ignore.has(*iter)) + dest[i] = ARMRegister(*iter, 64); + ++iter; + nextOffset += sizeof(uint64_t); + } + + if (!dest[0].IsNone() && !dest[1].IsNone()) + Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset)); + else if (!dest[0].IsNone()) + Ldr(dest[0], MemOperand(GetStackPointer64(), offset)); + else if (!dest[1].IsNone()) + Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(uint64_t))); + + offset = nextOffset; + } + + size_t bytesPushed = set.gprs().size() * sizeof(uint64_t) + set.fpus().getPushSizeInBytes(); + MOZ_ASSERT(offset == bytesPushed); + freeStack(bytesPushed); +} + +void +MacroAssembler::Push(Register reg) +{ + push(reg); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(Register reg1, Register reg2, Register reg3, Register reg4) +{ + push(reg1, reg2, reg3, reg4); + adjustFrame(4 * sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const Imm32 imm) +{ + push(imm); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const ImmWord imm) +{ + push(imm); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const ImmPtr imm) +{ + push(imm); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const ImmGCPtr ptr) +{ + push(ptr); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(FloatRegister f) +{ + push(f); + adjustFrame(sizeof(double)); +} + +void +MacroAssembler::Pop(Register reg) +{ + pop(reg); + adjustFrame(-1 * int64_t(sizeof(int64_t))); +} + +void +MacroAssembler::Pop(FloatRegister f) +{ + MOZ_CRASH("NYI: Pop(FloatRegister)"); +} + +void +MacroAssembler::Pop(const ValueOperand& val) +{ + pop(val); + adjustFrame(-1 * int64_t(sizeof(int64_t))); +} + +// =============================================================== +// Simple call functions. 
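+// Every call below first runs syncStackPtr(): jitted code addresses its stack
+// through the pseudo stack pointer (x28, the r28 used by the exception tail
+// above), while callees, the ABI and the kernel only honour the real sp, so
+// the JIT's view of the stack must be published before control leaves this
+// code.  The returned CodeOffset is taken with currentOffset() immediately
+// after the branch-and-link, i.e. it identifies the return address of the
+// call.  Rough shape of each call helper (illustrative, not emitted here):
+//
+//     syncStackPtr();                  // sp <- pseudo sp
+//     Blr(ARMRegister(target, 64));    // branch and link
+//     return CodeOffset(currentOffset());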
+ +CodeOffset +MacroAssembler::call(Register reg) +{ + syncStackPtr(); + Blr(ARMRegister(reg, 64)); + return CodeOffset(currentOffset()); +} + +CodeOffset +MacroAssembler::call(Label* label) +{ + syncStackPtr(); + Bl(label); + return CodeOffset(currentOffset()); +} + +void +MacroAssembler::call(ImmWord imm) +{ + call(ImmPtr((void*)imm.value)); +} + +void +MacroAssembler::call(ImmPtr imm) +{ + syncStackPtr(); + movePtr(imm, ip0); + Blr(vixl::ip0); +} + +void +MacroAssembler::call(wasm::SymbolicAddress imm) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + syncStackPtr(); + movePtr(imm, scratch); + call(scratch); +} + +void +MacroAssembler::call(JitCode* c) +{ + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + syncStackPtr(); + BufferOffset off = immPool64(scratch64, uint64_t(c->raw())); + addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE); + blr(scratch64); +} + +CodeOffset +MacroAssembler::callWithPatch() +{ + MOZ_CRASH("NYI"); + return CodeOffset(); +} +void +MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) +{ + MOZ_CRASH("NYI"); +} + +CodeOffset +MacroAssembler::farJumpWithPatch() +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset) +{ + MOZ_CRASH("NYI"); +} + +CodeOffset +MacroAssembler::nopPatchableToNearJump() +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::patchNearJumpToNop(uint8_t* jump) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::pushReturnAddress() +{ + push(lr); +} + +void +MacroAssembler::popReturnAddress() +{ + pop(lr); +} + +// =============================================================== +// ABI function calls. + +void +MacroAssembler::setupUnalignedABICall(Register scratch) +{ + setupABICall(); + dynamicAlignment_ = true; + + int64_t alignment = ~(int64_t(ABIStackAlignment) - 1); + ARMRegister scratch64(scratch, 64); + + // Always save LR -- Baseline ICs assume that LR isn't modified. + push(lr); + + // Unhandled for sp -- needs slightly different logic. + MOZ_ASSERT(!GetStackPointer64().Is(sp)); + + // Remember the stack address on entry. + Mov(scratch64, GetStackPointer64()); + + // Make alignment, including the effective push of the previous sp. + Sub(GetStackPointer64(), GetStackPointer64(), Operand(8)); + And(GetStackPointer64(), GetStackPointer64(), Operand(alignment)); + + // If the PseudoStackPointer is used, sp must be <= psp before a write is valid. + syncStackPtr(); + + // Store previous sp to the top of the stack, aligned. + Str(scratch64, MemOperand(GetStackPointer64(), 0)); +} + +void +MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) +{ + MOZ_ASSERT(inCall_); + uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar(); + + // ARM64 /really/ wants the stack to always be aligned. Since we're already tracking it + // getting it aligned for an abi call is pretty easy. + MOZ_ASSERT(dynamicAlignment_); + stackForCall += ComputeByteAlignment(stackForCall, StackAlignment); + *stackAdjust = stackForCall; + reserveStack(*stackAdjust); + { + moveResolver_.resolve(); + MoveEmitter emitter(*this); + emitter.emit(moveResolver_); + emitter.finish(); + } + + // Call boundaries communicate stack via sp. 
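+    // ComputeByteAlignment pads the outgoing-argument area up to the 16-byte
+    // stack alignment AArch64 requires of sp, e.g. (illustrative numbers only):
+    //
+    //     stackForCall = 20  ->  20 + ComputeByteAlignment(20, 16) = 32
+    //
+    // With the arguments moved into place, publish the pseudo stack pointer to
+    // sp so the callee sees the adjusted stack: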
+ syncStackPtr(); +} + +void +MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) +{ + // Call boundaries communicate stack via sp. + if (!GetStackPointer64().Is(sp)) + Mov(GetStackPointer64(), sp); + + freeStack(stackAdjust); + + // Restore the stack pointer from entry. + if (dynamicAlignment_) + Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0)); + + // Restore LR. + pop(lr); + + // TODO: This one shouldn't be necessary -- check that callers + // aren't enforcing the ABI themselves! + syncStackPtr(); + + // If the ABI's return regs are where ION is expecting them, then + // no other work needs to be done. + +#ifdef DEBUG + MOZ_ASSERT(inCall_); + inCall_ = false; +#endif +} + +void +MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + movePtr(fun, scratch); + + uint32_t stackAdjust; + callWithABIPre(&stackAdjust); + call(scratch); + callWithABIPost(stackAdjust, result); +} + +void +MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result) +{ + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + loadPtr(fun, scratch); + + uint32_t stackAdjust; + callWithABIPre(&stackAdjust); + call(scratch); + callWithABIPost(stackAdjust, result); +} + +// =============================================================== +// Jit Frames. + +uint32_t +MacroAssembler::pushFakeReturnAddress(Register scratch) +{ + enterNoPool(3); + Label fakeCallsite; + + Adr(ARMRegister(scratch, 64), &fakeCallsite); + Push(scratch); + bind(&fakeCallsite); + uint32_t pseudoReturnOffset = currentOffset(); + + leaveNoPool(); + return pseudoReturnOffset; +} + +// =============================================================== +// Branch functions + +void +MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, + Label* label) +{ + MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); + MOZ_ASSERT(ptr != temp); + MOZ_ASSERT(ptr != ScratchReg && ptr != ScratchReg2); // Both may be used internally. + MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); + + movePtr(ptr, temp); + orPtr(Imm32(gc::ChunkMask), temp); + branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte), + Imm32(int32_t(gc::ChunkLocation::Nursery)), label); +} + +void +MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp, + Label* label) +{ + branchValueIsNurseryObjectImpl(cond, address, temp, label); +} + +void +MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, + Label* label) +{ + branchValueIsNurseryObjectImpl(cond, value, temp, label); +} + +template <typename T> +void +MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp, + Label* label) +{ + MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); + MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); // Both may be used internally. + + Label done; + branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? 
&done : label); + + extractObject(value, temp); + orPtr(Imm32(gc::ChunkMask), temp); + branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte), + Imm32(int32_t(gc::ChunkLocation::Nursery)), label); + + bind(&done); +} + +void +MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs, + const Value& rhs, Label* label) +{ + MOZ_ASSERT(cond == Equal || cond == NotEqual); + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.valueReg()); + moveValue(rhs, ValueOperand(scratch64.asUnsized())); + Cmp(ARMRegister(lhs.valueReg(), 64), scratch64); + B(label, cond); +} + +// ======================================================================== +// Memory access primitives. +template <typename T> +void +MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, + const T& dest, MIRType slotType) +{ + if (valueType == MIRType::Double) { + storeDouble(value.reg().typedReg().fpu(), dest); + return; + } + + // For known integers and booleans, we can just store the unboxed value if + // the slot has the same type. + if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) { + if (value.constant()) { + Value val = value.value(); + if (valueType == MIRType::Int32) + store32(Imm32(val.toInt32()), dest); + else + store32(Imm32(val.toBoolean() ? 1 : 0), dest); + } else { + store32(value.reg().typedReg().gpr(), dest); + } + return; + } + + if (value.constant()) + storeValue(value.value(), dest); + else + storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest); + +} + +template void +MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, + const Address& dest, MIRType slotType); +template void +MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, + const BaseIndex& dest, MIRType slotType); + +void +MacroAssembler::comment(const char* msg) +{ + Assembler::comment(msg); +} + +//}}} check_macroassembler_style + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h new file mode 100644 index 000000000..b95831443 --- /dev/null +++ b/js/src/jit/arm64/MacroAssembler-arm64.h @@ -0,0 +1,2338 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_MacroAssembler_arm64_h +#define jit_arm64_MacroAssembler_arm64_h + +#include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/vixl/Debugger-vixl.h" +#include "jit/arm64/vixl/MacroAssembler-vixl.h" + +#include "jit/AtomicOp.h" +#include "jit/JitFrames.h" +#include "jit/MoveResolver.h" + +namespace js { +namespace jit { + +// Import VIXL operands directly into the jit namespace for shared code. 
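+// The helpers that follow (ImmShiftedTag, boxValue, splitTag, unboxNonDouble)
+// all work on the 64-bit "punboxed" Value layout: the type tag lives in the
+// bits above JSVAL_TAG_SHIFT (47) and the payload in the bits below.  A tiny
+// stand-alone model of that layout, with hypothetical names:
+//
+//     constexpr int      kTagShift    = 47;                      // JSVAL_TAG_SHIFT
+//     constexpr uint64_t kPayloadMask = (uint64_t(1) << kTagShift) - 1;
+//     uint64_t box(uint64_t shiftedTag, uint64_t payload) { return shiftedTag | payload; }
+//     uint64_t tag(uint64_t v)     { return v >> kTagShift; }    // cf. splitTag
+//     uint64_t payload(uint64_t v) { return v & kPayloadMask; }  // cf. unboxNonDouble
+//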
+using vixl::Operand; +using vixl::MemOperand; + +struct ImmShiftedTag : public ImmWord +{ + ImmShiftedTag(JSValueShiftedTag shtag) + : ImmWord((uintptr_t)shtag) + { } + + ImmShiftedTag(JSValueType type) + : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) + { } +}; + +struct ImmTag : public Imm32 +{ + ImmTag(JSValueTag tag) + : Imm32(tag) + { } +}; + +class MacroAssemblerCompat : public vixl::MacroAssembler +{ + public: + typedef vixl::Condition Condition; + + private: + // Perform a downcast. Should be removed by Bug 996602. + js::jit::MacroAssembler& asMasm(); + const js::jit::MacroAssembler& asMasm() const; + + public: + // Restrict to only VIXL-internal functions. + vixl::MacroAssembler& asVIXL(); + const MacroAssembler& asVIXL() const; + + protected: + bool enoughMemory_; + uint32_t framePushed_; + + MacroAssemblerCompat() + : vixl::MacroAssembler(), + enoughMemory_(true), + framePushed_(0) + { } + + protected: + MoveResolver moveResolver_; + + public: + bool oom() const { + return Assembler::oom() || !enoughMemory_; + } + static MemOperand toMemOperand(Address& a) { + return MemOperand(ARMRegister(a.base, 64), a.offset); + } + void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr, vixl::LoadStoreOp op) { + const ARMRegister base = ARMRegister(addr.base, 64); + const ARMRegister index = ARMRegister(addr.index, 64); + const unsigned scale = addr.scale; + + if (!addr.offset && (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) { + LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op); + return; + } + + vixl::UseScratchRegisterScope temps(this); + ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(!scratch64.Is(rt)); + MOZ_ASSERT(!scratch64.Is(base)); + MOZ_ASSERT(!scratch64.Is(index)); + + Add(scratch64, base, Operand(index, vixl::LSL, scale)); + LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op); + } + void Push(ARMRegister reg) { + push(reg); + adjustFrame(reg.size() / 8); + } + void Push(Register reg) { + vixl::MacroAssembler::Push(ARMRegister(reg, 64)); + adjustFrame(8); + } + void Push(Imm32 imm) { + push(imm); + adjustFrame(8); + } + void Push(FloatRegister f) { + push(ARMFPRegister(f, 64)); + adjustFrame(8); + } + void Push(ImmPtr imm) { + push(imm); + adjustFrame(sizeof(void*)); + } + void push(FloatRegister f) { + vixl::MacroAssembler::Push(ARMFPRegister(f, 64)); + } + void push(ARMFPRegister f) { + vixl::MacroAssembler::Push(f); + } + void push(Imm32 imm) { + if (imm.value == 0) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + move32(imm, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ImmWord imm) { + if (imm.value == 0) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + Mov(scratch64, imm.value); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ImmPtr imm) { + if (imm.value == nullptr) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + movePtr(imm, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ImmGCPtr imm) { + if (imm.value == nullptr) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + movePtr(imm, 
scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ARMRegister reg) { + vixl::MacroAssembler::Push(reg); + } + void push(Address a) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(a.base != scratch64.asUnsized()); + loadPtr(a, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + + // Push registers. + void push(Register reg) { + vixl::MacroAssembler::Push(ARMRegister(reg, 64)); + } + void push(Register r0, Register r1) { + vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64)); + } + void push(Register r0, Register r1, Register r2) { + vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64)); + } + void push(Register r0, Register r1, Register r2, Register r3) { + vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), + ARMRegister(r2, 64), ARMRegister(r3, 64)); + } + void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) { + vixl::MacroAssembler::Push(r0, r1, r2, r3); + } + + // Pop registers. + void pop(Register reg) { + vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); + } + void pop(Register r0, Register r1) { + vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64)); + } + void pop(Register r0, Register r1, Register r2) { + vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64)); + } + void pop(Register r0, Register r1, Register r2, Register r3) { + vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), + ARMRegister(r2, 64), ARMRegister(r3, 64)); + } + void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) { + vixl::MacroAssembler::Pop(r0, r1, r2, r3); + } + + void pop(const ValueOperand& v) { + pop(v.valueReg()); + } + void pop(const FloatRegister& f) { + vixl::MacroAssembler::Pop(ARMRegister(f.code(), 64)); + } + + void implicitPop(uint32_t args) { + MOZ_ASSERT(args % sizeof(intptr_t) == 0); + adjustFrame(-args); + } + void Pop(ARMRegister r) { + vixl::MacroAssembler::Pop(r); + adjustFrame(- r.size() / 8); + } + // FIXME: This is the same on every arch. + // FIXME: If we can share framePushed_, we can share this. + // FIXME: Or just make it at the highest level. + CodeOffset PushWithPatch(ImmWord word) { + framePushed_ += sizeof(word.value); + return pushWithPatch(word); + } + CodeOffset PushWithPatch(ImmPtr ptr) { + return PushWithPatch(ImmWord(uintptr_t(ptr.value))); + } + + uint32_t framePushed() const { + return framePushed_; + } + void adjustFrame(int32_t diff) { + setFramePushed(framePushed_ + diff); + } + + void setFramePushed(uint32_t framePushed) { + framePushed_ = framePushed; + } + + void freeStack(Register amount) { + vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64))); + } + + // Update sp with the value of the current active stack pointer, if necessary. 
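+    // The "active" stack pointer is normally the pseudo stack pointer (x28);
+    // the hardware sp only has to agree with it around operations that the
+    // hardware or the ABI performs relative to sp (calls, traps, signal
+    // delivery).  Minimal model of the two helpers, with PSP as a stand-in
+    // name for GetStackPointer64():
+    //
+    //     sync: if (PSP is not sp)  sp  = PSP;   // publish the JIT view
+    //     init: if (PSP is not sp)  PSP = sp;    // adopt sp on entry
+    //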
+ void syncStackPtr() { + if (!GetStackPointer64().Is(vixl::sp)) + Mov(vixl::sp, GetStackPointer64()); + } + void initStackPtr() { + if (!GetStackPointer64().Is(vixl::sp)) + Mov(GetStackPointer64(), vixl::sp); + } + void storeValue(ValueOperand val, const Address& dest) { + storePtr(val.valueReg(), dest); + } + + template <typename T> + void storeValue(JSValueType type, Register reg, const T& dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != reg); + tagValue(type, reg, ValueOperand(scratch)); + storeValue(ValueOperand(scratch), dest); + } + template <typename T> + void storeValue(const Value& val, const T& dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + moveValue(val, ValueOperand(scratch)); + storeValue(ValueOperand(scratch), dest); + } + void storeValue(ValueOperand val, BaseIndex dest) { + storePtr(val.valueReg(), dest); + } + void storeValue(const Address& src, const Address& dest, Register temp) { + loadPtr(src, temp); + storePtr(temp, dest); + } + + void loadValue(Address src, Register val) { + Ldr(ARMRegister(val, 64), MemOperand(src)); + } + void loadValue(Address src, ValueOperand val) { + Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src)); + } + void loadValue(const BaseIndex& src, ValueOperand val) { + doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x); + } + void tagValue(JSValueType type, Register payload, ValueOperand dest) { + // This could be cleverer, but the first attempt had bugs. + Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64), Operand(ImmShiftedTag(type).value)); + } + void pushValue(ValueOperand val) { + vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64)); + } + void popValue(ValueOperand val) { + vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64)); + } + void pushValue(const Value& val) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + if (val.isMarkable()) { + BufferOffset load = movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), scratch); + writeDataRelocation(val, load); + push(scratch); + } else { + moveValue(val, scratch); + push(scratch); + } + } + void pushValue(JSValueType type, Register reg) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != reg); + tagValue(type, reg, ValueOperand(scratch)); + push(scratch); + } + void pushValue(const Address& addr) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != addr.base); + loadValue(addr, scratch); + push(scratch); + } + template <typename T> + void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) { + switch (nbytes) { + case 8: { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + unboxNonDouble(value, scratch); + storePtr(scratch, address); + return; + } + case 4: + store32(value.valueReg(), address); + return; + case 1: + store8(value.valueReg(), address); + return; + default: MOZ_CRASH("Bad payload width"); + } + } + void moveValue(const Value& val, Register dest) { + if (val.isMarkable()) { + BufferOffset load = movePatchablePtr(ImmPtr(val.bitsAsPunboxPointer()), dest); + writeDataRelocation(val, load); + } else { + movePtr(ImmWord(val.asRawBits()), dest); + } + } + void moveValue(const Value& src, const ValueOperand& dest) { + moveValue(src, 
dest.valueReg()); + } + void moveValue(const ValueOperand& src, const ValueOperand& dest) { + if (src.valueReg() != dest.valueReg()) + movePtr(src.valueReg(), dest.valueReg()); + } + + CodeOffset pushWithPatch(ImmWord imm) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + CodeOffset label = movWithPatch(imm, scratch); + push(scratch); + return label; + } + + CodeOffset movWithPatch(ImmWord imm, Register dest) { + BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value); + return CodeOffset(off.getOffset()); + } + CodeOffset movWithPatch(ImmPtr imm, Register dest) { + BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value)); + return CodeOffset(off.getOffset()); + } + + void boxValue(JSValueType type, Register src, Register dest) { + Orr(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(ImmShiftedTag(type).value)); + } + void splitTag(Register src, Register dest) { + ubfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT, (64 - JSVAL_TAG_SHIFT)); + } + Register extractTag(const Address& address, Register scratch) { + loadPtr(address, scratch); + splitTag(scratch, scratch); + return scratch; + } + Register extractTag(const ValueOperand& value, Register scratch) { + splitTag(value.valueReg(), scratch); + return scratch; + } + Register extractObject(const Address& address, Register scratch) { + loadPtr(address, scratch); + unboxObject(scratch, scratch); + return scratch; + } + Register extractObject(const ValueOperand& value, Register scratch) { + unboxObject(value, scratch); + return scratch; + } + Register extractInt32(const ValueOperand& value, Register scratch) { + unboxInt32(value, scratch); + return scratch; + } + Register extractBoolean(const ValueOperand& value, Register scratch) { + unboxBoolean(value, scratch); + return scratch; + } + + inline void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure); + + void emitSet(Condition cond, Register dest) { + Cset(ARMRegister(dest, 64), cond); + } + + void testNullSet(Condition cond, const ValueOperand& value, Register dest) { + cond = testNull(cond, value); + emitSet(cond, dest); + } + void testObjectSet(Condition cond, const ValueOperand& value, Register dest) { + cond = testObject(cond, value); + emitSet(cond, dest); + } + void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) { + cond = testUndefined(cond, value); + emitSet(cond, dest); + } + + void convertBoolToInt32(Register source, Register dest) { + Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64)); + } + + void convertInt32ToDouble(Register src, FloatRegister dest) { + Scvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode. + } + void convertInt32ToDouble(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertInt32ToDouble(scratch, dest); + } + void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + MOZ_ASSERT(scratch != src.index); + load32(src, scratch); + convertInt32ToDouble(scratch, dest); + } + + void convertInt32ToFloat32(Register src, FloatRegister dest) { + Scvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode. 
+ } + void convertInt32ToFloat32(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertInt32ToFloat32(scratch, dest); + } + + void convertUInt32ToDouble(Register src, FloatRegister dest) { + Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode. + } + void convertUInt32ToDouble(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertUInt32ToDouble(scratch, dest); + } + + void convertUInt32ToFloat32(Register src, FloatRegister dest) { + Ucvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode. + } + void convertUInt32ToFloat32(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertUInt32ToFloat32(scratch, dest); + } + + void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) { + Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32)); + } + void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) { + Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64)); + } + + using vixl::MacroAssembler::B; + void B(wasm::TrapDesc) { + MOZ_CRASH("NYI"); + } + void B(wasm::TrapDesc, Condition cond) { + MOZ_CRASH("NYI"); + } + + void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail, + bool negativeZeroCheck = true) + { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratch64 = temps.AcquireD(); + + ARMFPRegister fsrc(src, 64); + ARMRegister dest32(dest, 32); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch64.Is(fsrc)); + + Fcvtzs(dest32, fsrc); // Convert, rounding toward zero. + Scvtf(scratch64, dest32); // Convert back, using FPCR rounding mode. + Fcmp(scratch64, fsrc); + B(fail, Assembler::NotEqual); + + if (negativeZeroCheck) { + Label nonzero; + Cbnz(dest32, &nonzero); + Fmov(dest64, fsrc); + Cbnz(dest64, fail); + bind(&nonzero); + } + } + void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail, + bool negativeZeroCheck = true) + { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratch32 = temps.AcquireS(); + + ARMFPRegister fsrc(src, 32); + ARMRegister dest32(dest, 32); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch32.Is(fsrc)); + + Fcvtzs(dest64, fsrc); // Convert, rounding toward zero. + Scvtf(scratch32, dest32); // Convert back, using FPCR rounding mode. + Fcmp(scratch32, fsrc); + B(fail, Assembler::NotEqual); + + if (negativeZeroCheck) { + Label nonzero; + Cbnz(dest32, &nonzero); + Fmov(dest32, fsrc); + Cbnz(dest32, fail); + bind(&nonzero); + } + And(dest64, dest64, Operand(0xffffffff)); + } + + void floor(FloatRegister input, Register output, Label* bail) { + Label handleZero; + //Label handleNeg; + Label fin; + ARMFPRegister iDbl(input, 64); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iDbl, 0.0); + B(Assembler::Equal, &handleZero); + //B(Assembler::Signed, &handleNeg); + // NaN is always a bail condition, just bail directly. 
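+        // After an Fcmp, the overflow flag is set only for an unordered result,
+        // i.e. when the input is NaN, so Assembler::Overflow is the NaN test.
+        // Fcvtms then rounds toward minus infinity into a 64-bit register, and
+        // comparing that result against its own sign-extended low word
+        // (Operand(o64, vixl::SXTW)) bails whenever the floor does not fit in
+        // an int32.  For example (values only, no code emitted):
+        //
+        //     floor(-1.5) = -2            -> fits in int32, kept
+        //     floor(3e10) = 30000000000   -> differs from SXTW of its low word, bail
+        //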
+ B(Assembler::Overflow, bail); + Fcvtms(o64, iDbl); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + // Move the top word of the double into the output reg, if it is non-zero, + // then the original value was -0.0. + Fmov(o64, iDbl); + Cbnz(o64, bail); + bind(&fin); + } + + void floorf(FloatRegister input, Register output, Label* bail) { + Label handleZero; + //Label handleNeg; + Label fin; + ARMFPRegister iFlt(input, 32); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iFlt, 0.0); + B(Assembler::Equal, &handleZero); + //B(Assembler::Signed, &handleNeg); + // NaN is always a bail condition, just bail directly. + B(Assembler::Overflow, bail); + Fcvtms(o64, iFlt); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + // Move the top word of the double into the output reg, if it is non-zero, + // then the original value was -0.0. + Fmov(o32, iFlt); + Cbnz(o32, bail); + bind(&fin); + } + + void ceil(FloatRegister input, Register output, Label* bail) { + Label handleZero; + Label fin; + ARMFPRegister iDbl(input, 64); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iDbl, 0.0); + B(Assembler::Overflow, bail); + Fcvtps(o64, iDbl); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Cbz(o64, &handleZero); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + Fmov(scratch, iDbl); + Cbnz(scratch, bail); + bind(&fin); + } + + void ceilf(FloatRegister input, Register output, Label* bail) { + Label handleZero; + Label fin; + ARMFPRegister iFlt(input, 32); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iFlt, 0.0); + + // NaN is always a bail condition, just bail directly. + B(Assembler::Overflow, bail); + Fcvtps(o64, iFlt); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Cbz(o64, &handleZero); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + // Move the top word of the double into the output reg, if it is non-zero, + // then the original value was -0.0. + Fmov(o32, iFlt); + Cbnz(o32, bail); + bind(&fin); + } + + void jump(Label* label) { + B(label); + } + void jump(JitCode* code) { + branch(code); + } + void jump(RepatchLabel* label) { + MOZ_CRASH("jump (repatchlabel)"); + } + void jump(Register reg) { + Br(ARMRegister(reg, 64)); + } + void jump(const Address& addr) { + loadPtr(addr, ip0); + Br(vixl::ip0); + } + void jump(wasm::TrapDesc target) { + MOZ_CRASH("NYI"); + } + + void align(int alignment) { + armbuffer_.align(alignment); + } + + void haltingAlign(int alignment) { + // TODO: Implement a proper halting align. + // ARM doesn't have one either. 
+ armbuffer_.align(alignment); + } + void nopAlign(int alignment) { + MOZ_CRASH("NYI"); + } + + void movePtr(Register src, Register dest) { + Mov(ARMRegister(dest, 64), ARMRegister(src, 64)); + } + void movePtr(ImmWord imm, Register dest) { + Mov(ARMRegister(dest, 64), int64_t(imm.value)); + } + void movePtr(ImmPtr imm, Register dest) { + Mov(ARMRegister(dest, 64), int64_t(imm.value)); + } + void movePtr(wasm::SymbolicAddress imm, Register dest) { + BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest); + append(wasm::SymbolicAccess(CodeOffset(off.getOffset()), imm)); + } + void movePtr(ImmGCPtr imm, Register dest) { + BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest); + writeDataRelocation(imm, load); + } + + void mov(ImmWord imm, Register dest) { + movePtr(imm, dest); + } + void mov(ImmPtr imm, Register dest) { + movePtr(imm, dest); + } + void mov(wasm::SymbolicAddress imm, Register dest) { + movePtr(imm, dest); + } + void mov(Register src, Register dest) { + movePtr(src, dest); + } + + void move32(Imm32 imm, Register dest) { + Mov(ARMRegister(dest, 32), (int64_t)imm.value); + } + void move32(Register src, Register dest) { + Mov(ARMRegister(dest, 32), ARMRegister(src, 32)); + } + + // Move a pointer using a literal pool, so that the pointer + // may be easily patched or traced. + // Returns the BufferOffset of the load instruction emitted. + BufferOffset movePatchablePtr(ImmWord ptr, Register dest); + BufferOffset movePatchablePtr(ImmPtr ptr, Register dest); + + void loadPtr(wasm::SymbolicAddress address, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + movePtr(address, scratch.asUnsized()); + Ldr(ARMRegister(dest, 64), MemOperand(scratch)); + } + void loadPtr(AbsoluteAddress address, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized()); + Ldr(ARMRegister(dest, 64), MemOperand(scratch)); + } + void loadPtr(const Address& address, Register dest) { + Ldr(ARMRegister(dest, 64), MemOperand(address)); + } + void loadPtr(const BaseIndex& src, Register dest) { + Register base = src.base; + uint32_t scale = Imm32::ShiftOf(src.scale).value; + ARMRegister dest64(dest, 64); + ARMRegister index64(src.index, 64); + + if (src.offset) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + MOZ_ASSERT(!scratch.Is(ARMRegister(base, 64))); + MOZ_ASSERT(!scratch.Is(dest64)); + MOZ_ASSERT(!scratch.Is(index64)); + + Add(scratch, ARMRegister(base, 64), Operand(int64_t(src.offset))); + Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale)); + return; + } + + Ldr(dest64, MemOperand(ARMRegister(base, 64), index64, vixl::LSL, scale)); + } + void loadPrivate(const Address& src, Register dest); + + void store8(Register src, const Address& address) { + Strb(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store8(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + move32(imm, scratch32.asUnsized()); + Strb(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store8(Register src, const BaseIndex& address) { + doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w); + } + void store8(Imm32 imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope 
temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + MOZ_ASSERT(scratch32.asUnsized() != address.index); + Mov(scratch32, Operand(imm.value)); + doBaseIndex(scratch32, address, vixl::STRB_w); + } + + void store16(Register src, const Address& address) { + Strh(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store16(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + move32(imm, scratch32.asUnsized()); + Strh(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store16(Register src, const BaseIndex& address) { + doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w); + } + void store16(Imm32 imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + MOZ_ASSERT(scratch32.asUnsized() != address.index); + Mov(scratch32, Operand(imm.value)); + doBaseIndex(scratch32, address, vixl::STRH_w); + } + + void storePtr(ImmWord imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + movePtr(imm, scratch); + storePtr(scratch, address); + } + void storePtr(ImmPtr imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + Mov(scratch64, uint64_t(imm.value)); + Str(scratch64, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void storePtr(ImmGCPtr imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + movePtr(imm, scratch); + storePtr(scratch, address); + } + void storePtr(Register src, const Address& address) { + Str(ARMRegister(src, 64), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + + void storePtr(ImmWord imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + MOZ_ASSERT(scratch64.asUnsized() != address.index); + Mov(scratch64, Operand(imm.value)); + doBaseIndex(scratch64, address, vixl::STR_x); + } + void storePtr(ImmGCPtr imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + MOZ_ASSERT(scratch != address.index); + movePtr(imm, scratch); + doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x); + } + void storePtr(Register src, const BaseIndex& address) { + doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x); + } + + void storePtr(Register src, AbsoluteAddress address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + Mov(scratch64, uint64_t(address.addr)); + Str(ARMRegister(src, 64), MemOperand(scratch64)); + } + + void store32(Register src, AbsoluteAddress address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + Mov(scratch64, uint64_t(address.addr)); + Str(ARMRegister(src, 32), MemOperand(scratch64)); + } + void store32(Imm32 imm, const Address& address) { + 
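+        // AArch64 has no store-immediate form, so the constant is first
+        // materialized in a scratch register borrowed from VIXL's scratch pool;
+        // the assertions in these helpers keep the scratch from aliasing the
+        // address components.  Shape of the pattern (as used below):
+        //
+        //     vixl::UseScratchRegisterScope temps(this);  // borrow, auto-released
+        //     ARMRegister s = temps.AcquireW();           // 32-bit scratch
+        //     Mov(s, imm); Str(s, memOperand);            // materialize, then store
+        //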
vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + Mov(scratch32, uint64_t(imm.value)); + Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store32(Register r, const Address& address) { + Str(ARMRegister(r, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store32(Imm32 imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + MOZ_ASSERT(scratch32.asUnsized() != address.index); + Mov(scratch32, imm.value); + doBaseIndex(scratch32, address, vixl::STR_w); + } + void store32(Register r, const BaseIndex& address) { + doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w); + } + + void store32_NoSecondScratch(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + temps.Exclude(ARMRegister(ScratchReg2, 32)); // Disallow ScratchReg2. + const ARMRegister scratch32 = temps.AcquireW(); + + MOZ_ASSERT(scratch32.asUnsized() != address.base); + Mov(scratch32, uint64_t(imm.value)); + Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + + void store64(Register64 src, Address address) { + storePtr(src.reg, address); + } + + // SIMD. + void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadAlignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeAlignedSimd128Int(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); } + void storeAlignedSimd128Int(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Int(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Int(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + + void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadAlignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeAlignedSimd128Float(FloatRegister src, const Address& addr) { 
MOZ_CRASH("NYI"); } + void storeAlignedSimd128Float(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Float(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Float(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + + // StackPointer manipulation. + template <typename T> void addToStackPtr(T t); + template <typename T> void addStackPtrTo(T t); + + template <typename T> inline void subFromStackPtr(T t); + template <typename T> inline void subStackPtrFrom(T t); + + template <typename T> void andToStackPtr(T t); + template <typename T> void andStackPtrTo(T t); + + template <typename T> + void moveToStackPtr(T t) { movePtr(t, getStackPointer()); syncStackPtr(); } + template <typename T> + void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); } + + template <typename T> + void loadStackPtr(T t) { loadPtr(t, getStackPointer()); syncStackPtr(); } + template <typename T> + void storeStackPtr(T t) { storePtr(getStackPointer(), t); } + + // StackPointer testing functions. + template <typename T> + inline void branchTestStackPtr(Condition cond, T t, Label* label); + template <typename T> + void branchStackPtr(Condition cond, T rhs, Label* label); + template <typename T> + void branchStackPtrRhs(Condition cond, T lhs, Label* label); + + void testPtr(Register lhs, Register rhs) { + Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64))); + } + void test32(Register lhs, Register rhs) { + Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32))); + } + void test32(const Address& addr, Imm32 imm) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != addr.base); + load32(addr, scratch32.asUnsized()); + Tst(scratch32, Operand(imm.value)); + } + void test32(Register lhs, Imm32 rhs) { + Tst(ARMRegister(lhs, 32), Operand(rhs.value)); + } + void cmp32(Register lhs, Imm32 rhs) { + Cmp(ARMRegister(lhs, 32), Operand(rhs.value)); + } + void cmp32(Register a, Register b) { + Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32))); + } + void cmp32(const Address& lhs, Imm32 rhs) { + cmp32(Operand(lhs.base, lhs.offset), rhs); + } + void cmp32(const Address& lhs, Register rhs) { + cmp32(Operand(lhs.base, lhs.offset), rhs); + } + void cmp32(const Operand& lhs, Imm32 rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + Mov(scratch32, lhs); + Cmp(scratch32, Operand(rhs.value)); + } + void cmp32(const Operand& lhs, Register rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + Mov(scratch32, lhs); + Cmp(scratch32, Operand(ARMRegister(rhs, 32))); + } + + void cmpPtr(Register lhs, Imm32 rhs) { + Cmp(ARMRegister(lhs, 64), Operand(rhs.value)); + } + void cmpPtr(Register lhs, ImmWord rhs) { + Cmp(ARMRegister(lhs, 64), Operand(rhs.value)); + } + void cmpPtr(Register lhs, ImmPtr rhs) { + Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value))); + } + void cmpPtr(Register lhs, Register rhs) { + Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64)); + } + void cmpPtr(Register lhs, ImmGCPtr rhs) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs); + movePtr(rhs, scratch); + cmpPtr(lhs, 
scratch); + } + + void cmpPtr(const Address& lhs, Register rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.base); + MOZ_ASSERT(scratch64.asUnsized() != rhs); + Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset)); + Cmp(scratch64, Operand(ARMRegister(rhs, 64))); + } + void cmpPtr(const Address& lhs, ImmWord rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.base); + Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset)); + Cmp(scratch64, Operand(rhs.value)); + } + void cmpPtr(const Address& lhs, ImmPtr rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.base); + Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset)); + Cmp(scratch64, Operand(uint64_t(rhs.value))); + } + void cmpPtr(const Address& lhs, ImmGCPtr rhs) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + cmpPtr(scratch, rhs); + } + + void loadDouble(const Address& src, FloatRegister dest) { + Ldr(ARMFPRegister(dest, 64), MemOperand(src)); + } + void loadDouble(const BaseIndex& src, FloatRegister dest) { + ARMRegister base(src.base, 64); + ARMRegister index(src.index, 64); + + if (src.offset == 0) { + Ldr(ARMFPRegister(dest, 64), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); + return; + } + + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + MOZ_ASSERT(scratch64.asUnsized() != src.index); + + Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale))); + Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset)); + } + void loadFloatAsDouble(const Address& addr, FloatRegister dest) { + Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset)); + fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32)); + } + void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) { + ARMRegister base(src.base, 64); + ARMRegister index(src.index, 64); + if (src.offset == 0) { + Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + MOZ_ASSERT(scratch64.asUnsized() != src.index); + + Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale))); + Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset)); + } + fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32)); + } + + void loadFloat32(const Address& addr, FloatRegister dest) { + Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset)); + } + void loadFloat32(const BaseIndex& src, FloatRegister dest) { + ARMRegister base(src.base, 64); + ARMRegister index(src.index, 64); + if (src.offset == 0) { + Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + MOZ_ASSERT(scratch64.asUnsized() != src.index); + + Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale))); + Ldr(ARMFPRegister(dest, 32), 
MemOperand(scratch64, src.offset)); + } + } + + void moveDouble(FloatRegister src, FloatRegister dest) { + fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); + } + void zeroDouble(FloatRegister reg) { + fmov(ARMFPRegister(reg, 64), vixl::xzr); + } + void zeroFloat32(FloatRegister reg) { + fmov(ARMFPRegister(reg, 32), vixl::wzr); + } + + void moveFloat32(FloatRegister src, FloatRegister dest) { + fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32)); + } + void moveFloatAsDouble(Register src, FloatRegister dest) { + MOZ_CRASH("moveFloatAsDouble"); + } + + void splitTag(const ValueOperand& operand, Register dest) { + splitTag(operand.valueReg(), dest); + } + void splitTag(const Address& operand, Register dest) { + loadPtr(operand, dest); + splitTag(dest, dest); + } + void splitTag(const BaseIndex& operand, Register dest) { + loadPtr(operand, dest); + splitTag(dest, dest); + } + + // Extracts the tag of a value and places it in ScratchReg. + Register splitTagForTest(const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != value.valueReg()); + Lsr(scratch64, ARMRegister(value.valueReg(), 64), JSVAL_TAG_SHIFT); + return scratch64.asUnsized(); // FIXME: Surely we can make a better interface. + } + void cmpTag(const ValueOperand& operand, ImmTag tag) { + MOZ_CRASH("cmpTag"); + } + + void load32(const Address& address, Register dest) { + Ldr(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load32(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w); + } + void load32(AbsoluteAddress address, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized()); + ldr(ARMRegister(dest, 32), MemOperand(scratch64)); + } + void load64(const Address& address, Register64 dest) { + loadPtr(address, dest.reg); + } + + void load8SignExtend(const Address& address, Register dest) { + Ldrsb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load8SignExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w); + } + + void load8ZeroExtend(const Address& address, Register dest) { + Ldrb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load8ZeroExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w); + } + + void load16SignExtend(const Address& address, Register dest) { + Ldrsh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load16SignExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w); + } + + void load16ZeroExtend(const Address& address, Register dest) { + Ldrh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load16ZeroExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w); + } + + void adds32(Register src, Register dest) { + Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + void adds32(Imm32 imm, Register dest) { + Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void adds32(Imm32 imm, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = 
temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + + Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Adds(scratch32, scratch32, Operand(imm.value)); + Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + } + + void subs32(Imm32 imm, Register dest) { + Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void subs32(Register src, Register dest) { + Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + + void ret() { + pop(lr); + abiret(); + } + + void retn(Imm32 n) { + // ip0 <- [sp]; sp += n; ret ip0 + Ldr(vixl::ip0, MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex)); + syncStackPtr(); // SP is always used to transmit the stack between calls. + Ret(vixl::ip0); + } + + void j(Condition cond, Label* dest) { + B(dest, cond); + } + + void branch(Condition cond, Label* label) { + B(label, cond); + } + void branch(JitCode* target) { + syncStackPtr(); + BufferOffset loc = b(-1); // The jump target will be patched by executableCopy(). + addPendingJump(loc, ImmPtr(target->raw()), Relocation::JITCODE); + } + + CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always, + Label* documentation = nullptr) + { + ARMBuffer::PoolEntry pe; + BufferOffset load_bo; + BufferOffset branch_bo; + + // Does not overwrite condition codes from the caller. + { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + load_bo = immPool64(scratch64, (uint64_t)label, &pe); + } + + MOZ_ASSERT(!label->bound()); + if (cond != Always) { + Label notTaken; + B(¬Taken, Assembler::InvertCondition(cond)); + branch_bo = b(-1); + bind(¬Taken); + } else { + nop(); + branch_bo = b(-1); + } + label->use(branch_bo.getOffset()); + return CodeOffsetJump(load_bo.getOffset(), pe.index()); + } + CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) { + return jumpWithPatch(label, Always, documentation); + } + + void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) { + Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64)); + } + + void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) { + Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32)); + } + + void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) { + MOZ_CRASH("branchNegativeZero"); + } + void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label) { + MOZ_CRASH("branchNegativeZeroFloat32"); + } + + void boxDouble(FloatRegister src, const ValueOperand& dest) { + Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64)); + } + void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) { + boxValue(type, src, dest.valueReg()); + } + + // Note that the |dest| register here may be ScratchReg, so we shouldn't use it. 
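+    // Unboxing here is bit surgery on the 64-bit Value: int32 and boolean
+    // payloads occupy the low 32 bits, a double is simply the raw bits moved
+    // into an FP register (Fmov), and pointer-like payloads are recovered by
+    // masking off the tag, as unboxNonDouble() further down does with
+    //
+    //     And(dest64, src64, Operand((1ULL << JSVAL_TAG_SHIFT) - 1ULL));
+    //
+    // (shown only to make the pattern explicit; the real helpers follow).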
+ void unboxInt32(const ValueOperand& src, Register dest) { + move32(src.valueReg(), dest); + } + void unboxInt32(const Address& src, Register dest) { + load32(src, dest); + } + void unboxDouble(const Address& src, FloatRegister dest) { + loadDouble(src, dest); + } + void unboxDouble(const ValueOperand& src, FloatRegister dest) { + Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64)); + } + + void unboxArgObjMagic(const ValueOperand& src, Register dest) { + MOZ_CRASH("unboxArgObjMagic"); + } + void unboxArgObjMagic(const Address& src, Register dest) { + MOZ_CRASH("unboxArgObjMagic"); + } + + void unboxBoolean(const ValueOperand& src, Register dest) { + move32(src.valueReg(), dest); + } + void unboxBoolean(const Address& src, Register dest) { + load32(src, dest); + } + + void unboxMagic(const ValueOperand& src, Register dest) { + move32(src.valueReg(), dest); + } + // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean + // instead if the source type is known. + void unboxNonDouble(const ValueOperand& src, Register dest) { + unboxNonDouble(src.valueReg(), dest); + } + void unboxNonDouble(Address src, Register dest) { + loadPtr(src, dest); + unboxNonDouble(dest, dest); + } + + void unboxNonDouble(Register src, Register dest) { + And(ARMRegister(dest, 64), ARMRegister(src, 64), Operand((1ULL << JSVAL_TAG_SHIFT) - 1ULL)); + } + + void unboxPrivate(const ValueOperand& src, Register dest) { + ubfx(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1, JSVAL_TAG_SHIFT - 1); + } + + void notBoolean(const ValueOperand& val) { + ARMRegister r(val.valueReg(), 64); + eor(r, r, Operand(1)); + } + void unboxObject(const ValueOperand& src, Register dest) { + unboxNonDouble(src.valueReg(), dest); + } + void unboxObject(Register src, Register dest) { + unboxNonDouble(src, dest); + } + void unboxObject(const Address& src, Register dest) { + loadPtr(src, dest); + unboxNonDouble(dest, dest); + } + void unboxObject(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x); + unboxNonDouble(dest, dest); + } + + inline void unboxValue(const ValueOperand& src, AnyRegister dest); + + void unboxString(const ValueOperand& operand, Register dest) { + unboxNonDouble(operand, dest); + } + void unboxString(const Address& src, Register dest) { + unboxNonDouble(src, dest); + } + void unboxSymbol(const ValueOperand& operand, Register dest) { + unboxNonDouble(operand, dest); + } + void unboxSymbol(const Address& src, Register dest) { + unboxNonDouble(src, dest); + } + // These two functions use the low 32-bits of the full value register. 
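+    // Reading only the 32-bit (W) view is what makes that safe: the int32 or
+    // boolean payload sits in bits 0..31 of the boxed value, and the
+    // conversions below consume ARMRegister(src, 32), so the tag bits above
+    // bit 31 never reach the Scvtf.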
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToDouble(operand.valueReg(), dest); + } + void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToDouble(operand.valueReg(), dest); + } + + void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToFloat32(operand.valueReg(), dest); + } + void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToFloat32(operand.valueReg(), dest); + } + + void loadConstantDouble(wasm::RawF64 d, FloatRegister dest) { + loadConstantDouble(d.fp(), dest); + } + void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest) { + loadConstantFloat32(f.fp(), dest); + } + void loadConstantDouble(double d, FloatRegister dest) { + Fmov(ARMFPRegister(dest, 64), d); + } + void loadConstantFloat32(float f, FloatRegister dest) { + Fmov(ARMFPRegister(dest, 32), f); + } + + // Register-based tests. + Condition testUndefined(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED)); + return cond; + } + Condition testInt32(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_INT32)); + return cond; + } + Condition testBoolean(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN)); + return cond; + } + Condition testNull(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_NULL)); + return cond; + } + Condition testString(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_STRING)); + return cond; + } + Condition testSymbol(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL)); + return cond; + } + Condition testObject(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_OBJECT)); + return cond; + } + Condition testDouble(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE)); + return (cond == Equal) ? BelowOrEqual : Above; + } + Condition testNumber(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET)); + return (cond == Equal) ? BelowOrEqual : Above; + } + Condition testGCThing(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); + return (cond == Equal) ? AboveOrEqual : Below; + } + Condition testMagic(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_MAGIC)); + return cond; + } + Condition testPrimitive(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET)); + return (cond == Equal) ? Below : AboveOrEqual; + } + Condition testError(Condition cond, Register tag) { + return testMagic(cond, tag); + } + + // ValueOperand-based tests. + Condition testInt32(Condition cond, const ValueOperand& value) { + // The incoming ValueOperand may use scratch registers. 
+ vixl::UseScratchRegisterScope temps(this); + + if (value.valueReg() == ScratchReg2) { + MOZ_ASSERT(temps.IsAvailable(ScratchReg64)); + MOZ_ASSERT(!temps.IsAvailable(ScratchReg2_64)); + temps.Exclude(ScratchReg64); + + if (cond != Equal && cond != NotEqual) + MOZ_CRASH("NYI: non-equality comparisons"); + + // In the event that the tag is not encodable in a single cmp / teq instruction, + // perform the xor that teq would use, this will leave the tag bits being + // zero, or non-zero, which can be tested with either and or shift. + unsigned int n, imm_r, imm_s; + uint64_t immediate = uint64_t(ImmTag(JSVAL_TAG_INT32).value) << JSVAL_TAG_SHIFT; + if (IsImmLogical(immediate, 64, &n, &imm_s, &imm_r)) { + Eor(ScratchReg64, ScratchReg2_64, Operand(immediate)); + } else { + Mov(ScratchReg64, immediate); + Eor(ScratchReg64, ScratchReg2_64, ScratchReg64); + } + Tst(ScratchReg64, Operand((unsigned long long)(-1ll) << JSVAL_TAG_SHIFT)); + return cond; + } + + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != value.valueReg()); + + splitTag(value, scratch); + return testInt32(cond, scratch); + } + Condition testBoolean(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testBoolean(cond, scratch); + } + Condition testDouble(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testDouble(cond, scratch); + } + Condition testNull(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testNull(cond, scratch); + } + Condition testUndefined(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testUndefined(cond, scratch); + } + Condition testString(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testString(cond, scratch); + } + Condition testSymbol(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testSymbol(cond, scratch); + } + Condition testObject(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testObject(cond, scratch); + } + Condition testNumber(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testNumber(cond, scratch); + } + Condition testPrimitive(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); 
+ MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testPrimitive(cond, scratch); + } + Condition testMagic(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testMagic(cond, scratch); + } + Condition testError(Condition cond, const ValueOperand& value) { + return testMagic(cond, value); + } + + // Address-based tests. + Condition testGCThing(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testGCThing(cond, scratch); + } + Condition testMagic(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testMagic(cond, scratch); + } + Condition testInt32(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testInt32(cond, scratch); + } + Condition testDouble(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testDouble(cond, scratch); + } + Condition testBoolean(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testBoolean(cond, scratch); + } + Condition testNull(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testNull(cond, scratch); + } + Condition testUndefined(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testUndefined(cond, scratch); + } + Condition testString(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testString(cond, scratch); + } + Condition testSymbol(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testSymbol(cond, scratch); + } + Condition testObject(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testObject(cond, scratch); + } + Condition testNumber(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testNumber(cond, 
scratch); + } + + // BaseIndex-based tests. + Condition testUndefined(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testUndefined(cond, scratch); + } + Condition testNull(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testNull(cond, scratch); + } + Condition testBoolean(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testBoolean(cond, scratch); + } + Condition testString(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testString(cond, scratch); + } + Condition testSymbol(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testSymbol(cond, scratch); + } + Condition testInt32(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testInt32(cond, scratch); + } + Condition testObject(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testObject(cond, scratch); + } + Condition testDouble(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testDouble(cond, scratch); + } + Condition testMagic(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testMagic(cond, scratch); + } + Condition testGCThing(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testGCThing(cond, scratch); + } + + Condition testInt32Truthy(bool truthy, const ValueOperand& operand) { + ARMRegister payload32(operand.valueReg(), 32); + Tst(payload32, payload32); + return truthy ? NonZero : Zero; + } + + Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) { + ARMRegister payload32(operand.valueReg(), 32); + Tst(payload32, payload32); + return truthy ? 
NonZero : Zero; + } + Condition testStringTruthy(bool truthy, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + const ARMRegister scratch32(scratch, 32); + const ARMRegister scratch64(scratch, 64); + + MOZ_ASSERT(value.valueReg() != scratch); + + unboxString(value, scratch); + Ldr(scratch32, MemOperand(scratch64, JSString::offsetOfLength())); + Cmp(scratch32, Operand(0)); + return truthy ? Condition::NonZero : Condition::Zero; + } + void int32OrDouble(Register src, ARMFPRegister dest) { + Label isInt32; + Label join; + testInt32(Equal, ValueOperand(src)); + B(&isInt32, Equal); + // It is a double; move the bits as-is. + Fmov(dest, ARMRegister(src, 64)); + B(&join); + bind(&isInt32); + // It is an int32; convert while moving. + Scvtf(dest, ARMRegister(src, 32)); + bind(&join); + } + void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) { + if (dest.isFloat()) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + Ldr(scratch64, toMemOperand(address)); + int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64)); + } else if (type == MIRType::Int32 || type == MIRType::Boolean) { + load32(address, dest.gpr()); + } else { + loadPtr(address, dest.gpr()); + unboxNonDouble(dest.gpr(), dest.gpr()); + } + } + + void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) { + if (dest.isFloat()) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + MOZ_ASSERT(scratch64.asUnsized() != address.index); + doBaseIndex(scratch64, address, vixl::LDR_x); + int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64)); + } else if (type == MIRType::Int32 || type == MIRType::Boolean) { + load32(address, dest.gpr()); + } else { + loadPtr(address, dest.gpr()); + unboxNonDouble(dest.gpr(), dest.gpr()); + } + } + + void loadInstructionPointerAfterCall(Register dest) { + MOZ_CRASH("loadInstructionPointerAfterCall"); + } + + // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp(). + CodeOffset toggledJump(Label* label) { + BufferOffset offset = b(label, Always); + CodeOffset ret(offset.getOffset()); + return ret; + } + + // load: offset to the load instruction obtained by movePatchablePtr().
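// [Editor's note: writeDataRelocation() below records the buffer offset of a
//  patchable load that embeds a GC-thing pointer, so the tracer can later find
//  and, if needed, update the embedded pointer. Setting embedsNurseryPointers_
//  ensures code that refers to nursery-allocated cells is revisited when a
//  minor GC moves them.]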
+ void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) { + if (ptr.value) + dataRelocations_.writeUnsigned(load.getOffset()); + } + void writeDataRelocation(const Value& val, BufferOffset load) { + if (val.isMarkable()) { + gc::Cell* cell = val.toMarkablePointer(); + if (cell && gc::IsInsideNursery(cell)) + embedsNurseryPointers_ = true; + dataRelocations_.writeUnsigned(load.getOffset()); + } + } + + void writePrebarrierOffset(CodeOffset label) { + preBarriers_.writeUnsigned(label.offset()); + } + + void computeEffectiveAddress(const Address& address, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset)); + } + void computeEffectiveAddress(const BaseIndex& address, Register dest) { + ARMRegister dest64(dest, 64); + ARMRegister base64(address.base, 64); + ARMRegister index64(address.index, 64); + + Add(dest64, base64, Operand(index64, vixl::LSL, address.scale)); + if (address.offset) + Add(dest64, dest64, Operand(address.offset)); + } + + public: + CodeOffset labelForPatch() { + return CodeOffset(nextOffset().getOffset()); + } + + void handleFailureWithHandlerTail(void* handler); + + void profilerEnterFrame(Register framePtr, Register scratch) { + AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation()); + loadPtr(activation, scratch); + storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame())); + storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite())); + } + void profilerExitFrame() { + branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail()); + } + Address ToPayload(Address value) { + return value; + } + Address ToType(Address value) { + return value; + } + + private: + template <typename T> + void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval, + Register newval, Register output) + { + MOZ_CRASH("compareExchange"); + } + + template <typename T> + void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value, + const T& address, Register temp, Register output) + { + MOZ_CRASH("atomicFetchOp"); + } + + template <typename T> + void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value, + const T& address, Register temp, Register output) + { + MOZ_CRASH("atomicFetchOp"); + } + + template <typename T> + void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& mem) { + MOZ_CRASH("atomicEffectOp"); + } + + template <typename T> + void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& mem) { + MOZ_CRASH("atomicEffectOp"); + } + + public: + // T in {Address,BaseIndex} + // S in {Imm32,Register} + + template <typename T> + void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(1, true, mem, oldval, newval, output); + } + template <typename T> + void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(1, false, mem, oldval, newval, output); + } + template <typename T> + void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(2, true, mem, oldval, newval, output); + } + template <typename T> + void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(2, false, mem, oldval, newval, output); + } + template <typename T> + void compareExchange32(const T& mem, Register oldval, Register 
newval, Register output) { + compareExchange(4, false, mem, oldval, newval, output); + } + template <typename T> + void atomicExchange32(const T& mem, Register value, Register output) { + MOZ_CRASH("atomicExchang32"); + } + + template <typename T> + void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) { + MOZ_CRASH("atomicExchange8ZeroExtend"); + } + template <typename T> + void atomicExchange8SignExtend(const T& mem, Register value, Register output) { + MOZ_CRASH("atomicExchange8SignExtend"); + } + + template <typename T, typename S> + void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output); + } + + template <typename T, typename S> + void atomicAdd8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchAddOp, value, mem); + } + template <typename T, typename S> + void atomicAdd16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchAddOp, value, mem); + } + template <typename T, typename S> + void atomicAdd32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchAddOp, value, mem); + } + + template <typename T> + void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) { + MOZ_CRASH("atomicExchange16ZeroExtend"); + } + template <typename T> + void atomicExchange16SignExtend(const T& mem, Register value, Register output) { + MOZ_CRASH("atomicExchange16SignExtend"); + } + + template <typename T, typename S> + void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output); + } + + template <typename T, typename S> + void atomicSub8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchSubOp, value, mem); + } + template <typename T, typename S> + void atomicSub16(const S& value, const T& 
mem) { + atomicEffectOp(2, AtomicFetchSubOp, value, mem); + } + template <typename T, typename S> + void atomicSub32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchSubOp, value, mem); + } + + template <typename T, typename S> + void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output); + } + + template <typename T, typename S> + void atomicAnd8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchAndOp, value, mem); + } + template <typename T, typename S> + void atomicAnd16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchAndOp, value, mem); + } + template <typename T, typename S> + void atomicAnd32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchAndOp, value, mem); + } + + template <typename T, typename S> + void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output); + } + + template <typename T, typename S> + void atomicOr8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchOrOp, value, mem); + } + template <typename T, typename S> + void atomicOr16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchOrOp, value, mem); + } + template <typename T, typename S> + void atomicOr32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchOrOp, value, mem); + } + + template <typename T, typename S> + void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output); + } + template <typename T, 
typename S> + void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output); + } + template <typename T, typename S> + void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output); + } + + template <typename T, typename S> + void atomicXor8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchXorOp, value, mem); + } + template <typename T, typename S> + void atomicXor16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchXorOp, value, mem); + } + template <typename T, typename S> + void atomicXor32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchXorOp, value, mem); + } + + template<typename T> + void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval, + Register temp, AnyRegister output); + + template<typename T> + void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value, + Register temp, AnyRegister output); + + // Emit a BLR or NOP instruction. ToggleCall can be used to patch + // this instruction. + CodeOffset toggledCall(JitCode* target, bool enabled) { + // The returned offset must be to the first instruction generated, + // for the debugger to match offset with Baseline's pcMappingEntries_. + BufferOffset offset = nextOffset(); + + syncStackPtr(); + + BufferOffset loadOffset; + { + vixl::UseScratchRegisterScope temps(this); + + // The register used for the load is hardcoded, so that ToggleCall + // can patch in the branch instruction easily. This could be changed, + // but then ToggleCall must read the target register from the load. + MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64)); + temps.Exclude(ScratchReg2_64); + + loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw())); + + if (enabled) + blr(ScratchReg2_64); + else + nop(); + } + + addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE); + CodeOffset ret(offset.getOffset()); + return ret; + } + + static size_t ToggledCallSize(uint8_t* code) { + static const uint32_t syncStackInstruction = 0x9100039f; // mov sp, r28 + + // start it off as an 8 byte sequence + int ret = 8; + Instruction* cur = (Instruction*)code; + uint32_t* curw = (uint32_t*)code; + + if (*curw == syncStackInstruction) { + ret += 4; + cur += 4; + } + + if (cur->IsUncondB()) + ret += cur->ImmPCRawOffset() << vixl::kInstructionSizeLog2; + + return ret; + } + + void checkARMRegAlignment(const ARMRegister& reg) { +#ifdef DEBUG + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != reg.asUnsized()); + Label aligned; + Mov(scratch64, reg); + Tst(scratch64, Operand(StackAlignment - 1)); + B(Zero, &aligned); + breakpoint(); + bind(&aligned); + Mov(scratch64, vixl::xzr); // Clear the scratch register for sanity. +#endif + } + + void checkStackAlignment() { +#ifdef DEBUG + checkARMRegAlignment(GetStackPointer64()); + + // If another register is being used to track pushes, check sp explicitly. 
+ if (!GetStackPointer64().Is(vixl::sp)) + checkARMRegAlignment(vixl::sp); +#endif + } + + void abiret() { + syncStackPtr(); // SP is always used to transmit the stack between calls. + vixl::MacroAssembler::Ret(vixl::lr); + } + + bool convertUInt64ToDoubleNeedsTemp() { + return false; + } + + void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp) { + MOZ_ASSERT(temp == Register::Invalid()); + Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64)); + } + + void clampCheck(Register r, Label* handleNotAnInt) { + MOZ_CRASH("clampCheck"); + } + + void stackCheck(ImmWord limitAddr, Label* label) { + MOZ_CRASH("stackCheck"); + } + + void incrementInt32Value(const Address& addr) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != addr.base); + + load32(addr, scratch32.asUnsized()); + Add(scratch32, scratch32, Operand(1)); + store32(scratch32.asUnsized(), addr); + } + + void BoundsCheck(Register ptrReg, Label* onFail, vixl::CPURegister zeroMe = vixl::NoReg) { + // Use tst rather than Tst to *ensure* that a single instruction is generated. + Cmp(ARMRegister(ptrReg, 32), ARMRegister(HeapLenReg, 32)); + if (!zeroMe.IsNone()) { + if (zeroMe.IsRegister()) { + Csel(ARMRegister(zeroMe), + ARMRegister(zeroMe), + Operand(zeroMe.Is32Bits() ? vixl::wzr : vixl::xzr), + Assembler::Below); + } else if (zeroMe.Is32Bits()) { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratchFloat = temps.AcquireS(); + Fmov(scratchFloat, JS::GenericNaN()); + Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchFloat, Assembler::Below); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratchDouble = temps.AcquireD(); + Fmov(scratchDouble, JS::GenericNaN()); + Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchDouble, Assembler::Below); + } + } + B(onFail, Assembler::AboveOrEqual); + } + void breakpoint(); + + // Emits a simulator directive to save the current sp on an internal stack. + void simulatorMarkSP() { +#ifdef JS_SIMULATOR_ARM64 + svc(vixl::kMarkStackPointer); +#endif + } + + // Emits a simulator directive to pop from its internal stack + // and assert that the value is equal to the current sp. + void simulatorCheckSP() { +#ifdef JS_SIMULATOR_ARM64 + svc(vixl::kCheckStackPointer); +#endif + } + + void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) { + loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest); + } + void loadWasmPinnedRegsFromTls() { + loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg); + loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg); + adds32(Imm32(WasmGlobalRegBias), GlobalReg); + } + + // Overwrites the payload bits of a dest register containing a Value. + void movePayload(Register src, Register dest) { + // Bfxil cannot be used with the zero register as a source. + if (src == rzr) + And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(~int64_t(JSVAL_PAYLOAD_MASK))); + else + Bfxil(ARMRegister(dest, 64), ARMRegister(src, 64), 0, JSVAL_TAG_SHIFT); + } + + // FIXME: Should be in Assembler? + // FIXME: Should be const?
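// [Editor's note: BFXIL(dest, src, 0, JSVAL_TAG_SHIFT) in movePayload() above
//  copies bit field [0, JSVAL_TAG_SHIFT) of src into the same bits of dest and
//  leaves the remaining (tag) bits of dest untouched, i.e. it overwrites the
//  payload while preserving the tag. A sketch of the equivalent C, assuming
//  JSVAL_PAYLOAD_MASK covers exactly the payload bits:
//      dest = (dest & ~JSVAL_PAYLOAD_MASK) | (src & JSVAL_PAYLOAD_MASK);
//  The zero-register special case exists because, as noted above, the zero
//  register is not a valid BFXIL source, so the payload is simply cleared
//  with an AND instead.]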
+ uint32_t currentOffset() const { + return nextOffset().getOffset(); + } + + struct AutoPrepareForPatching { + explicit AutoPrepareForPatching(MacroAssemblerCompat&) {} + }; + + protected: + bool buildOOLFakeExitFrame(void* fakeReturnAddr) { + uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS, + ExitFrameLayout::Size()); + Push(Imm32(descriptor)); + Push(ImmPtr(fakeReturnAddr)); + return true; + } +}; + +typedef MacroAssemblerCompat MacroAssemblerSpecific; + +} // namespace jit +} // namespace js + +#endif // jit_arm64_MacroAssembler_arm64_h diff --git a/js/src/jit/arm64/MoveEmitter-arm64.cpp b/js/src/jit/arm64/MoveEmitter-arm64.cpp new file mode 100644 index 000000000..f8e237683 --- /dev/null +++ b/js/src/jit/arm64/MoveEmitter-arm64.cpp @@ -0,0 +1,300 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/arm64/MoveEmitter-arm64.h" +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace js::jit; + +MemOperand +MoveEmitterARM64::toMemOperand(const MoveOperand& operand) const +{ + MOZ_ASSERT(operand.isMemory()); + ARMRegister base(operand.base(), 64); + if (operand.base() == masm.getStackPointer()) + return MemOperand(base, operand.disp() + (masm.framePushed() - pushedAtStart_)); + return MemOperand(base, operand.disp()); +} + +void +MoveEmitterARM64::emit(const MoveResolver& moves) +{ + if (moves.numCycles()) { + masm.reserveStack(sizeof(void*)); + pushedAtCycle_ = masm.framePushed(); + } + + for (size_t i = 0; i < moves.numMoves(); i++) + emitMove(moves.getMove(i)); +} + +void +MoveEmitterARM64::finish() +{ + assertDone(); + masm.freeStack(masm.framePushed() - pushedAtStart_); + MOZ_ASSERT(masm.framePushed() == pushedAtStart_); +} + +void +MoveEmitterARM64::emitMove(const MoveOp& move) +{ + const MoveOperand& from = move.from(); + const MoveOperand& to = move.to(); + + if (move.isCycleBegin()) { + MOZ_ASSERT(!inCycle_ && !move.isCycleEnd()); + breakCycle(from, to, move.endCycleType()); + inCycle_ = true; + } else if (move.isCycleEnd()) { + MOZ_ASSERT(inCycle_); + completeCycle(from, to, move.type()); + inCycle_ = false; + return; + } + + switch (move.type()) { + case MoveOp::FLOAT32: + emitFloat32Move(from, to); + break; + case MoveOp::DOUBLE: + emitDoubleMove(from, to); + break; + case MoveOp::INT32: + emitInt32Move(from, to); + break; + case MoveOp::GENERAL: + emitGeneralMove(from, to); + break; + default: + MOZ_CRASH("Unexpected move type"); + } +} + +void +MoveEmitterARM64::emitFloat32Move(const MoveOperand& from, const MoveOperand& to) +{ + if (from.isFloatReg()) { + if (to.isFloatReg()) + masm.Fmov(toFPReg(to, MoveOp::FLOAT32), toFPReg(from, MoveOp::FLOAT32)); + else + masm.Str(toFPReg(from, MoveOp::FLOAT32), toMemOperand(to)); + return; + } + + if (to.isFloatReg()) { + masm.Ldr(toFPReg(to, MoveOp::FLOAT32), toMemOperand(from)); + return; + } + + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMFPRegister scratch32 = temps.AcquireS(); + masm.Ldr(scratch32, toMemOperand(from)); + masm.Str(scratch32, toMemOperand(to)); +} + +void +MoveEmitterARM64::emitDoubleMove(const MoveOperand& from, const MoveOperand& to) +{ + if (from.isFloatReg()) { + if (to.isFloatReg()) + masm.Fmov(toFPReg(to, MoveOp::DOUBLE), toFPReg(from, MoveOp::DOUBLE)); + else + 
masm.Str(toFPReg(from, MoveOp::DOUBLE), toMemOperand(to)); + return; + } + + if (to.isFloatReg()) { + masm.Ldr(toFPReg(to, MoveOp::DOUBLE), toMemOperand(from)); + return; + } + + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMFPRegister scratch = temps.AcquireD(); + masm.Ldr(scratch, toMemOperand(from)); + masm.Str(scratch, toMemOperand(to)); +} + +void +MoveEmitterARM64::emitInt32Move(const MoveOperand& from, const MoveOperand& to) +{ + if (from.isGeneralReg()) { + if (to.isGeneralReg()) + masm.Mov(toARMReg32(to), toARMReg32(from)); + else + masm.Str(toARMReg32(from), toMemOperand(to)); + return; + } + + if (to.isGeneralReg()) { + masm.Ldr(toARMReg32(to), toMemOperand(from)); + return; + } + + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch32 = temps.AcquireW(); + masm.Ldr(scratch32, toMemOperand(from)); + masm.Str(scratch32, toMemOperand(to)); +} + +void +MoveEmitterARM64::emitGeneralMove(const MoveOperand& from, const MoveOperand& to) +{ + if (from.isGeneralReg()) { + MOZ_ASSERT(to.isGeneralReg() || to.isMemory()); + if (to.isGeneralReg()) + masm.Mov(toARMReg64(to), toARMReg64(from)); + else + masm.Str(toARMReg64(from), toMemOperand(to)); + return; + } + + // {Memory OR EffectiveAddress} -> Register move. + if (to.isGeneralReg()) { + MOZ_ASSERT(from.isMemoryOrEffectiveAddress()); + if (from.isMemory()) + masm.Ldr(toARMReg64(to), toMemOperand(from)); + else + masm.Add(toARMReg64(to), toARMReg64(from), Operand(from.disp())); + return; + } + + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch64 = temps.AcquireX(); + + // Memory -> Memory move. + if (from.isMemory()) { + MOZ_ASSERT(to.isMemory()); + masm.Ldr(scratch64, toMemOperand(from)); + masm.Str(scratch64, toMemOperand(to)); + return; + } + + // EffectiveAddress -> Memory move. + MOZ_ASSERT(from.isEffectiveAddress()); + MOZ_ASSERT(to.isMemory()); + masm.Add(scratch64, toARMReg64(from), Operand(from.disp())); + masm.Str(scratch64, toMemOperand(to)); +} + +MemOperand +MoveEmitterARM64::cycleSlot() +{ + // Using SP as stack pointer requires alignment preservation below. + MOZ_ASSERT(!masm.GetStackPointer64().Is(sp)); + + // emit() already allocated a slot for resolving the cycle. 
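// [Editor's note: breakCycle()/completeCycle() below resolve move cycles
//  through this slot. For a two-move cycle such as (x0 -> x1, x1 -> x0) the
//  emitted sequence is, in effect:
//      Str x1, [cycleSlot]   // breakCycle: save the value about to be clobbered
//      Mov x1, x0            // the cycle-begin move itself
//      Ldr x0, [cycleSlot]   // completeCycle: finish x1 -> x0 from the saved copy
//  (shown for GENERAL moves; FLOAT32/DOUBLE/INT32 cycles use the matching
//  register width).]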
+ MOZ_ASSERT(pushedAtCycle_ != -1); + + return MemOperand(masm.GetStackPointer64(), masm.framePushed() - pushedAtCycle_); +} + +void +MoveEmitterARM64::breakCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type) +{ + switch (type) { + case MoveOp::FLOAT32: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMFPRegister scratch32 = temps.AcquireS(); + masm.Ldr(scratch32, toMemOperand(to)); + masm.Str(scratch32, cycleSlot()); + } else { + masm.Str(toFPReg(to, type), cycleSlot()); + } + break; + + case MoveOp::DOUBLE: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMFPRegister scratch64 = temps.AcquireD(); + masm.Ldr(scratch64, toMemOperand(to)); + masm.Str(scratch64, cycleSlot()); + } else { + masm.Str(toFPReg(to, type), cycleSlot()); + } + break; + + case MoveOp::INT32: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch32 = temps.AcquireW(); + masm.Ldr(scratch32, toMemOperand(to)); + masm.Str(scratch32, cycleSlot()); + } else { + masm.Str(toARMReg32(to), cycleSlot()); + } + break; + + case MoveOp::GENERAL: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch64 = temps.AcquireX(); + masm.Ldr(scratch64, toMemOperand(to)); + masm.Str(scratch64, cycleSlot()); + } else { + masm.Str(toARMReg64(to), cycleSlot()); + } + break; + + default: + MOZ_CRASH("Unexpected move type"); + } +} + +void +MoveEmitterARM64::completeCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type) +{ + switch (type) { + case MoveOp::FLOAT32: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMFPRegister scratch32 = temps.AcquireS(); + masm.Ldr(scratch32, cycleSlot()); + masm.Str(scratch32, toMemOperand(to)); + } else { + masm.Ldr(toFPReg(to, type), cycleSlot()); + } + break; + + case MoveOp::DOUBLE: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMFPRegister scratch = temps.AcquireD(); + masm.Ldr(scratch, cycleSlot()); + masm.Str(scratch, toMemOperand(to)); + } else { + masm.Ldr(toFPReg(to, type), cycleSlot()); + } + break; + + case MoveOp::INT32: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch32 = temps.AcquireW(); + masm.Ldr(scratch32, cycleSlot()); + masm.Str(scratch32, toMemOperand(to)); + } else { + masm.Ldr(toARMReg64(to), cycleSlot()); + } + break; + + case MoveOp::GENERAL: + if (to.isMemory()) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch64 = temps.AcquireX(); + masm.Ldr(scratch64, cycleSlot()); + masm.Str(scratch64, toMemOperand(to)); + } else { + masm.Ldr(toARMReg64(to), cycleSlot()); + } + break; + + default: + MOZ_CRASH("Unexpected move type"); + } +} diff --git a/js/src/jit/arm64/MoveEmitter-arm64.h b/js/src/jit/arm64/MoveEmitter-arm64.h new file mode 100644 index 000000000..09f96315f --- /dev/null +++ b/js/src/jit/arm64/MoveEmitter-arm64.h @@ -0,0 +1,86 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_arm64_MoveEmitter_arm64_h +#define jit_arm64_MoveEmitter_arm64_h + +#include "jit/arm64/Assembler-arm64.h" +#include "jit/MacroAssembler.h" +#include "jit/MoveResolver.h" + +namespace js { +namespace jit { + +class CodeGenerator; + +class MoveEmitterARM64 +{ + bool inCycle_; + MacroAssembler& masm; + + // Original stack push value. + uint32_t pushedAtStart_; + + // These store stack offsets to spill locations, snapshotting + // codegen->framePushed_ at the time they were allocated. They are -1 if no + // stack space has been allocated for that particular spill. + int32_t pushedAtCycle_; + int32_t pushedAtSpill_; + + void assertDone() { + MOZ_ASSERT(!inCycle_); + } + + MemOperand cycleSlot(); + MemOperand toMemOperand(const MoveOperand& operand) const; + ARMRegister toARMReg32(const MoveOperand& operand) const { + MOZ_ASSERT(operand.isGeneralReg()); + return ARMRegister(operand.reg(), 32); + } + ARMRegister toARMReg64(const MoveOperand& operand) const { + if (operand.isGeneralReg()) + return ARMRegister(operand.reg(), 64); + else + return ARMRegister(operand.base(), 64); + } + ARMFPRegister toFPReg(const MoveOperand& operand, MoveOp::Type t) const { + MOZ_ASSERT(operand.isFloatReg()); + return ARMFPRegister(operand.floatReg().encoding(), t == MoveOp::FLOAT32 ? 32 : 64); + } + + void emitFloat32Move(const MoveOperand& from, const MoveOperand& to); + void emitDoubleMove(const MoveOperand& from, const MoveOperand& to); + void emitInt32Move(const MoveOperand& from, const MoveOperand& to); + void emitGeneralMove(const MoveOperand& from, const MoveOperand& to); + + void emitMove(const MoveOp& move); + void breakCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type); + void completeCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type); + + public: + MoveEmitterARM64(MacroAssembler& masm) + : inCycle_(false), + masm(masm), + pushedAtStart_(masm.framePushed()), + pushedAtCycle_(-1), + pushedAtSpill_(-1) + { } + + ~MoveEmitterARM64() { + assertDone(); + } + + void emit(const MoveResolver& moves); + void finish(); + void setScratchRegister(Register reg) {} +}; + +typedef MoveEmitterARM64 MoveEmitter; + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_MoveEmitter_arm64_h */ diff --git a/js/src/jit/arm64/SharedIC-arm64.cpp b/js/src/jit/arm64/SharedIC-arm64.cpp new file mode 100644 index 000000000..0c7aa1364 --- /dev/null +++ b/js/src/jit/arm64/SharedIC-arm64.cpp @@ -0,0 +1,219 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/SharedIC.h" +#include "jit/SharedICHelpers.h" + +#ifdef JS_SIMULATOR_ARM64 +#include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/BaselineCompiler-arm64.h" +#include "jit/arm64/vixl/Debugger-vixl.h" +#endif + + +using namespace js; +using namespace js::jit; + +namespace js { +namespace jit { + +// ICBinaryArith_Int32 + +bool +ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + // Guard that R0 is an integer and R1 is an integer. + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + masm.branchTestInt32(Assembler::NotEqual, R1, &failure); + + // Add R0 and R1. Don't need to explicitly unbox, just use R2. 
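// [Editor's note: not unboxing works here because both operands are known to
//  be int32 Values at this point. The payload is the low 32 bits, so
//  W-register arithmetic sees exactly the untagged integers, and results are
//  re-tagged with movePayload(). The bitwise cases further down operate on
//  the full X registers instead: ORR and AND of two identical int32 tags
//  reproduce that tag, and EOR uses only the zero-extended low 32 bits of the
//  right-hand side, so the tag bits of the destination are never disturbed.]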
+ Register Rscratch = R2_; + ARMRegister Wscratch = ARMRegister(Rscratch, 32); +#ifdef MERGE + // DIV and MOD need an extra non-volatile ValueOperand to hold R0. + AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2)); + savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs); +#endif + // get some more ARM-y names for the registers + ARMRegister W0(R0_, 32); + ARMRegister X0(R0_, 64); + ARMRegister W1(R1_, 32); + ARMRegister X1(R1_, 64); + ARMRegister WTemp(ExtractTemp0, 32); + ARMRegister XTemp(ExtractTemp0, 64); + Label maybeNegZero, revertRegister; + switch(op_) { + case JSOP_ADD: + masm.Adds(WTemp, W0, Operand(W1)); + + // Just jump to failure on overflow. R0 and R1 are preserved, so we can + // just jump to the next stub. + masm.j(Assembler::Overflow, &failure); + + // Box the result and return. We know R0 already contains the + // integer tag, so we just need to move the payload into place. + masm.movePayload(ExtractTemp0, R0_); + break; + + case JSOP_SUB: + masm.Subs(WTemp, W0, Operand(W1)); + masm.j(Assembler::Overflow, &failure); + masm.movePayload(ExtractTemp0, R0_); + break; + + case JSOP_MUL: + masm.mul32(R0.valueReg(), R1.valueReg(), Rscratch, &failure, &maybeNegZero); + masm.movePayload(Rscratch, R0_); + break; + + case JSOP_DIV: + case JSOP_MOD: { + + // Check for INT_MIN / -1, it results in a double. + Label check2; + masm.Cmp(W0, Operand(INT_MIN)); + masm.B(&check2, Assembler::NotEqual); + masm.Cmp(W1, Operand(-1)); + masm.j(Assembler::Equal, &failure); + masm.bind(&check2); + Label no_fail; + // Check for both division by zero and 0 / X with X < 0 (results in -0). + masm.Cmp(W1, Operand(0)); + // If x > 0, then it can't be bad. + masm.B(&no_fail, Assembler::GreaterThan); + // if x == 0, then ignore any comparison, and force + // it to fail, if x < 0 (the only other case) + // then do the comparison, and fail if y == 0 + masm.Ccmp(W0, Operand(0), vixl::ZFlag, Assembler::NotEqual); + masm.B(&failure, Assembler::Equal); + masm.bind(&no_fail); + masm.Sdiv(Wscratch, W0, W1); + // Start calculating the remainder, x - (x / y) * y. + masm.mul(WTemp, W1, Wscratch); + if (op_ == JSOP_DIV) { + // Result is a double if the remainder != 0, which happens + // when (x/y)*y != x. + masm.branch32(Assembler::NotEqual, R0.valueReg(), ExtractTemp0, &revertRegister); + masm.movePayload(Rscratch, R0_); + } else { + // Calculate the actual mod. Set the condition code, so we can see if it is non-zero. + masm.Subs(WTemp, W0, WTemp); + + // If X % Y == 0 and X < 0, the result is -0. + masm.Ccmp(W0, Operand(0), vixl::NoFlag, Assembler::Equal); + masm.branch(Assembler::LessThan, &revertRegister); + masm.movePayload(ExtractTemp0, R0_); + } + break; + } + // ORR, EOR, AND can trivially be coerced int + // working without affecting the tag of the dest.. + case JSOP_BITOR: + masm.Orr(X0, X0, Operand(X1)); + break; + case JSOP_BITXOR: + masm.Eor(X0, X0, Operand(W1, vixl::UXTW)); + break; + case JSOP_BITAND: + masm.And(X0, X0, Operand(X1)); + break; + // LSH, RSH and URSH can not. + case JSOP_LSH: + // ARM will happily try to shift by more than 0x1f. + masm.Lsl(Wscratch, W0, W1); + masm.movePayload(Rscratch, R0.valueReg()); + break; + case JSOP_RSH: + masm.Asr(Wscratch, W0, W1); + masm.movePayload(Rscratch, R0.valueReg()); + break; + case JSOP_URSH: + masm.Lsr(Wscratch, W0, W1); + if (allowDouble_) { + Label toUint; + // Testing for negative is equivalent to testing bit 31 + masm.Tbnz(Wscratch, 31, &toUint); + // Move result and box for return. 
+ masm.movePayload(Rscratch, R0_); + EmitReturnFromIC(masm); + + masm.bind(&toUint); + masm.convertUInt32ToDouble(Rscratch, ScratchDoubleReg); + masm.boxDouble(ScratchDoubleReg, R0); + } else { + // Testing for negative is equivalent to testing bit 31 + masm.Tbnz(Wscratch, 31, &failure); + // Move result for return. + masm.movePayload(Rscratch, R0_); + } + break; + default: + MOZ_CRASH("Unhandled op for BinaryArith_Int32."); + } + + EmitReturnFromIC(masm); + + switch (op_) { + case JSOP_MUL: + masm.bind(&maybeNegZero); + + // Result is -0 if exactly one of lhs or rhs is negative. + masm.Cmn(W0, W1); + masm.j(Assembler::Signed, &failure); + + // Result is +0, so use the zero register. + masm.movePayload(rzr, R0_); + EmitReturnFromIC(masm); + break; + case JSOP_DIV: + case JSOP_MOD: + masm.bind(&revertRegister); + break; + default: + break; + } + + // Failure case - jump to next stub. + masm.bind(&failure); + EmitStubGuardFailure(masm); + + return true; +} + +bool +ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + + switch (op) { + case JSOP_BITNOT: + masm.Mvn(ARMRegister(R1.valueReg(), 32), ARMRegister(R0.valueReg(), 32)); + masm.movePayload(R1.valueReg(), R0.valueReg()); + break; + case JSOP_NEG: + // Guard against 0 and MIN_INT, both result in a double. + masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure); + + // Compile -x as 0 - x. + masm.Sub(ARMRegister(R1.valueReg(), 32), wzr, ARMRegister(R0.valueReg(), 32)); + masm.movePayload(R1.valueReg(), R0.valueReg()); + break; + default: + MOZ_CRASH("Unexpected op"); + } + + EmitReturnFromIC(masm); + + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; + +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/SharedICHelpers-arm64.h b/js/src/jit/arm64/SharedICHelpers-arm64.h new file mode 100644 index 000000000..b97129e65 --- /dev/null +++ b/js/src/jit/arm64/SharedICHelpers-arm64.h @@ -0,0 +1,337 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_SharedICHelpers_arm64_h +#define jit_arm64_SharedICHelpers_arm64_h + +#include "jit/BaselineFrame.h" +#include "jit/BaselineIC.h" +#include "jit/MacroAssembler.h" +#include "jit/SharedICRegisters.h" + +namespace js { +namespace jit { + +// Distance from sp to the top Value inside an IC stub (no return address on the stack on ARM). +static const size_t ICStackValueOffset = 0; + +inline void +EmitRestoreTailCallReg(MacroAssembler& masm) +{ + // No-op on ARM because link register is always holding the return address. +} + +inline void +EmitRepushTailCallReg(MacroAssembler& masm) +{ + // No-op on ARM because link register is always holding the return address. +} + +inline void +EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm) +{ + // Move ICEntry offset into ICStubReg + CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg); + *patchOffset = offset; + + // Load stub pointer into ICStubReg + masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg); + + // Load stubcode pointer from BaselineStubEntry. + // R2 won't be active when we call ICs, so we can use r0. 
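// [Editor's note: on this port R2 is backed by r0 (see the definitions in
//  SharedICRegisters-arm64.h further down), so while R2 is dead r0 is free to
//  carry the stub code pointer. The Blr below leaves the return address in lr,
//  which is also ICTailCallReg, so IC stubs can return with a plain
//  abiret()/Ret(lr).]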
+ MOZ_ASSERT(R2 == ValueOperand(r0)); + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0); + + // Call the stubcode via a direct branch-and-link. + masm.Blr(x0); +} + +inline void +EmitEnterTypeMonitorIC(MacroAssembler& masm, + size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub()) +{ + // This is expected to be called from within an IC, when ICStubReg is + // properly initialized to point to the stub. + masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg); + + // Load stubcode pointer from BaselineStubEntry. + // R2 won't be active when we call ICs, so we can use r0. + MOZ_ASSERT(R2 == ValueOperand(r0)); + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0); + + // Jump to the stubcode. + masm.Br(x0); +} + +inline void +EmitReturnFromIC(MacroAssembler& masm) +{ + masm.abiret(); // Defaults to lr. +} + +inline void +EmitChangeICReturnAddress(MacroAssembler& masm, Register reg) +{ + masm.movePtr(reg, lr); +} + +inline void +EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize) +{ + // We assume that R0 has been pushed, and R2 is unused. + MOZ_ASSERT(R2 == ValueOperand(r0)); + + // Compute frame size into w0. Used below in makeFrameDescriptor(). + masm.Sub(x0, BaselineFrameReg64, masm.GetStackPointer64()); + masm.Add(w0, w0, Operand(BaselineFrame::FramePointerOffset)); + + // Store frame size without VMFunction arguments for GC marking. + { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch32 = temps.AcquireW(); + + masm.Sub(scratch32, w0, Operand(argSize)); + masm.store32(scratch32.asUnsized(), + Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); + } + + // Push frame descriptor (minus the return address) and perform the tail call. + MOZ_ASSERT(ICTailCallReg == lr); + masm.makeFrameDescriptor(r0, JitFrame_BaselineJS, ExitFrameLayout::Size()); + masm.push(r0); + + // The return address will be pushed by the VM wrapper, for compatibility + // with direct calls. Refer to the top of generateVMWrapper(). + // ICTailCallReg (lr) already contains the return address (as we keep + // it there through the stub calls). + + masm.branch(target); +} + +inline void +EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize) +{ + MOZ_CRASH("Not implemented yet."); +} + +inline void +EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) +{ + ARMRegister reg64(reg, 64); + + // Compute stub frame size. + masm.Sub(reg64, masm.GetStackPointer64(), Operand(sizeof(void*) * 2)); + masm.Sub(reg64, BaselineFrameReg64, reg64); + + masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize); +} + +inline void +EmitBaselineCallVM(JitCode* target, MacroAssembler& masm) +{ + EmitBaselineCreateStubFrameDescriptor(masm, r0, ExitFrameLayout::Size()); + masm.push(r0); + masm.call(target); +} + +inline void +EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm) +{ + MOZ_CRASH("Not implemented yet."); +} + +// Size of values pushed by EmitEnterStubFrame. +static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*); +static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*); + +inline void +EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) +{ + MOZ_ASSERT(scratch != ICTailCallReg); + + // Compute frame size. 
+ masm.Add(ARMRegister(scratch, 64), BaselineFrameReg64, Operand(BaselineFrame::FramePointerOffset)); + masm.Sub(ARMRegister(scratch, 64), ARMRegister(scratch, 64), masm.GetStackPointer64()); + + masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); + + // Note: when making changes here, don't forget to update STUB_FRAME_SIZE. + + // Push frame descriptor and return address. + // Save old frame pointer, stack pointer, and stub reg. + masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size()); + masm.Push(scratch, ICTailCallReg, ICStubReg, BaselineFrameReg); + + // Update the frame register. + masm.Mov(BaselineFrameReg64, masm.GetStackPointer64()); + + // Stack should remain 16-byte aligned. + masm.checkStackAlignment(); +} + +inline void +EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch) +{ + MOZ_CRASH("Not implemented yet."); +} + +inline void +EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false) +{ + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch64 = temps.AcquireX(); + + // Ion frames do not save and restore the frame pointer. If we called + // into Ion, we have to restore the stack pointer from the frame descriptor. + // If we performed a VM call, the descriptor has been popped already so + // in that case we use the frame pointer. + if (calledIntoIon) { + masm.pop(scratch64.asUnsized()); + masm.Lsr(scratch64, scratch64, FRAMESIZE_SHIFT); + masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), scratch64); + } else { + masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64); + } + + // Pop values, discarding the frame descriptor. + masm.pop(BaselineFrameReg, ICStubReg, ICTailCallReg, scratch64.asUnsized()); + + // Stack should remain 16-byte aligned. + masm.checkStackAlignment(); +} + +inline void +EmitIonLeaveStubFrame(MacroAssembler& masm) +{ + MOZ_CRASH("Not implemented yet."); +} + +inline void +EmitStowICValues(MacroAssembler& masm, int values) +{ + switch (values) { + case 1: + // Stow R0. + masm.Push(R0); + break; + case 2: + // Stow R0 and R1. + masm.Push(R0.valueReg()); + masm.Push(R1.valueReg()); + break; + default: + MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values"); + } +} + +inline void +EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false) +{ + MOZ_ASSERT(values >= 0 && values <= 2); + switch (values) { + case 1: + // Unstow R0. + if (discard) + masm.Drop(Operand(sizeof(Value))); + else + masm.popValue(R0); + break; + case 2: + // Unstow R0 and R1. + if (discard) + masm.Drop(Operand(sizeof(Value) * 2)); + else + masm.pop(R1.valueReg(), R0.valueReg()); + break; + default: + MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values"); + } + masm.adjustFrame(-values * sizeof(Value)); +} + +inline void +EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset) +{ + // R0 contains the value that needs to be typechecked. + // The object we're updating is a boxed Value on the stack, at offset + // objectOffset from stack top, excluding the return address. + MOZ_ASSERT(R2 == ValueOperand(r0)); + + // Save the current ICStubReg to stack, as well as the TailCallReg, + // since on AArch64, the LR is live. + masm.push(ICStubReg, ICTailCallReg); + + // This is expected to be called from within an IC, when ICStubReg + // is properly initialized to point to the stub. 
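// [Editor's note: the update stub called below reports success by storing
//  0 or 1 in R1.scratchReg(). On failure a baseline stub frame is entered;
//  the STUB_FRAME_SIZE skip in the loadValue() further down accounts for the
//  four words pushed by EmitBaselineEnterStubFrame() (frame descriptor,
//  return address, ICStubReg, BaselineFrameReg), so the stowed Value is still
//  addressed correctly relative to the stack pointer.]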
+ masm.loadPtr(Address(ICStubReg, (int32_t)ICUpdatedStub::offsetOfFirstUpdateStub()), + ICStubReg); + + // Load stubcode pointer from ICStubReg into ICTailCallReg. + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), ICTailCallReg); + + // Call the stubcode. + masm.Blr(ARMRegister(ICTailCallReg, 64)); + + // Restore the old stub reg and tailcall reg. + masm.pop(ICTailCallReg, ICStubReg); + + // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the + // value in R0 type-checked properly or not. + Label success; + masm.cmp32(R1.scratchReg(), Imm32(1)); + masm.j(Assembler::Equal, &success); + + // If the IC failed, then call the update fallback function. + EmitBaselineEnterStubFrame(masm, R1.scratchReg()); + + masm.loadValue(Address(masm.getStackPointer(), STUB_FRAME_SIZE + objectOffset), R1); + masm.Push(R0.valueReg()); + masm.Push(R1.valueReg()); + masm.Push(ICStubReg); + + // Load previous frame pointer, push BaselineFrame*. + masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg()); + masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg()); + + EmitBaselineCallVM(code, masm); + EmitBaselineLeaveStubFrame(masm); + + // Success at end. + masm.bind(&success); +} + +template <typename AddrType> +inline void +EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type) +{ + // On AArch64, lr is clobbered by patchableCallPreBarrier. Save it first. + masm.push(lr); + masm.patchableCallPreBarrier(addr, type); + masm.pop(lr); +} + +inline void +EmitStubGuardFailure(MacroAssembler& masm) +{ + // NOTE: This routine assumes that the stub guard code left the stack in the + // same state it was in when it was entered. + + // BaselineStubEntry points to the current stub. + + // Load next stub into ICStubReg. + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg); + + // Load stubcode pointer from BaselineStubEntry into scratch register. + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0); + + // Return address is already loaded, just jump to the next stubcode. + masm.Br(x0); +} + +} // namespace jit +} // namespace js + +#endif // jit_arm64_SharedICHelpers_arm64_h diff --git a/js/src/jit/arm64/SharedICRegisters-arm64.h b/js/src/jit/arm64/SharedICRegisters-arm64.h new file mode 100644 index 000000000..c78724158 --- /dev/null +++ b/js/src/jit/arm64/SharedICRegisters-arm64.h @@ -0,0 +1,58 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_SharedICRegisters_arm64_h +#define jit_arm64_SharedICRegisters_arm64_h + +#include "jit/MacroAssembler.h" + +namespace js { +namespace jit { + +// Must be a callee-saved register for preservation around generateEnterJIT(). +static constexpr Register BaselineFrameReg = r23; +static constexpr ARMRegister BaselineFrameReg64 = { BaselineFrameReg, 64 }; + +// BaselineStackReg is intentionally undefined on ARM64. +// Refer to the comment next to the definition of RealStackPointer. + +// ValueOperands R0, R1, and R2. +// R0 == JSReturnReg, and R2 uses registers not preserved across calls. +// R1 value should be preserved across calls. 
+static constexpr Register R0_ = r2; +static constexpr Register R1_ = r19; +static constexpr Register R2_ = r0; + +static constexpr ValueOperand R0(R0_); +static constexpr ValueOperand R1(R1_); +static constexpr ValueOperand R2(R2_); + +// ICTailCallReg and ICStubReg use registers that are not preserved across calls. +static constexpr Register ICTailCallReg = r30; +static constexpr Register ICStubReg = r9; + +// ExtractTemps must be callee-save registers: +// ICSetProp_Native::Compiler::generateStubCode() stores the object +// in ExtractTemp0, but then calls callTypeUpdateIC(), which clobbers +// caller-save registers. +// They should also not be the scratch registers ip0 or ip1, +// since those get clobbered all the time. +static constexpr Register ExtractTemp0 = r24; +static constexpr Register ExtractTemp1 = r25; + +// R7 - R9 are generally available for use within stubcode. + +// Note that BaselineTailCallReg is actually just the link +// register. In ARM code emission, we do not clobber BaselineTailCallReg +// since we keep the return address for calls there. + +static constexpr FloatRegister FloatReg0 = { FloatRegisters::d0, FloatRegisters::Double }; +static constexpr FloatRegister FloatReg1 = { FloatRegisters::d1, FloatRegisters::Double }; + +} // namespace jit +} // namespace js + +#endif // jit_arm64_SharedICRegisters_arm64_h diff --git a/js/src/jit/arm64/Trampoline-arm64.cpp b/js/src/jit/arm64/Trampoline-arm64.cpp new file mode 100644 index 000000000..547bdd927 --- /dev/null +++ b/js/src/jit/arm64/Trampoline-arm64.cpp @@ -0,0 +1,1229 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/Bailouts.h" +#include "jit/JitCompartment.h" +#include "jit/JitFrames.h" +#include "jit/Linker.h" +#ifdef JS_ION_PERF +# include "jit/PerfSpewer.h" +#endif +#include "jit/arm64/SharedICHelpers-arm64.h" +#include "jit/VMFunctions.h" + +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace js::jit; + +// All registers to save and restore. This includes the stack pointer, since we +// use the ability to reference register values on the stack by index. +static const LiveRegisterSet AllRegs = + LiveRegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1 << 31 | 1 << 30 | 1 << 29| 1 << 28)), + FloatRegisterSet(FloatRegisters::AllMask)); + +/* This method generates a trampoline on ARM64 for a c++ function with + * the following signature: + * bool blah(void* code, int argc, Value* argv, JSObject* scopeChain, Value* vp) + * ...using standard AArch64 calling convention + */ +JitCode* +JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type) +{ + MacroAssembler masm(cx); + + const Register reg_code = IntArgReg0; // EnterJitData::jitcode. + const Register reg_argc = IntArgReg1; // EnterJitData::maxArgc. + const Register reg_argv = IntArgReg2; // EnterJitData::maxArgv. + const Register reg_osrFrame = IntArgReg3; // EnterJitData::osrFrame. + const Register reg_callee = IntArgReg4; // EnterJitData::calleeToken. + const Register reg_scope = IntArgReg5; // EnterJitData::scopeChain. + const Register reg_osrNStack = IntArgReg6; // EnterJitData::osrNumStackValues. + const Register reg_vp = IntArgReg7; // Address of EnterJitData::result. 
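On AArch64, IntArgReg0 through IntArgReg7 are x0 through x7, so all eight EnterJitData fields listed above arrive in registers. As a rough sketch (placeholder names, not the engine's actual typedef), the C-visible shape of the generated trampoline is:

// Sketch only: one 64-bit argument per x0..x7, mirroring the register comments above.
typedef void (*EnterJitCodeSketch)(void* jitcode,              // x0
                                   unsigned maxArgc,           // x1
                                   Value* maxArgv,             // x2
                                   InterpreterFrame* osrFrame, // x3
                                   void* calleeToken,          // x4
                                   JSObject* scopeChain,       // x5
                                   size_t osrNumStackValues,   // x6
                                   Value* vp);                 // x7: in/out result slot

The |vp| slot is both read (the actual argument count is unboxed from it below) and written (the Value result is stored back through it before the trampoline returns).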
+ + MOZ_ASSERT(OsrFrameReg == IntArgReg3); + + // During the pushes below, use the normal stack pointer. + masm.SetStackPointer64(sp); + + // Save old frame pointer and return address; set new frame pointer. + masm.push(r29, r30); + masm.moveStackPtrTo(r29); + + // Save callee-save integer registers. + // Also save x7 (reg_vp) and x30 (lr), for use later. + masm.push(r19, r20, r21, r22); + masm.push(r23, r24, r25, r26); + masm.push(r27, r28, r7, r30); + + // Save callee-save floating-point registers. + // AArch64 ABI specifies that only the lower 64 bits must be saved. + masm.push(d8, d9, d10, d11); + masm.push(d12, d13, d14, d15); + +#ifdef DEBUG + // Emit stack canaries. + masm.movePtr(ImmWord(0xdeadd00d), r23); + masm.movePtr(ImmWord(0xdeadd11d), r24); + masm.push(r23, r24); +#endif + + // Common code below attempts to push single registers at a time, + // which breaks the stack pointer's 16-byte alignment requirement. + // Note that movePtr() is invalid because StackPointer is treated as xzr. + // + // FIXME: After testing, this entire function should be rewritten to not + // use the PseudoStackPointer: since the amount of data pushed is precalculated, + // we can just allocate the whole frame header at once and index off sp. + // This will save a significant number of instructions where Push() updates sp. + masm.Mov(PseudoStackPointer64, sp); + masm.SetStackPointer64(PseudoStackPointer64); + + // Save the stack pointer at this point for Baseline OSR. + masm.moveStackPtrTo(BaselineFrameReg); + // Remember stack depth without padding and arguments. + masm.moveStackPtrTo(r19); + + // If constructing, include newTarget in argument vector. + { + Label noNewTarget; + Imm32 constructingToken(CalleeToken_FunctionConstructing); + masm.branchTest32(Assembler::Zero, reg_callee, constructingToken, &noNewTarget); + masm.add32(Imm32(1), reg_argc); + masm.bind(&noNewTarget); + } + + // JitFrameLayout is as follows (higher is higher in memory): + // N*8 - [ JS argument vector ] (base 16-byte aligned) + // 8 - numActualArgs + // 8 - calleeToken (16-byte aligned) + // 8 - frameDescriptor + // 8 - returnAddress (16-byte aligned, pushed by callee) + + // Push the argument vector onto the stack. + // WARNING: destructively modifies reg_argv + { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + + const ARMRegister tmp_argc = temps.AcquireX(); + const ARMRegister tmp_sp = temps.AcquireX(); + + Label noArguments; + Label loopHead; + + masm.movePtr(reg_argc, tmp_argc.asUnsized()); + + // sp -= 8 + // Since we're using PostIndex Str below, this is necessary to avoid overwriting + // the SPS mark pushed above. + masm.subFromStackPtr(Imm32(8)); + + // sp -= 8 * argc + masm.Sub(PseudoStackPointer64, PseudoStackPointer64, Operand(tmp_argc, vixl::SXTX, 3)); + + // Give sp 16-byte alignment and sync stack pointers. + masm.andToStackPtr(Imm32(~0xff)); + masm.moveStackPtrTo(tmp_sp.asUnsized()); + + masm.branchTestPtr(Assembler::Zero, reg_argc, reg_argc, &noArguments); + + // Begin argument-pushing loop. + // This could be optimized using Ldp and Stp. + { + masm.bind(&loopHead); + + // Load an argument from argv, then increment argv by 8. + masm.Ldr(x24, MemOperand(ARMRegister(reg_argv, 64), Operand(8), vixl::PostIndex)); + + // Store the argument to tmp_sp, then increment tmp_sp by 8. + masm.Str(x24, MemOperand(tmp_sp, Operand(8), vixl::PostIndex)); + + // Set the condition codes for |cmp tmp_argc, 2| (using the old value). + masm.Subs(tmp_argc, tmp_argc, Operand(1)); + + // Branch if arguments remain. 
+ masm.B(&loopHead, vixl::Condition::ge); + } + + masm.bind(&noArguments); + } + masm.checkStackAlignment(); + + // Push the number of actual arguments and the calleeToken. + // The result address is used to store the actual number of arguments + // without adding an argument to EnterJIT. + masm.unboxInt32(Address(reg_vp, 0x0), ip0); + masm.push(ip0, reg_callee); + masm.checkStackAlignment(); + + // Calculate the number of bytes pushed so far. + masm.subStackPtrFrom(r19); + + // Push the frameDescriptor. + masm.makeFrameDescriptor(r19, JitFrame_Entry, JitFrameLayout::Size()); + masm.Push(r19); + + Label osrReturnPoint; + if (type == EnterJitBaseline) { + // Check for OSR. + Label notOsr; + masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, ¬Osr); + + // Push return address and previous frame pointer. + masm.Adr(ScratchReg2_64, &osrReturnPoint); + masm.push(ScratchReg2, BaselineFrameReg); + + // Reserve frame. + masm.subFromStackPtr(Imm32(BaselineFrame::Size())); + masm.moveStackPtrTo(BaselineFrameReg); + + // Reserve space for locals and stack values. + masm.Lsl(w19, ARMRegister(reg_osrNStack, 32), 3); // w19 = num_stack_values * sizeof(Value). + masm.subFromStackPtr(r19); + + // Enter exit frame. + masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), r19); + masm.makeFrameDescriptor(r19, JitFrame_BaselineJS, ExitFrameLayout::Size()); + masm.asVIXL().Push(x19, xzr); // Push xzr for a fake return address. + // No GC things to mark: push a bare token. + masm.enterFakeExitFrame(ExitFrameLayoutBareToken); + + masm.push(BaselineFrameReg, reg_code); + + // Initialize the frame, including filling in the slots. + masm.setupUnalignedABICall(r19); + masm.passABIArg(BaselineFrameReg); // BaselineFrame. + masm.passABIArg(reg_osrFrame); // InterpreterFrame. + masm.passABIArg(reg_osrNStack); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr)); + + masm.pop(r19, BaselineFrameReg); + MOZ_ASSERT(r19 != ReturnReg); + + masm.addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter())); + masm.addPtr(Imm32(BaselineFrame::Size()), BaselineFrameReg); + + Label error; + masm.branchIfFalseBool(ReturnReg, &error); + + masm.jump(r19); + + // OOM: load error value, discard return address and previous frame + // pointer, and return. + masm.bind(&error); + masm.Add(masm.GetStackPointer64(), BaselineFrameReg64, Operand(2 * sizeof(uintptr_t))); + masm.syncStackPtr(); + masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand); + masm.B(&osrReturnPoint); + + masm.bind(¬Osr); + masm.movePtr(reg_scope, R1_); + } + + // Call function. + // Since AArch64 doesn't have the pc register available, the callee must push lr. + masm.callJitNoProfiler(reg_code); + + // Baseline OSR will return here. + if (type == EnterJitBaseline) + masm.bind(&osrReturnPoint); + + // Return back to SP. + masm.Pop(r19); + masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), + Operand(x19, vixl::LSR, FRAMESIZE_SHIFT)); + masm.syncStackPtr(); + masm.SetStackPointer64(sp); + +#ifdef DEBUG + // Check that canaries placed on function entry are still present. + masm.pop(r24, r23); + Label x23OK, x24OK; + + masm.branchPtr(Assembler::Equal, r23, ImmWord(0xdeadd00d), &x23OK); + masm.breakpoint(); + masm.bind(&x23OK); + + masm.branchPtr(Assembler::Equal, r24, ImmWord(0xdeadd11d), &x24OK); + masm.breakpoint(); + masm.bind(&x24OK); +#endif + + // Restore callee-save floating-point registers. + masm.pop(d15, d14, d13, d12); + masm.pop(d11, d10, d9, d8); + + // Restore callee-save integer registers. 
+ // Also restore x7 (reg_vp) and x30 (lr). + masm.pop(r30, r7, r28, r27); + masm.pop(r26, r25, r24, r23); + masm.pop(r22, r21, r20, r19); + + // Store return value (in JSReturnReg = x2 to just-popped reg_vp). + masm.storeValue(JSReturnOperand, Address(reg_vp, 0)); + + // Restore old frame pointer. + masm.pop(r30, r29); + + // Return using the value popped into x30. + masm.abiret(); + + Linker linker(masm); + AutoFlushICache afc("EnterJIT"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "EnterJIT"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateInvalidator(JSContext* cx) +{ + MacroAssembler masm; + + masm.push(r0, r1, r2, r3); + + masm.PushRegsInMask(AllRegs); + masm.moveStackPtrTo(r0); + + masm.Sub(x1, masm.GetStackPointer64(), Operand(sizeof(size_t))); + masm.Sub(x2, masm.GetStackPointer64(), Operand(sizeof(size_t) + sizeof(void*))); + masm.moveToStackPtr(r2); + + masm.setupUnalignedABICall(r10); + masm.passABIArg(r0); + masm.passABIArg(r1); + masm.passABIArg(r2); + + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout)); + + masm.pop(r2, r1); + + masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), x1); + masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), + Operand(sizeof(InvalidationBailoutStack))); + masm.syncStackPtr(); + + JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail(); + masm.branch(bailoutTail); + + Linker linker(masm); + AutoFlushICache afc("Invalidator"); + return linker.newCode<NoGC>(cx, OTHER_CODE); +} + +JitCode* +JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut) +{ + MacroAssembler masm; + + // Save the return address for later. + masm.push(lr); + + // Load the information that the rectifier needs from the stack. + masm.Ldr(w0, MemOperand(masm.GetStackPointer64(), RectifierFrameLayout::offsetOfNumActualArgs())); + masm.Ldr(x1, MemOperand(masm.GetStackPointer64(), RectifierFrameLayout::offsetOfCalleeToken())); + + // Extract a JSFunction pointer from the callee token and keep the + // intermediary to avoid later recalculation. + masm.And(x5, x1, Operand(CalleeTokenMask)); + + // Get the arguments from the function object. + masm.Ldrh(x6, MemOperand(x5, JSFunction::offsetOfNargs())); + + static_assert(CalleeToken_FunctionConstructing == 0x1, "Constructing must be low-order bit"); + masm.And(x4, x1, Operand(CalleeToken_FunctionConstructing)); + masm.Add(x7, x6, x4); + + // Calculate the position that our arguments are at before sp gets modified. + MOZ_ASSERT(ArgumentsRectifierReg == r8, "x8 used for argc in Arguments Rectifier"); + masm.Add(x3, masm.GetStackPointer64(), Operand(x8, vixl::LSL, 3)); + masm.Add(x3, x3, Operand(sizeof(RectifierFrameLayout))); + + // Pad to a multiple of 16 bytes. This neglects the |this| value, + // which will also be pushed, because the rest of the frame will + // round off that value. See pushes of |argc|, |callee| and |desc| below. + Label noPadding; + masm.Tbnz(x7, 0, &noPadding); + masm.asVIXL().Push(xzr); + masm.Add(x7, x7, Operand(1)); + masm.bind(&noPadding); + + { + Label notConstructing; + masm.Cbz(x4, ¬Constructing); + + // new.target lives at the end of the pushed args + // NB: The arg vector holder starts at the beginning of the last arg, + // add a value to get to argv[argc] + masm.loadPtr(Address(r3, sizeof(Value)), r4); + masm.Push(r4); + + masm.bind(¬Constructing); + } + + // Calculate the number of undefineds that need to be pushed. 
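The subtraction that follows (w2 = w6 - w8) is the count of missing formals to be filled with |undefined|, and the padding push a few lines earlier keeps the final frame 16-byte aligned once |this| is included. A plain C++ sketch of those two computations, with hypothetical helper names:

#include <cstdint>

// Sketch only; nformals comes from JSFunction::nargs(), argc from the
// rectifier frame's numActualArgs, constructing from the callee token bit.
static uint32_t
RectifierUndefinedCountSketch(uint32_t nformals, uint32_t argc)
{
    return nformals - argc;   // one |undefined| per missing formal
}

static bool
RectifierNeedsPaddingSketch(uint32_t nformals, bool constructing)
{
    // One extra zero word is pushed when (nformals + constructing) is even,
    // so that adding |this| yields an even number of 8-byte slots.
    return ((nformals + (constructing ? 1 : 0)) & 1) == 0;
}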
+ masm.Sub(w2, w6, w8); + + // Put an undefined in a register so it can be pushed. + masm.moveValue(UndefinedValue(), r4); + + // Push undefined N times. + { + Label undefLoopTop; + masm.bind(&undefLoopTop); + masm.Push(r4); + masm.Subs(w2, w2, Operand(1)); + masm.B(&undefLoopTop, Assembler::NonZero); + } + + // Arguments copy loop. Copy for x8 >= 0 to include |this|. + { + Label copyLoopTop; + masm.bind(©LoopTop); + masm.Ldr(x4, MemOperand(x3, -sizeof(Value), vixl::PostIndex)); + masm.Push(r4); + masm.Subs(x8, x8, Operand(1)); + masm.B(©LoopTop, Assembler::NotSigned); + } + + // Fix up the size of the stack frame. +1 accounts for |this|. + masm.Add(x6, x7, Operand(1)); + masm.Lsl(x6, x6, 3); + + // Make that into a frame descriptor. + masm.makeFrameDescriptor(r6, JitFrame_Rectifier, JitFrameLayout::Size()); + + masm.push(r0, // Number of actual arguments. + r1, // Callee token. + r6); // Frame descriptor. + + // Load the address of the code that is getting called. + masm.Ldr(x3, MemOperand(x5, JSFunction::offsetOfNativeOrScript())); + masm.loadBaselineOrIonRaw(r3, r3, nullptr); + uint32_t returnOffset = masm.callJitNoProfiler(r3); + + // Clean up! + // Get the size of the stack frame, and clean up the later fixed frame. + masm.Ldr(x4, MemOperand(masm.GetStackPointer64(), 24, vixl::PostIndex)); + + // Now that the size of the stack frame sans the fixed frame has been loaded, + // add that onto the stack pointer. + masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), + Operand(x4, vixl::LSR, FRAMESIZE_SHIFT)); + + // Pop the return address from earlier and branch. + masm.ret(); + + Linker linker(masm); + AutoFlushICache afc("ArgumentsRectifier"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + + if (returnAddrOut) + *returnAddrOut = (void*) (code->raw() + returnOffset); + + return code; +} + +static void +PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg) +{ + // the stack should look like: + // [IonFrame] + // bailoutFrame.registersnapshot + // bailoutFrame.fpsnapshot + // bailoutFrame.snapshotOffset + // bailoutFrame.frameSize + + // STEP 1a: Save our register sets to the stack so Bailout() can read + // everything. + // sp % 8 == 0 + + // We don't have to push everything, but this is likely easier. + // Setting regs_. + masm.subFromStackPtr(Imm32(Registers::TotalPhys * sizeof(void*))); + for (uint32_t i = 0; i < Registers::TotalPhys; i += 2) { + masm.Stp(ARMRegister::XRegFromCode(i), + ARMRegister::XRegFromCode(i + 1), + MemOperand(masm.GetStackPointer64(), i * sizeof(void*))); + } + + // Since our datastructures for stack inspection are compile-time fixed, + // if there are only 16 double registers, then we need to reserve + // space on the stack for the missing 16. + masm.subFromStackPtr(Imm32(FloatRegisters::TotalPhys * sizeof(double))); + for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i += 2) { + masm.Stp(ARMFPRegister::DRegFromCode(i), + ARMFPRegister::DRegFromCode(i + 1), + MemOperand(masm.GetStackPointer64(), i * sizeof(void*))); + } + + // STEP 1b: Push both the "return address" of the function call (the address + // of the instruction after the call that we used to get here) as + // well as the callee token onto the stack. The return address is + // currently in r14. We will proceed by loading the callee token + // into a sacrificial register <= r14, then pushing both onto the + // stack. + + // Now place the frameClass onto the stack, via a register. + masm.Mov(x9, frameClass); + + // And onto the stack. 
Since the stack is full, we need to put this one past + // the end of the current stack. Sadly, the ABI says that we need to always + // point to the lowest place that has been written. The OS is free to do + // whatever it wants below sp. + masm.push(r30, r9); + masm.moveStackPtrTo(spArg); +} + +static void +GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass) +{ + PushBailoutFrame(masm, frameClass, r0); + + // SP % 8 == 4 + // STEP 1c: Call the bailout function, giving a pointer to the + // structure we just blitted onto the stack. + // Make space for the BaselineBailoutInfo* outparam. + const int sizeOfBailoutInfo = sizeof(void*) * 2; + masm.reserveStack(sizeOfBailoutInfo); + masm.moveStackPtrTo(r1); + + masm.setupUnalignedABICall(r2); + masm.passABIArg(r0); + masm.passABIArg(r1); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout)); + + masm.Ldr(x2, MemOperand(masm.GetStackPointer64(), 0)); + masm.addToStackPtr(Imm32(sizeOfBailoutInfo)); + + static const uint32_t BailoutDataSize = sizeof(void*) * Registers::Total + + sizeof(double) * FloatRegisters::TotalPhys; + + if (frameClass == NO_FRAME_SIZE_CLASS_ID) { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch64 = temps.AcquireX(); + + masm.Ldr(scratch64, MemOperand(masm.GetStackPointer64(), sizeof(uintptr_t))); + masm.addToStackPtr(Imm32(BailoutDataSize + 32)); + masm.addToStackPtr(scratch64.asUnsized()); + } else { + uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize(); + masm.addToStackPtr(Imm32(frameSize + BailoutDataSize + sizeof(void*))); + } + + // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9. + JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail(); + masm.branch(bailoutTail); +} + +JitCode* +JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass) +{ + // FIXME: Implement. + MacroAssembler masm; + masm.breakpoint(); + Linker linker(masm); + AutoFlushICache afc("BailoutTable"); + return linker.newCode<NoGC>(cx, OTHER_CODE); +} + +JitCode* +JitRuntime::generateBailoutHandler(JSContext* cx) +{ + MacroAssembler masm(cx); + GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "BailoutHandler"); +#endif + + Linker linker(masm); + AutoFlushICache afc("BailoutHandler"); + return linker.newCode<NoGC>(cx, OTHER_CODE); +} + +JitCode* +JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f) +{ + MOZ_ASSERT(functionWrappers_); + MOZ_ASSERT(functionWrappers_->initialized()); + VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f); + if (p) + return p->value(); + + MacroAssembler masm(cx); + + // Avoid conflicts with argument registers while discarding the result after + // the function call. + AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask); + + // Wrapper register set is a superset of the Volatile register set. + JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0); + + // Unlike on other platforms, it is the responsibility of the VM *callee* to + // push the return address, while the caller must ensure that the address + // is stored in lr on entry. This allows the VM wrapper to work with both direct + // calls and tail calls. + masm.push(lr); + + // First argument is the JSContext. + Register reg_cx = IntArgReg0; + regs.take(reg_cx); + + // Stack is: + // ... frame ... 
+ // +12 [args] + // +8 descriptor + // +0 returnAddress (pushed by this function, caller sets as lr) + // + // We're aligned to an exit frame, so link it up. + masm.enterExitFrame(&f); + masm.loadJSContext(reg_cx); + + // Save the current stack pointer as the base for copying arguments. + Register argsBase = InvalidReg; + if (f.explicitArgs) { + // argsBase can't be an argument register. Bad things would happen if + // the MoveResolver didn't throw an assertion failure first. + argsBase = r8; + regs.take(argsBase); + masm.Add(ARMRegister(argsBase, 64), masm.GetStackPointer64(), + Operand(ExitFrameLayout::SizeWithFooter())); + } + + // Reserve space for any outparameter. + Register outReg = InvalidReg; + switch (f.outParam) { + case Type_Value: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(Value)); + masm.moveStackPtrTo(outReg); + break; + + case Type_Handle: + outReg = regs.takeAny(); + masm.PushEmptyRooted(f.outParamRootType); + masm.moveStackPtrTo(outReg); + break; + + case Type_Int32: + case Type_Bool: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(int64_t)); + masm.moveStackPtrTo(outReg); + break; + + case Type_Double: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(double)); + masm.moveStackPtrTo(outReg); + break; + + case Type_Pointer: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(uintptr_t)); + masm.moveStackPtrTo(outReg); + break; + + default: + MOZ_ASSERT(f.outParam == Type_Void); + break; + } + + if (!generateTLEnterVM(cx, masm, f)) + return nullptr; + + masm.setupUnalignedABICall(regs.getAny()); + masm.passABIArg(reg_cx); + + size_t argDisp = 0; + + // Copy arguments. + for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) { + MoveOperand from; + switch (f.argProperties(explicitArg)) { + case VMFunction::WordByValue: + masm.passABIArg(MoveOperand(argsBase, argDisp), + (f.argPassedInFloatReg(explicitArg) ? MoveOp::DOUBLE : MoveOp::GENERAL)); + argDisp += sizeof(void*); + break; + + case VMFunction::WordByRef: + masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS), + MoveOp::GENERAL); + argDisp += sizeof(void*); + break; + + case VMFunction::DoubleByValue: + case VMFunction::DoubleByRef: + MOZ_CRASH("NYI: AArch64 callVM should not be used with 128bit values."); + } + } + + // Copy the semi-implicit outparam, if any. + // It is not a C++-abi outparam, which would get passed in the + // outparam register, but a real parameter to the function, which + // was stack-allocated above. + if (outReg != InvalidReg) + masm.passABIArg(outReg); + + masm.callWithABI(f.wrapped); + + if (!generateTLExitVM(cx, masm, f)) + return nullptr; + + // SP is used to transfer stack across call boundaries. + if (!masm.GetStackPointer64().Is(vixl::sp)) + masm.Mov(masm.GetStackPointer64(), vixl::sp); + + // Test for failure. + switch (f.failType()) { + case Type_Object: + masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel()); + break; + case Type_Bool: + masm.branchIfFalseBool(r0, masm.failureLabel()); + break; + default: + MOZ_CRASH("unknown failure kind"); + } + + // Load the outparam and free any allocated stack. 
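For reference, the switch above reserves a fixed amount of stack per outparam kind, which a matching switch after the call loads back and frees. Below is a sketch of the reservation sizes; the helper name is hypothetical, and the parameter type is assumed to be the engine's VMFunction outparam enum:

// Sketch only. Type_Handle is omitted because PushEmptyRooted's size depends
// on the root type rather than being a fixed reservation.
static size_t
OutParamReservationSketch(DataType outParam)
{
    switch (outParam) {
      case Type_Value:   return sizeof(Value);
      case Type_Int32:
      case Type_Bool:    return sizeof(int64_t);   // widened to a full 8-byte slot
      case Type_Double:  return sizeof(double);
      case Type_Pointer: return sizeof(uintptr_t);
      default:           return 0;                 // Type_Void
    }
}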
+ switch (f.outParam) { + case Type_Value: + masm.Ldr(ARMRegister(JSReturnReg, 64), MemOperand(masm.GetStackPointer64())); + masm.freeStack(sizeof(Value)); + break; + + case Type_Handle: + masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand); + break; + + case Type_Int32: + masm.Ldr(ARMRegister(ReturnReg, 32), MemOperand(masm.GetStackPointer64())); + masm.freeStack(sizeof(int64_t)); + break; + + case Type_Bool: + masm.Ldrb(ARMRegister(ReturnReg, 32), MemOperand(masm.GetStackPointer64())); + masm.freeStack(sizeof(int64_t)); + break; + + case Type_Double: + MOZ_ASSERT(cx->runtime()->jitSupportsFloatingPoint); + masm.Ldr(ARMFPRegister(ReturnDoubleReg, 64), MemOperand(masm.GetStackPointer64())); + masm.freeStack(sizeof(double)); + break; + + case Type_Pointer: + masm.Ldr(ARMRegister(ReturnReg, 64), MemOperand(masm.GetStackPointer64())); + masm.freeStack(sizeof(uintptr_t)); + break; + + default: + MOZ_ASSERT(f.outParam == Type_Void); + break; + } + + masm.leaveExitFrame(); + masm.retn(Imm32(sizeof(ExitFrameLayout) + + f.explicitStackSlots() * sizeof(void*) + + f.extraValuesToPop * sizeof(Value))); + + Linker linker(masm); + AutoFlushICache afc("VMWrapper"); + JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE); + if (!wrapper) + return nullptr; + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(wrapper, "VMWrapper"); +#endif + + // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to + // use relookupOrAdd instead of add. + if (!functionWrappers_->relookupOrAdd(p, &f, wrapper)) + return nullptr; + + return wrapper; +} + +JitCode* +JitRuntime::generatePreBarrier(JSContext* cx, MIRType type) +{ + MacroAssembler masm(cx); + + LiveRegisterSet regs = LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask), + FloatRegisterSet(FloatRegisters::VolatileMask)); + + // Also preserve the return address. + regs.add(lr); + + masm.PushRegsInMask(regs); + + MOZ_ASSERT(PreBarrierReg == r1); + masm.movePtr(ImmPtr(cx->runtime()), r3); + + masm.setupUnalignedABICall(r0); + masm.passABIArg(r3); + masm.passABIArg(PreBarrierReg); + masm.callWithABI(IonMarkFunction(type)); + + // Pop the volatile regs and restore LR. + masm.PopRegsInMask(regs); + + masm.abiret(); + + Linker linker(masm); + AutoFlushICache afc("PreBarrier"); + return linker.newCode<NoGC>(cx, OTHER_CODE); +} + +typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*); +static const VMFunction HandleDebugTrapInfo = + FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap"); + +JitCode* +JitRuntime::generateDebugTrapHandler(JSContext* cx) +{ + MacroAssembler masm(cx); +#ifndef JS_USE_LINK_REGISTER + // The first value contains the return addres, + // which we pull into ICTailCallReg for tail calls. + masm.setFramePushed(sizeof(intptr_t)); +#endif + + Register scratch1 = r0; + Register scratch2 = r1; + + // Load BaselineFrame pointer into scratch1. + masm.Sub(ARMRegister(scratch1, 64), BaselineFrameReg64, Operand(BaselineFrame::Size())); + + // Enter a stub frame and call the HandleDebugTrap VM function. Ensure the + // stub frame has a nullptr ICStub pointer, since this pointer is marked + // during GC. 
+ masm.movePtr(ImmPtr(nullptr), ICStubReg); + EmitBaselineEnterStubFrame(masm, scratch2); + + JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo); + if (!code) + return nullptr; + + masm.asVIXL().Push(vixl::lr, ARMRegister(scratch1, 64)); + EmitBaselineCallVM(code, masm); + + EmitBaselineLeaveStubFrame(masm); + + // If the stub returns |true|, we have to perform a forced return (return + // from the JS frame). If the stub returns |false|, just return from the + // trap stub so that execution continues at the current pc. + Label forcedReturn; + masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn); + masm.abiret(); + + masm.bind(&forcedReturn); + masm.loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()), + JSReturnOperand); + masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64); + + masm.pop(BaselineFrameReg, lr); + masm.syncStackPtr(); + masm.abiret(); + + Linker linker(masm); + AutoFlushICache afc("DebugTrapHandler"); + JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler"); +#endif + + return codeDbg; +} + +JitCode* +JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler) +{ + MacroAssembler masm(cx); + + masm.handleFailureWithHandlerTail(handler); + + Linker linker(masm); + AutoFlushICache afc("ExceptionTailStub"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "ExceptionTailStub"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateBailoutTailStub(JSContext* cx) +{ + MacroAssembler masm(cx); + + masm.generateBailoutTail(r1, r2); + + Linker linker(masm); + AutoFlushICache afc("BailoutTailStub"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "BailoutTailStub"); +#endif + + return code; +} +JitCode* +JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx) +{ + MacroAssembler masm; + + Register scratch1 = r8; + Register scratch2 = r9; + Register scratch3 = r10; + Register scratch4 = r11; + + // + // The code generated below expects that the current stack pointer points + // to an Ion or Baseline frame, at the state it would be immediately + // before a ret(). Thus, after this stub's business is done, it executes + // a ret() and returns directly to the caller script, on behalf of the + // callee script that jumped to this code. + // + // Thus the expected stack is: + // + // StackPointer ----+ + // v + // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr + // MEM-HI MEM-LOW + // + // + // The generated jitcode is responsible for overwriting the + // jitActivation->lastProfilingFrame field with a pointer to the previous + // Ion or Baseline jit-frame that was pushed before this one. It is also + // responsible for overwriting jitActivation->lastProfilingCallSite with + // the return address into that frame. The frame could either be an + // immediate "caller" frame, or it could be a frame in a previous + // JitActivation (if the current frame was entered from C++, and the C++ + // was entered by some caller jit-frame further down the stack). + // + // So this jitcode is responsible for "walking up" the jit stack, finding + // the previous Ion or Baseline JS frame, and storing its address and the + // return address into the appropriate fields on the current jitActivation. 
+ // + // There are a fixed number of different path types that can lead to the + // current frame, which is either a baseline or ion frame: + // + // <Baseline-Or-Ion> + // ^ + // | + // ^--- Ion + // | + // ^--- Baseline Stub <---- Baseline + // | + // ^--- Argument Rectifier + // | ^ + // | | + // | ^--- Ion + // | | + // | ^--- Baseline Stub <---- Baseline + // | + // ^--- Entry Frame (From C++) + // + Register actReg = scratch4; + AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation()); + masm.loadPtr(activationAddr, actReg); + + Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame()); + Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite()); + +#ifdef DEBUG + // Ensure that frame we are exiting is current lastProfilingFrame + { + masm.loadPtr(lastProfilingFrame, scratch1); + Label checkOk; + masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk); + masm.branchStackPtr(Assembler::Equal, scratch1, &checkOk); + masm.assumeUnreachable("Mismatch between stored lastProfilingFrame and current stack pointer."); + masm.bind(&checkOk); + } +#endif + + // Load the frame descriptor into |scratch1|, figure out what to do depending on its type. + masm.loadPtr(Address(masm.getStackPointer(), JitFrameLayout::offsetOfDescriptor()), scratch1); + + // Going into the conditionals, we will have: + // FrameDescriptor.size in scratch1 + // FrameDescriptor.type in scratch2 + masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2); + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1); + + // Handling of each case is dependent on FrameDescriptor.type + Label handle_IonJS; + Label handle_BaselineStub; + Label handle_Rectifier; + Label handle_IonAccessorIC; + Label handle_Entry; + Label end; + + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry); + + masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame."); + + // + // JitFrame_IonJS + // + // Stack layout: + // ... + // Ion-Descriptor + // Prev-FP ---> Ion-ReturnAddr + // ... previous frame data ... |- Descriptor.Size + // ... arguments ... | + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + // + masm.bind(&handle_IonJS); + { + // |scratch1| contains Descriptor.size + + // returning directly to an IonJS frame. Store return addr to frame + // in lastProfilingCallSite. + masm.loadPtr(Address(masm.getStackPointer(), JitFrameLayout::offsetOfReturnAddress()), + scratch2); + masm.storePtr(scratch2, lastProfilingCallSite); + + // Store return frame in lastProfilingFrame. + // scratch2 := masm.getStackPointer() + Descriptor.size*1 + JitFrameLayout::Size(); + masm.addPtr(masm.getStackPointer(), scratch1, scratch2); + masm.syncStackPtr(); + masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2, scratch2); + masm.storePtr(scratch2, lastProfilingFrame); + masm.ret(); + } + + // + // JitFrame_BaselineStub + // + // Look past the stub and store the frame pointer to + // the baselineJS frame prior to it. 
+ // + // Stack layout: + // ... + // BL-Descriptor + // Prev-FP ---> BL-ReturnAddr + // +-----> BL-PrevFramePointer + // | ... BL-FrameData ... + // | BLStub-Descriptor + // | BLStub-ReturnAddr + // | BLStub-StubPointer | + // +------ BLStub-SavedFramePointer |- Descriptor.Size + // ... arguments ... | + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + // + // We take advantage of the fact that the stub frame saves the frame + // pointer pointing to the baseline frame, so a bunch of calculation can + // be avoided. + // + masm.bind(&handle_BaselineStub); + { + masm.addPtr(masm.getStackPointer(), scratch1, scratch3); + masm.syncStackPtr(); + Address stubFrameReturnAddr(scratch3, + JitFrameLayout::Size() + + BaselineStubFrameLayout::offsetOfReturnAddress()); + masm.loadPtr(stubFrameReturnAddr, scratch2); + masm.storePtr(scratch2, lastProfilingCallSite); + + Address stubFrameSavedFramePtr(scratch3, + JitFrameLayout::Size() - (2 * sizeof(void*))); + masm.loadPtr(stubFrameSavedFramePtr, scratch2); + masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr. + masm.storePtr(scratch2, lastProfilingFrame); + masm.ret(); + } + + + // + // JitFrame_Rectifier + // + // The rectifier frame can be preceded by either an IonJS or a + // BaselineStub frame. + // + // Stack layout if caller of rectifier was Ion: + // + // Ion-Descriptor + // Ion-ReturnAddr + // ... ion frame data ... |- Rect-Descriptor.Size + // < COMMON LAYOUT > + // + // Stack layout if caller of rectifier was Baseline: + // + // BL-Descriptor + // Prev-FP ---> BL-ReturnAddr + // +-----> BL-SavedFramePointer + // | ... baseline frame data ... + // | BLStub-Descriptor + // | BLStub-ReturnAddr + // | BLStub-StubPointer | + // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size + // ... args to rectifier ... | + // < COMMON LAYOUT > + // + // Common stack layout: + // + // ActualArgc | + // CalleeToken |- IonRectitiferFrameLayout::Size() + // Rect-Descriptor | + // Rect-ReturnAddr | + // ... rectifier data & args ... |- Descriptor.Size + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + // + masm.bind(&handle_Rectifier); + { + // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size(); + masm.addPtr(masm.getStackPointer(), scratch1, scratch2); + masm.syncStackPtr(); + masm.add32(Imm32(JitFrameLayout::Size()), scratch2); + masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3); + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3, scratch1); + masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3); + + // Now |scratch1| contains Rect-Descriptor.Size + // and |scratch2| points to Rectifier frame + // and |scratch3| contains Rect-Descriptor.Type + + // Check for either Ion or BaselineStub frame. 
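The decode just above (a right shift by FRAMESIZE_SHIFT for the size, a mask of (1 << FRAMETYPE_BITS) - 1 for the type) is the inverse of what makeFrameDescriptor emits throughout this file. A sketch of that packing, using the engine's existing constants; the helper names are illustrative only:

// Sketch only: descriptor = (size << FRAMESIZE_SHIFT) | type.
static inline uint32_t
PackFrameDescriptorSketch(uint32_t frameSize, FrameType type)
{
    return (frameSize << FRAMESIZE_SHIFT) | uint32_t(type);
}

static inline uint32_t
DescriptorSizeSketch(uint32_t descriptor)
{
    return descriptor >> FRAMESIZE_SHIFT;
}

static inline FrameType
DescriptorTypeSketch(uint32_t descriptor)
{
    return FrameType(descriptor & ((1 << FRAMETYPE_BITS) - 1));
}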
+ Label handle_Rectifier_BaselineStub; + masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), + &handle_Rectifier_BaselineStub); + + // Handle Rectifier <- IonJS + // scratch3 := RectFrame[ReturnAddr] + masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3); + masm.storePtr(scratch3, lastProfilingCallSite); + + // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size() + masm.addPtr(scratch2, scratch1, scratch3); + masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3); + masm.storePtr(scratch3, lastProfilingFrame); + masm.ret(); + + // Handle Rectifier <- BaselineStub <- BaselineJS + masm.bind(&handle_Rectifier_BaselineStub); +#ifdef DEBUG + { + Label checkOk; + masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk); + masm.assumeUnreachable("Unrecognized frame preceding baselineStub."); + masm.bind(&checkOk); + } +#endif + masm.addPtr(scratch2, scratch1, scratch3); + Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() + + BaselineStubFrameLayout::offsetOfReturnAddress()); + masm.loadPtr(stubFrameReturnAddr, scratch2); + masm.storePtr(scratch2, lastProfilingCallSite); + + Address stubFrameSavedFramePtr(scratch3, + RectifierFrameLayout::Size() - (2 * sizeof(void*))); + masm.loadPtr(stubFrameSavedFramePtr, scratch2); + masm.addPtr(Imm32(sizeof(void*)), scratch2); + masm.storePtr(scratch2, lastProfilingFrame); + masm.ret(); + } + + // JitFrame_IonAccessorIC + // + // The caller is always an IonJS frame. + // + // Ion-Descriptor + // Ion-ReturnAddr + // ... ion frame data ... |- AccFrame-Descriptor.Size + // StubCode | + // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size() + // AccFrame-ReturnAddr | + // ... accessor frame data & args ... |- Descriptor.Size + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + masm.bind(&handle_IonAccessorIC); + { + // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size() + masm.addPtr(masm.getStackPointer(), scratch1, scratch2); + masm.syncStackPtr(); + masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2); + + // scratch3 := AccFrame-Descriptor.Size + masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3); +#ifdef DEBUG + // Assert previous frame is an IonJS frame. + masm.movePtr(scratch3, scratch1); + masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1); + { + Label checkOk; + masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk); + masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame"); + masm.bind(&checkOk); + } +#endif + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3); + + // lastProfilingCallSite := AccFrame-ReturnAddr + masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1); + masm.storePtr(scratch1, lastProfilingCallSite); + + // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size + + // IonAccessorICFrameLayout::Size() + masm.addPtr(scratch2, scratch3, scratch1); + masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1); + masm.storePtr(scratch1, lastProfilingFrame); + masm.ret(); + } + + // + // JitFrame_Entry + // + // If at an entry frame, store null into both fields. 
+ // + masm.bind(&handle_Entry); + { + masm.movePtr(ImmPtr(nullptr), scratch1); + masm.storePtr(scratch1, lastProfilingCallSite); + masm.storePtr(scratch1, lastProfilingFrame); + masm.ret(); + } + + Linker linker(masm); + AutoFlushICache afc("ProfilerExitFrameTailStub"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub"); +#endif + + return code; +} diff --git a/js/src/jit/arm64/vixl/.clang-format b/js/src/jit/arm64/vixl/.clang-format new file mode 100644 index 000000000..122a79540 --- /dev/null +++ b/js/src/jit/arm64/vixl/.clang-format @@ -0,0 +1,4 @@ +BasedOnStyle: Chromium + +# Ignore all comments because they aren't reflowed properly. +CommentPragmas: "^" diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.cpp b/js/src/jit/arm64/vixl/Assembler-vixl.cpp new file mode 100644 index 000000000..d784fd860 --- /dev/null +++ b/js/src/jit/arm64/vixl/Assembler-vixl.cpp @@ -0,0 +1,5088 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Assembler-vixl.h" + +#include <cmath> + +#include "jit/arm64/vixl/MacroAssembler-vixl.h" + +namespace vixl { + +// CPURegList utilities. +CPURegister CPURegList::PopLowestIndex() { + if (IsEmpty()) { + return NoCPUReg; + } + int index = CountTrailingZeros(list_); + VIXL_ASSERT((1 << index) & list_); + Remove(index); + return CPURegister(index, size_, type_); +} + + +CPURegister CPURegList::PopHighestIndex() { + VIXL_ASSERT(IsValid()); + if (IsEmpty()) { + return NoCPUReg; + } + int index = CountLeadingZeros(list_); + index = kRegListSizeInBits - 1 - index; + VIXL_ASSERT((1 << index) & list_); + Remove(index); + return CPURegister(index, size_, type_); +} + + +bool CPURegList::IsValid() const { + if ((type_ == CPURegister::kRegister) || + (type_ == CPURegister::kVRegister)) { + bool is_valid = true; + // Try to create a CPURegister for each element in the list. 
+ for (int i = 0; i < kRegListSizeInBits; i++) { + if (((list_ >> i) & 1) != 0) { + is_valid &= CPURegister(i, size_, type_).IsValid(); + } + } + return is_valid; + } else if (type_ == CPURegister::kNoRegister) { + // We can't use IsEmpty here because that asserts IsValid(). + return list_ == 0; + } else { + return false; + } +} + + +void CPURegList::RemoveCalleeSaved() { + if (type() == CPURegister::kRegister) { + Remove(GetCalleeSaved(RegisterSizeInBits())); + } else if (type() == CPURegister::kVRegister) { + Remove(GetCalleeSavedV(RegisterSizeInBits())); + } else { + VIXL_ASSERT(type() == CPURegister::kNoRegister); + VIXL_ASSERT(IsEmpty()); + // The list must already be empty, so do nothing. + } +} + + +CPURegList CPURegList::Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3) { + return Union(list_1, Union(list_2, list_3)); +} + + +CPURegList CPURegList::Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4) { + return Union(Union(list_1, list_2), Union(list_3, list_4)); +} + + +CPURegList CPURegList::Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3) { + return Intersection(list_1, Intersection(list_2, list_3)); +} + + +CPURegList CPURegList::Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4) { + return Intersection(Intersection(list_1, list_2), + Intersection(list_3, list_4)); +} + + +CPURegList CPURegList::GetCalleeSaved(unsigned size) { + return CPURegList(CPURegister::kRegister, size, 19, 29); +} + + +CPURegList CPURegList::GetCalleeSavedV(unsigned size) { + return CPURegList(CPURegister::kVRegister, size, 8, 15); +} + + +CPURegList CPURegList::GetCallerSaved(unsigned size) { + // Registers x0-x18 and lr (x30) are caller-saved. + CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18); + // Do not use lr directly to avoid initialisation order fiasco bugs for users. + list.Combine(Register(30, kXRegSize)); + return list; +} + + +CPURegList CPURegList::GetCallerSavedV(unsigned size) { + // Registers d0-d7 and d16-d31 are caller-saved. + CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7); + list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31)); + return list; +} + + +const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved(); +const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV(); +const CPURegList kCallerSaved = CPURegList::GetCallerSaved(); +const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV(); + + +// Registers. 
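The CPURegList operations above all treat list_ as a bitmask indexed by register code: PopLowestIndex counts trailing zeros, Remove clears a bit, and Union and Intersection reduce to bitwise OR and AND. A minimal standalone sketch of the pop-lowest idiom (not vixl code; the GCC/Clang builtin stands in for CountTrailingZeros):

#include <cstdint>

// Sketch: a register list as a bitmask, popping the lowest-numbered register.
static int
PopLowestIndexSketch(uint64_t* list)
{
    if (*list == 0)
        return -1;                         // empty list
    int index = __builtin_ctzll(*list);    // lowest set bit = lowest register code
    *list &= ~(uint64_t(1) << index);      // Remove(index)
    return index;
}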
+#define WREG(n) w##n, +const Register Register::wregisters[] = { +REGISTER_CODE_LIST(WREG) +}; +#undef WREG + +#define XREG(n) x##n, +const Register Register::xregisters[] = { +REGISTER_CODE_LIST(XREG) +}; +#undef XREG + +#define BREG(n) b##n, +const VRegister VRegister::bregisters[] = { +REGISTER_CODE_LIST(BREG) +}; +#undef BREG + +#define HREG(n) h##n, +const VRegister VRegister::hregisters[] = { +REGISTER_CODE_LIST(HREG) +}; +#undef HREG + +#define SREG(n) s##n, +const VRegister VRegister::sregisters[] = { +REGISTER_CODE_LIST(SREG) +}; +#undef SREG + +#define DREG(n) d##n, +const VRegister VRegister::dregisters[] = { +REGISTER_CODE_LIST(DREG) +}; +#undef DREG + +#define QREG(n) q##n, +const VRegister VRegister::qregisters[] = { +REGISTER_CODE_LIST(QREG) +}; +#undef QREG + +#define VREG(n) v##n, +const VRegister VRegister::vregisters[] = { +REGISTER_CODE_LIST(VREG) +}; +#undef VREG + + +const Register& Register::WRegFromCode(unsigned code) { + if (code == kSPRegInternalCode) { + return wsp; + } else { + VIXL_ASSERT(code < kNumberOfRegisters); + return wregisters[code]; + } +} + + +const Register& Register::XRegFromCode(unsigned code) { + if (code == kSPRegInternalCode) { + return sp; + } else { + VIXL_ASSERT(code < kNumberOfRegisters); + return xregisters[code]; + } +} + + +const VRegister& VRegister::BRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return bregisters[code]; +} + + +const VRegister& VRegister::HRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return hregisters[code]; +} + + +const VRegister& VRegister::SRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return sregisters[code]; +} + + +const VRegister& VRegister::DRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return dregisters[code]; +} + + +const VRegister& VRegister::QRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return qregisters[code]; +} + + +const VRegister& VRegister::VRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return vregisters[code]; +} + + +const Register& CPURegister::W() const { + VIXL_ASSERT(IsValidRegister()); + return Register::WRegFromCode(code_); +} + + +const Register& CPURegister::X() const { + VIXL_ASSERT(IsValidRegister()); + return Register::XRegFromCode(code_); +} + + +const VRegister& CPURegister::B() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::BRegFromCode(code_); +} + + +const VRegister& CPURegister::H() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::HRegFromCode(code_); +} + + +const VRegister& CPURegister::S() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::SRegFromCode(code_); +} + + +const VRegister& CPURegister::D() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::DRegFromCode(code_); +} + + +const VRegister& CPURegister::Q() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::QRegFromCode(code_); +} + + +const VRegister& CPURegister::V() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::VRegFromCode(code_); +} + + +// Operand. 
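Before the constructors that follow: an Operand is either an immediate, a shifted register, or an extended register, while a MemOperand pairs a 64-bit base with an immediate or register offset in Offset, PreIndex, or PostIndex mode. The examples below are a sketch of the forms those constructors accept, annotated with the approximate A64 syntax each encodes; they are illustrative only and assume the register aliases defined above:

// Sketch only: typical Operand and MemOperand constructions.
void OperandExamplesSketch() {
  Operand imm(42);                   // #42                 immediate
  Operand shifted(x1, LSL, 3);       // x1, lsl #3          shifted register
  Operand extended(w1, UXTW, 2);     // w1, uxtw #2         extended register

  MemOperand a(x0, 16);              // [x0, #16]           immediate offset
  MemOperand b(x0, x1, LSL, 3);      // [x0, x1, lsl #3]    register offset
  MemOperand c(x0, w1, SXTW, 2);     // [x0, w1, sxtw #2]   extended register offset
  MemOperand d(x0, 16, PreIndex);    // [x0, #16]!          pre-index
  MemOperand e(x0, 16, PostIndex);   // [x0], #16           post-index

  (void)imm; (void)shifted; (void)extended;
  (void)a; (void)b; (void)c; (void)d; (void)e;
}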
+Operand::Operand(int64_t immediate) + : immediate_(immediate), + reg_(NoReg), + shift_(NO_SHIFT), + extend_(NO_EXTEND), + shift_amount_(0) {} + + +Operand::Operand(Register reg, Shift shift, unsigned shift_amount) + : reg_(reg), + shift_(shift), + extend_(NO_EXTEND), + shift_amount_(shift_amount) { + VIXL_ASSERT(shift != MSL); + VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize)); + VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize)); + VIXL_ASSERT(!reg.IsSP()); +} + + +Operand::Operand(Register reg, Extend extend, unsigned shift_amount) + : reg_(reg), + shift_(NO_SHIFT), + extend_(extend), + shift_amount_(shift_amount) { + VIXL_ASSERT(reg.IsValid()); + VIXL_ASSERT(shift_amount <= 4); + VIXL_ASSERT(!reg.IsSP()); + + // Extend modes SXTX and UXTX require a 64-bit register. + VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX))); +} + + +bool Operand::IsImmediate() const { + return reg_.Is(NoReg); +} + + +bool Operand::IsShiftedRegister() const { + return reg_.IsValid() && (shift_ != NO_SHIFT); +} + + +bool Operand::IsExtendedRegister() const { + return reg_.IsValid() && (extend_ != NO_EXTEND); +} + + +bool Operand::IsZero() const { + if (IsImmediate()) { + return immediate() == 0; + } else { + return reg().IsZero(); + } +} + + +Operand Operand::ToExtendedRegister() const { + VIXL_ASSERT(IsShiftedRegister()); + VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4)); + return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); +} + + +// MemOperand +MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode) + : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) { + VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); +} + + +MemOperand::MemOperand(Register base, + Register regoffset, + Extend extend, + unsigned shift_amount) + : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset), + shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) { + VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); + VIXL_ASSERT(!regoffset.IsSP()); + VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); + + // SXTX extend mode requires a 64-bit offset register. + VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX)); +} + + +MemOperand::MemOperand(Register base, + Register regoffset, + Shift shift, + unsigned shift_amount) + : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset), + shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) { + VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); + VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP()); + VIXL_ASSERT(shift == LSL); +} + + +MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode) + : base_(base), regoffset_(NoReg), addrmode_(addrmode) { + VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); + + if (offset.IsImmediate()) { + offset_ = offset.immediate(); + } else if (offset.IsShiftedRegister()) { + VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex)); + + regoffset_ = offset.reg(); + shift_ = offset.shift(); + shift_amount_ = offset.shift_amount(); + + extend_ = NO_EXTEND; + offset_ = 0; + + // These assertions match those in the shifted-register constructor. 
+ VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP()); + VIXL_ASSERT(shift_ == LSL); + } else { + VIXL_ASSERT(offset.IsExtendedRegister()); + VIXL_ASSERT(addrmode == Offset); + + regoffset_ = offset.reg(); + extend_ = offset.extend(); + shift_amount_ = offset.shift_amount(); + + shift_ = NO_SHIFT; + offset_ = 0; + + // These assertions match those in the extended-register constructor. + VIXL_ASSERT(!regoffset_.IsSP()); + VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); + VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX))); + } +} + + +bool MemOperand::IsImmediateOffset() const { + return (addrmode_ == Offset) && regoffset_.Is(NoReg); +} + + +bool MemOperand::IsRegisterOffset() const { + return (addrmode_ == Offset) && !regoffset_.Is(NoReg); +} + + +bool MemOperand::IsPreIndex() const { + return addrmode_ == PreIndex; +} + + +bool MemOperand::IsPostIndex() const { + return addrmode_ == PostIndex; +} + + +void MemOperand::AddOffset(int64_t offset) { + VIXL_ASSERT(IsImmediateOffset()); + offset_ += offset; +} + + +// Assembler +Assembler::Assembler(PositionIndependentCodeOption pic) + : pic_(pic) { +} + + +// Code generation. +void Assembler::br(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(BR | Rn(xn)); +} + + +void Assembler::blr(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLR | Rn(xn)); +} + + +void Assembler::ret(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(RET | Rn(xn)); +} + + +void Assembler::NEONTable(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONTableOp op) { + VIXL_ASSERT(vd.Is16B() || vd.Is8B()); + VIXL_ASSERT(vn.Is16B()); + VIXL_ASSERT(AreSameFormat(vd, vm)); + Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONTable(vd, vn, vm, NEON_TBL_1v); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + USE(vn2); + VIXL_ASSERT(AreSameFormat(vn, vn2)); + VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); + + NEONTable(vd, vn, vm, NEON_TBL_2v); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + USE(vn2, vn3); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3)); + VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); + VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); + + NEONTable(vd, vn, vm, NEON_TBL_3v); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + USE(vn2, vn3, vn4); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4)); + VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); + VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); + VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters)); + + NEONTable(vd, vn, vm, NEON_TBL_4v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONTable(vd, vn, vm, NEON_TBX_1v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + USE(vn2); + VIXL_ASSERT(AreSameFormat(vn, vn2)); + VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); + + NEONTable(vd, vn, vm, NEON_TBX_2v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const 
VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + USE(vn2, vn3); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3)); + VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); + VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); + + NEONTable(vd, vn, vm, NEON_TBX_3v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + USE(vn2, vn3, vn4); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4)); + VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); + VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); + VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters)); + + NEONTable(vd, vn, vm, NEON_TBX_4v); +} + + +void Assembler::add(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, LeaveFlags, ADD); +} + + +void Assembler::adds(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, SetFlags, ADD); +} + + +void Assembler::cmn(const Register& rn, + const Operand& operand) { + Register zr = AppropriateZeroRegFor(rn); + adds(zr, rn, operand); +} + + +void Assembler::sub(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, LeaveFlags, SUB); +} + + +void Assembler::subs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, SetFlags, SUB); +} + + +void Assembler::cmp(const Register& rn, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rn); + subs(zr, rn, operand); +} + + +void Assembler::neg(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + sub(rd, zr, operand); +} + + +void Assembler::negs(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + subs(rd, zr, operand); +} + + +void Assembler::adc(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC); +} + + +void Assembler::adcs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, SetFlags, ADC); +} + + +void Assembler::sbc(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC); +} + + +void Assembler::sbcs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, SetFlags, SBC); +} + + +void Assembler::ngc(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + sbc(rd, zr, operand); +} + + +void Assembler::ngcs(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + sbcs(rd, zr, operand); +} + + +// Logical instructions. 
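+// Usage sketch (illustrative; `masm` and the shifted-register Operand
+// constructor are assumed VIXL conventions rather than anything defined in
+// this file):
+//
+//   masm.and_(x0, x1, Operand(x2, LSL, 4));  // x0 = x1 & (x2 << 4)
+//   masm.orn(w0, w1, w2);                    // w0 = w1 | ~w2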
+void Assembler::and_(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, AND); +} + + +void Assembler::bic(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, BIC); +} + + +void Assembler::bics(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, BICS); +} + + +void Assembler::orr(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ORR); +} + + +void Assembler::orn(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ORN); +} + + +void Assembler::eor(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, EOR); +} + + +void Assembler::eon(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, EON); +} + + +void Assembler::lslv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::lsrv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::asrv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::rorv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +// Bitfield operations. 
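+// bfm/sbfm/ubfm below are the primitives behind the architectural shift and
+// extend aliases; for example (immr/imms values per the ARMv8 alias rules):
+//
+//   ubfm(x0, x1, 4, 63);   // lsr x0, x1, #4
+//   ubfm(x0, x1, 60, 59);  // lsl x0, x1, #4
+//   sbfm(w0, w1, 0, 7);    // sxtb w0, w1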
+void Assembler::bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.size() == rn.size()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | BFM | N | + ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | SBFM | N | + ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.size() == rn.size()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | UBFM | N | + ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::csel(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSEL); +} + + +void Assembler::csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSINC); +} + + +void Assembler::csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSINV); +} + + +void Assembler::csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSNEG); +} + + +void Assembler::cset(const Register &rd, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + Register zr = AppropriateZeroRegFor(rd); + csinc(rd, zr, zr, InvertCondition(cond)); +} + + +void Assembler::csetm(const Register &rd, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + Register zr = AppropriateZeroRegFor(rd); + csinv(rd, zr, zr, InvertCondition(cond)); +} + + +void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csinc(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csinv(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csneg(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); +} + + +void Assembler::ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + ConditionalCompare(rn, operand, nzcv, cond, CCMN); +} + + +void Assembler::ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + ConditionalCompare(rn, operand, nzcv, cond, CCMP); +} + + +void Assembler::DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp 
op) { + Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32b(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); + Emit(SF(rm) | Rm(rm) | CRC32B | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32h(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); + Emit(SF(rm) | Rm(rm) | CRC32H | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32w(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); + Emit(SF(rm) | Rm(rm) | CRC32W | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32x(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits()); + Emit(SF(rm) | Rm(rm) | CRC32X | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32cb(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); + Emit(SF(rm) | Rm(rm) | CRC32CB | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32ch(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); + Emit(SF(rm) | Rm(rm) | CRC32CH | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32cw(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); + Emit(SF(rm) | Rm(rm) | CRC32CW | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32cx(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits()); + Emit(SF(rm) | Rm(rm) | CRC32CX | Rn(rn) | Rd(rd)); +} + + +void Assembler::mul(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); + DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD); +} + + +void Assembler::madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + DataProcessing3Source(rd, rn, rm, ra, MADD); +} + + +void Assembler::mneg(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); + DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB); +} + + +void Assembler::msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + DataProcessing3Source(rd, rn, rm, ra, MSUB); +} + + +void Assembler::umaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); + VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DataProcessing3Source(rd, rn, rm, ra, UMADDL_x); +} + + +void Assembler::smaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); + VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DataProcessing3Source(rd, rn, rm, ra, SMADDL_x); +} + + +void Assembler::umsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); + VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x); +} + + +void Assembler::smsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); + VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); + 
DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x); +} + + +void Assembler::smull(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.Is64Bits()); + VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); + DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x); +} + + +void Assembler::sdiv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::smulh(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + DataProcessing3Source(xd, xn, xm, xzr, SMULH_x); +} + + +void Assembler::umulh(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + DataProcessing3Source(xd, xn, xm, xzr, UMULH_x); +} + + +void Assembler::udiv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == rm.size()); + Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::rbit(const Register& rd, + const Register& rn) { + DataProcessing1Source(rd, rn, RBIT); +} + + +void Assembler::rev16(const Register& rd, + const Register& rn) { + DataProcessing1Source(rd, rn, REV16); +} + + +void Assembler::rev32(const Register& rd, + const Register& rn) { + VIXL_ASSERT(rd.Is64Bits()); + DataProcessing1Source(rd, rn, REV); +} + + +void Assembler::rev(const Register& rd, + const Register& rn) { + DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w); +} + + +void Assembler::clz(const Register& rd, + const Register& rn) { + DataProcessing1Source(rd, rn, CLZ); +} + + +void Assembler::cls(const Register& rd, + const Register& rn) { + DataProcessing1Source(rd, rn, CLS); +} + + +void Assembler::ldp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2)); +} + + +void Assembler::stp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); +} + + +void Assembler::ldpsw(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.Is64Bits()); + LoadStorePair(rt, rt2, src, LDPSW_x); +} + + +void Assembler::LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + // 'rt' and 'rt2' can only be aliased for stores. 
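+  // The offset is a signed 7-bit immediate scaled by the access size given by
+  // CalcLSPairDataSize(op): e.g. "ldp x0, x1, [sp, #16]" encodes imm7 = 2,
+  // while "ldp w0, w1, [sp, #16]" encodes imm7 = 4.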
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + VIXL_ASSERT(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op))); + + int offset = static_cast<int>(addr.offset()); + Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | + ImmLSPair(offset, CalcLSPairDataSize(op)); + + Instr addrmodeop; + if (addr.IsImmediateOffset()) { + addrmodeop = LoadStorePairOffsetFixed; + } else { + VIXL_ASSERT(addr.offset() != 0); + if (addr.IsPreIndex()) { + addrmodeop = LoadStorePairPreIndexFixed; + } else { + VIXL_ASSERT(addr.IsPostIndex()); + addrmodeop = LoadStorePairPostIndexFixed; + } + } + Emit(addrmodeop | memop); +} + + +void Assembler::ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + LoadStorePairNonTemporal(rt, rt2, src, + LoadPairNonTemporalOpFor(rt, rt2)); +} + + +void Assembler::stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + LoadStorePairNonTemporal(rt, rt2, dst, + StorePairNonTemporalOpFor(rt, rt2)); +} + + +void Assembler::LoadStorePairNonTemporal(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairNonTemporalOp op) { + VIXL_ASSERT(!rt.Is(rt2)); + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + VIXL_ASSERT(addr.IsImmediateOffset()); + + unsigned size = CalcLSPairDataSize( + static_cast<LoadStorePairOp>(op & LoadStorePairMask)); + VIXL_ASSERT(IsImmLSPair(addr.offset(), size)); + int offset = static_cast<int>(addr.offset()); + Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size)); +} + + +// Memory instructions. +void Assembler::ldrb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LDRB_w, option); +} + + +void Assembler::strb(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, STRB_w, option); +} + + +void Assembler::ldrsb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option); +} + + +void Assembler::ldrh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LDRH_w, option); +} + + +void Assembler::strh(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, STRH_w, option); +} + + +void Assembler::ldrsh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? 
LDRSH_x : LDRSH_w, option); +} + + +void Assembler::ldr(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LoadOpFor(rt), option); +} + + +void Assembler::str(const CPURegister& rt, const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, StoreOpFor(rt), option); +} + + +void Assembler::ldrsw(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(rt.Is64Bits()); + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LDRSW_x, option); +} + + +void Assembler::ldurb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRB_w, option); +} + + +void Assembler::sturb(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, STRB_w, option); +} + + +void Assembler::ldursb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option); +} + + +void Assembler::ldurh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRH_w, option); +} + + +void Assembler::sturh(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, STRH_w, option); +} + + +void Assembler::ldursh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option); +} + + +void Assembler::ldur(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LoadOpFor(rt), option); +} + + +void Assembler::stur(const CPURegister& rt, const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, StoreOpFor(rt), option); +} + + +void Assembler::ldursw(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(rt.Is64Bits()); + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRSW_x, option); +} + + +void Assembler::ldrsw(const Register& rt, int imm19) { + Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt)); +} + + +void Assembler::ldr(const CPURegister& rt, int imm19) { + LoadLiteralOp op = LoadLiteralOpFor(rt); + Emit(op | ImmLLiteral(imm19) | Rt(rt)); +} + + +void Assembler::prfm(PrefetchOperation op, int imm19) { + Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19)); +} + + +// Exclusive-access instructions. 
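+// ldxr/stxr and friends form load-exclusive / store-exclusive pairs; the
+// status register written by the store-exclusive reads 0 on success and 1 if
+// the exclusive monitor was lost. A typical emitted sequence (illustrative
+// assembly, not generated by this file) is:
+//
+//   retry:
+//     ldxr  w1, [x0]
+//     add   w1, w1, #1
+//     stxr  w2, w1, [x0]   // w2 = 0 on success
+//     cbnz  w2, retry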
+void Assembler::stxrb(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::stxrh(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::stxr(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::ldxrb(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::ldxrh(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::ldxr(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(rt.size() == rt2.size()); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base())); +} + + +void Assembler::ldxp(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.size() == rt2.size()); + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w; + Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base())); +} + + +void Assembler::stlxrb(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::stlxrh(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::stlxr(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::ldaxrb(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::ldaxrh(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::ldaxr(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
LDAXR_x : LDAXR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(rt.size() == rt2.size()); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base())); +} + + +void Assembler::ldaxp(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.size() == rt2.size()); + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w; + Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base())); +} + + +void Assembler::stlrb(const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::stlrh(const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::stlr(const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base())); +} + + +void Assembler::ldarb(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::ldarh(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::ldar(const Register& rt, + const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); +} + + +void Assembler::prfm(PrefetchOperation op, const MemOperand& address, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + Prefetch(op, address, option); +} + + +void Assembler::prfum(PrefetchOperation op, const MemOperand& address, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + Prefetch(op, address, option); +} + + +void Assembler::sys(int op1, int crn, int crm, int op2, const Register& rt) { + Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(rt)); +} + + +void Assembler::sys(int op, const Register& rt) { + Emit(SYS | SysOp(op) | Rt(rt)); +} + + +void Assembler::dc(DataCacheOp op, const Register& rt) { + VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA)); + sys(op, rt); +} + + +void Assembler::ic(InstructionCacheOp op, const Register& rt) { + VIXL_ASSERT(op == IVAU); + sys(op, rt); +} + + +// NEON structure loads and stores. 
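+// Addressing modes for the structure loads/stores below: a plain base
+// register (immediate offset 0), post-index by a general-purpose register, or
+// post-index by an immediate equal to the total bytes transferred (encoded as
+// rm = 31). Usage sketch (illustrative; the V16B()/V8H() and PostIndex
+// spellings are the usual VIXL conventions, assumed here):
+//
+//   ld1(v0.V16B(), v1.V16B(), MemOperand(x0, 32, PostIndex));  // x0 += 32
+//   st1(v2.V8H(), MemOperand(x1, x3, PostIndex));              // x1 += x3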
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) { + Instr addr_field = RnSP(addr.base()); + + if (addr.IsPostIndex()) { + VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex == + static_cast<NEONLoadStoreMultiStructPostIndexOp>( + NEONLoadStoreSingleStructPostIndex)); + + addr_field |= NEONLoadStoreMultiStructPostIndex; + if (addr.offset() == 0) { + addr_field |= RmNot31(addr.regoffset()); + } else { + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_field |= (0x1f << Rm_offset); + } + } else { + VIXL_ASSERT(addr.IsImmediateOffset() && (addr.offset() == 0)); + } + return addr_field; +} + +void Assembler::LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, + Instr op) { +#ifdef DEBUG + // Assert that addressing mode is either offset (with immediate 0), post + // index by immediate of the size of the register list, or post index by a + // value in a core register. + if (addr.IsImmediateOffset()) { + VIXL_ASSERT(addr.offset() == 0); + } else { + int offset = vt.SizeInBytes(); + switch (op) { + case NEON_LD1_1v: + case NEON_ST1_1v: + offset *= 1; break; + case NEONLoadStoreSingleStructLoad1: + case NEONLoadStoreSingleStructStore1: + case NEON_LD1R: + offset = (offset / vt.lanes()) * 1; break; + + case NEON_LD1_2v: + case NEON_ST1_2v: + case NEON_LD2: + case NEON_ST2: + offset *= 2; + break; + case NEONLoadStoreSingleStructLoad2: + case NEONLoadStoreSingleStructStore2: + case NEON_LD2R: + offset = (offset / vt.lanes()) * 2; break; + + case NEON_LD1_3v: + case NEON_ST1_3v: + case NEON_LD3: + case NEON_ST3: + offset *= 3; break; + case NEONLoadStoreSingleStructLoad3: + case NEONLoadStoreSingleStructStore3: + case NEON_LD3R: + offset = (offset / vt.lanes()) * 3; break; + + case NEON_LD1_4v: + case NEON_ST1_4v: + case NEON_LD4: + case NEON_ST4: + offset *= 4; break; + case NEONLoadStoreSingleStructLoad4: + case NEONLoadStoreSingleStructStore4: + case NEON_LD4R: + offset = (offset / vt.lanes()) * 4; break; + default: + VIXL_UNREACHABLE(); + } + VIXL_ASSERT(!addr.regoffset().Is(NoReg) || + addr.offset() == offset); + } +#else + USE(vt, addr, op); +#endif +} + +void Assembler::LoadStoreStruct(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreMultiStructOp op) { + LoadStoreStructVerify(vt, addr, op); + VIXL_ASSERT(vt.IsVector() || vt.Is1D()); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + + +void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + + +void Assembler::ld1(const VRegister& vt, + const MemOperand& src) { + LoadStoreStruct(vt, src, NEON_LD1_1v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD1_2v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD1_3v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + 
VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD1_4v); +} + + +void Assembler::ld2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD2); +} + + +void Assembler::ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2); +} + + +void Assembler::ld2r(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R); +} + + +void Assembler::ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD3); +} + + +void Assembler::ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3); +} + + +void Assembler::ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R); +} + + +void Assembler::ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD4); +} + + +void Assembler::ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4); +} + + +void Assembler::ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R); +} + + +void Assembler::st1(const VRegister& vt, + const MemOperand& src) { + LoadStoreStruct(vt, src, NEON_ST1_1v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_ST1_2v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_ST1_3v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + 
VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_ST1_4v); +} + + +void Assembler::st2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& dst) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, dst, NEON_ST2); +} + + +void Assembler::st2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& dst) { + USE(vt2); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2); +} + + +void Assembler::st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, dst, NEON_ST3); +} + + +void Assembler::st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& dst) { + USE(vt2, vt3); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3); +} + + +void Assembler::st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, dst, NEON_ST4); +} + + +void Assembler::st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& dst) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4); +} + + +void Assembler::LoadStoreStructSingle(const VRegister& vt, + uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + + // We support vt arguments of the form vt.VxT() or vt.T(), where x is the + // number of lanes, and T is b, h, s or d. + unsigned lane_size = vt.LaneSizeInBytes(); + VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size)); + + // Lane size is encoded in the opcode field. Lane index is encoded in the Q, + // S and size fields. 
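+  // Worked example: for an S-sized lane (lane_size == 4) at index 3, the byte
+  // offset computed below is 3 * 4 = 12 = 0b1100, which splits into
+  // size = 0b00, S = 1 and Q = 1.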
+ lane *= lane_size; + if (lane_size == 8) lane++; + + Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask; + Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask; + Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask; + + Instr instr = op; + switch (lane_size) { + case 1: instr |= NEONLoadStoreSingle_b; break; + case 2: instr |= NEONLoadStoreSingle_h; break; + case 4: instr |= NEONLoadStoreSingle_s; break; + default: + VIXL_ASSERT(lane_size == 8); + instr |= NEONLoadStoreSingle_d; + } + + Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt)); +} + + +void Assembler::ld1(const VRegister& vt, + int lane, + const MemOperand& src) { + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1); +} + + +void Assembler::ld1r(const VRegister& vt, + const MemOperand& src) { + LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R); +} + + +void Assembler::st1(const VRegister& vt, + int lane, + const MemOperand& dst) { + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1); +} + + +void Assembler::NEON3DifferentL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || + (vn.Is1S() && vd.Is1D()) || + (vn.Is8B() && vd.Is8H()) || + (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || + (vn.Is16B() && vd.Is8H())|| + (vn.Is8H() && vd.Is4S()) || + (vn.Is4S() && vd.Is2D())); + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3DifferentW(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || + (vm.Is4H() && vd.Is4S()) || + (vm.Is2S() && vd.Is2D()) || + (vm.Is16B() && vd.Is8H())|| + (vm.Is8H() && vd.Is4S()) || + (vm.Is4S() && vd.Is2D())); + Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3DifferentHN(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vm, vn)); + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || + (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || + (vd.Is16B() && vn.Is8H())|| + (vd.Is8H() && vn.Is4S()) || + (vd.Is4S() && vn.Is2D())); + Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +#define NEON_3DIFF_LONG_LIST(V) \ + V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \ + V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \ + V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \ + V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \ + V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \ + V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \ + V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \ + V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \ + V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \ + V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \ + V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \ + V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL2, 
vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \ + V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \ + V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \ + V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \ + V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \ + V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \ + V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(AS); \ + NEON3DifferentL(vd, vn, vm, OP); \ +} +NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +#define NEON_3DIFF_HN_LIST(V) \ + V(addhn, NEON_ADDHN, vd.IsD()) \ + V(addhn2, NEON_ADDHN2, vd.IsQ()) \ + V(raddhn, NEON_RADDHN, vd.IsD()) \ + V(raddhn2, NEON_RADDHN2, vd.IsQ()) \ + V(subhn, NEON_SUBHN, vd.IsD()) \ + V(subhn2, NEON_SUBHN2, vd.IsQ()) \ + V(rsubhn, NEON_RSUBHN, vd.IsD()) \ + V(rsubhn2, NEON_RSUBHN2, vd.IsQ()) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(AS); \ + NEON3DifferentHN(vd, vn, vm, OP); \ +} +NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::uaddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW); +} + + +void Assembler::uaddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW2); +} + + +void Assembler::saddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW); +} + + +void Assembler::saddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW2); +} + + +void Assembler::usubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW); +} + + +void Assembler::usubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW2); +} + + +void Assembler::ssubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW); +} + + +void Assembler::ssubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW2); +} + + +void Assembler::mov(const Register& rd, const Register& rm) { + // Moves involving the stack pointer are encoded as add immediate with + // second operand of zero. Otherwise, orr with first operand zr is + // used. 
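+  // For example, mov(x0, sp) assembles to "add x0, sp, #0", while
+  // mov(x0, x1) assembles to "orr x0, xzr, x1".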
+ if (rd.IsSP() || rm.IsSP()) { + add(rd, rm, 0); + } else { + orr(rd, AppropriateZeroRegFor(rd), rm); + } +} + + +void Assembler::mvn(const Register& rd, const Operand& operand) { + orn(rd, AppropriateZeroRegFor(rd), operand); +} + + +void Assembler::mrs(const Register& rt, SystemRegister sysreg) { + VIXL_ASSERT(rt.Is64Bits()); + Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); +} + + +void Assembler::msr(SystemRegister sysreg, const Register& rt) { + VIXL_ASSERT(rt.Is64Bits()); + Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); +} + + +void Assembler::clrex(int imm4) { + Emit(CLREX | CRm(imm4)); +} + + +void Assembler::dmb(BarrierDomain domain, BarrierType type) { + Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + + +void Assembler::dsb(BarrierDomain domain, BarrierType type) { + Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + + +void Assembler::isb() { + Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); +} + + +void Assembler::fmov(const VRegister& vd, double imm) { + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1D()); + Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm)); + } else { + VIXL_ASSERT(vd.Is2D()); + Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit; + Instr q = NEON_Q; + uint32_t encoded_imm = FP64ToImm8(imm); + Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const VRegister& vd, float imm) { + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S()); + Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm)); + } else { + VIXL_ASSERT(vd.Is2S() | vd.Is4S()); + Instr op = NEONModifiedImmediate_MOVI; + Instr q = vd.Is4S() ? NEON_Q : 0; + uint32_t encoded_imm = FP32ToImm8(imm); + Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(rd.size() == vn.size()); + FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; + Emit(op | Rd(rd) | Rn(vn)); +} + + +void Assembler::fmov(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(vd.size() == rn.size()); + FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx; + Emit(op | Rd(vd) | Rn(rn)); +} + + +void Assembler::fmov(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(vd.IsSameFormat(vn)); + Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn)); +} + + +void Assembler::fmov(const VRegister& vd, int index, const Register& rn) { + VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX()); + USE(index); + Emit(FMOV_d1_x | Rd(vd) | Rn(rn)); +} + + +void Assembler::fmov(const Register& rd, const VRegister& vn, int index) { + VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX()); + USE(index); + Emit(FMOV_x_d1 | Rd(rd) | Rn(vn)); +} + + +void Assembler::fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d); +} + + +void Assembler::fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d); +} + + +void Assembler::fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? 
FNMADD_s : FNMADD_d); +} + + +void Assembler::fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d); +} + + +void Assembler::fnmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm)); + Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d; + Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::FPCompareMacro(const VRegister& vn, + double value, + FPTrapFlags trap) { + USE(value); + // Although the fcmp{e} instructions can strictly only take an immediate + // value of +0.0, we don't need to check for -0.0 because the sign of 0.0 + // doesn't affect the result of the comparison. + VIXL_ASSERT(value == 0.0); + VIXL_ASSERT(vn.Is1S() || vn.Is1D()); + Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero; + Emit(FPType(vn) | op | Rn(vn)); +} + + +void Assembler::FPCompareMacro(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap) { + VIXL_ASSERT(vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(vn.IsSameSizeAndType(vm)); + Instr op = (trap == EnableTrap) ? FCMPE : FCMP; + Emit(FPType(vn) | op | Rm(vm) | Rn(vn)); +} + + +void Assembler::fcmp(const VRegister& vn, + const VRegister& vm) { + FPCompareMacro(vn, vm, DisableTrap); +} + + +void Assembler::fcmpe(const VRegister& vn, + const VRegister& vm) { + FPCompareMacro(vn, vm, EnableTrap); +} + + +void Assembler::fcmp(const VRegister& vn, + double value) { + FPCompareMacro(vn, value, DisableTrap); +} + + +void Assembler::fcmpe(const VRegister& vn, + double value) { + FPCompareMacro(vn, value, EnableTrap); +} + + +void Assembler::FPCCompareMacro(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap) { + VIXL_ASSERT(vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(vn.IsSameSizeAndType(vm)); + Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP; + Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv)); +} + +void Assembler::fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap); +} + + +void Assembler::fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap); +} + + +void Assembler::fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFPConvertToInt(const Register& rd, + const VRegister& vn, + Instr op) { + Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd)); +} + + +void Assembler::NEONFPConvertToInt(const VRegister& vd, + const VRegister& vn, + Instr op) { + if (vn.IsScalar()) { + VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D())); + op |= NEON_Q | NEONScalar; + } + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvt(const VRegister& vd, + const VRegister& vn) { + FPDataProcessing1SourceOp op; + if (vd.Is1D()) { + VIXL_ASSERT(vn.Is1S() || vn.Is1H()); + op = vn.Is1S() ? FCVT_ds : FCVT_dh; + } else if (vd.Is1S()) { + VIXL_ASSERT(vn.Is1D() || vn.Is1H()); + op = vn.Is1D() ? FCVT_sd : FCVT_sh; + } else { + VIXL_ASSERT(vd.Is1H()); + VIXL_ASSERT(vn.Is1D() || vn.Is1S()); + op = vn.Is1D() ? 
FCVT_hd : FCVT_hs; + } + FPDataProcessing1Source(vd, vn, op); +} + + +void Assembler::fcvtl(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || + (vd.Is2D() && vn.Is2S())); + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtl2(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || + (vd.Is2D() && vn.Is4S())); + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtn(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || + (vn.Is2D() && vd.Is2S())); + Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtn2(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || + (vn.Is2D() && vd.Is4S())); + Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtxn(const VRegister& vd, + const VRegister& vn) { + Instr format = 1 << NEONSize_offset; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S() && vn.Is1D()); + Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd)); + } else { + VIXL_ASSERT(vd.Is2S() && vn.Is2D()); + Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fcvtxn2(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(vd.Is4S() && vn.Is2D()); + Instr format = 1 << NEONSize_offset; + Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd)); +} + + +#define NEON_FP2REGMISC_FCVT_LIST(V) \ + V(fcvtnu, NEON_FCVTNU, FCVTNU) \ + V(fcvtns, NEON_FCVTNS, FCVTNS) \ + V(fcvtpu, NEON_FCVTPU, FCVTPU) \ + V(fcvtps, NEON_FCVTPS, FCVTPS) \ + V(fcvtmu, NEON_FCVTMU, FCVTMU) \ + V(fcvtms, NEON_FCVTMS, FCVTMS) \ + V(fcvtau, NEON_FCVTAU, FCVTAU) \ + V(fcvtas, NEON_FCVTAS, FCVTAS) + +#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \ +void Assembler::FN(const Register& rd, \ + const VRegister& vn) { \ + NEONFPConvertToInt(rd, vn, SCA_OP); \ +} \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn) { \ + NEONFPConvertToInt(vd, vn, VEC_OP); \ +} +NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS) +#undef DEFINE_ASM_FUNCS + + +void Assembler::fcvtzs(const Register& rd, + const VRegister& vn, + int fbits) { + VIXL_ASSERT(vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + + +void Assembler::fcvtzs(const VRegister& vd, + const VRegister& vn, + int fbits) { + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_FCVTZS); + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm); + } +} + + +void Assembler::fcvtzu(const Register& rd, + const VRegister& vn, + int fbits) { + VIXL_ASSERT(vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + + +void Assembler::fcvtzu(const VRegister& vd, + const VRegister& vn, + int fbits) { + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_FCVTZU); + } else { + 
VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm); + } +} + +void Assembler::ucvtf(const VRegister& vd, + const VRegister& vn, + int fbits) { + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_UCVTF); + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm); + } +} + +void Assembler::scvtf(const VRegister& vd, + const VRegister& vn, + int fbits) { + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_SCVTF); + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm); + } +} + + +void Assembler::scvtf(const VRegister& vd, + const Register& rn, + int fbits) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + + +void Assembler::ucvtf(const VRegister& vd, + const Register& rn, + int fbits) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + + +void Assembler::NEON3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3SameOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +#define NEON_FP2REGMISC_LIST(V) \ + V(fabs, NEON_FABS, FABS) \ + V(fneg, NEON_FNEG, FNEG) \ + V(fsqrt, NEON_FSQRT, FSQRT) \ + V(frintn, NEON_FRINTN, FRINTN) \ + V(frinta, NEON_FRINTA, FRINTA) \ + V(frintp, NEON_FRINTP, FRINTP) \ + V(frintm, NEON_FRINTM, FRINTM) \ + V(frintx, NEON_FRINTX, FRINTX) \ + V(frintz, NEON_FRINTZ, FRINTZ) \ + V(frinti, NEON_FRINTI, FRINTI) \ + V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \ + V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar ) + + +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn) { \ + Instr op; \ + if (vd.IsScalar()) { \ + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \ + op = SCA_OP; \ + } else { \ + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ + op = VEC_OP; \ + } \ + NEONFP2RegMisc(vd, vn, op); \ +} +NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + int value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0); + USE(value); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::cmeq(const VRegister& vd, + const 
VRegister& vn, + int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value); +} + + +void Assembler::cmge(const VRegister& vd, + const VRegister& vn, + int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGE_zero, value); +} + + +void Assembler::cmgt(const VRegister& vd, + const VRegister& vn, + int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGT_zero, value); +} + + +void Assembler::cmle(const VRegister& vd, + const VRegister& vn, + int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMLE_zero, value); +} + + +void Assembler::cmlt(const VRegister& vd, + const VRegister& vn, + int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMLT_zero, value); +} + + +void Assembler::shll(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) || + (vd.Is4S() && vn.Is4H() && shift == 16) || + (vd.Is2D() && vn.Is2S() && shift == 32)); + USE(shift); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + + +void Assembler::shll2(const VRegister& vd, + const VRegister& vn, + int shift) { + USE(shift); + VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) || + (vd.Is4S() && vn.Is8H() && shift == 16) || + (vd.Is2D() && vn.Is4S() && shift == 32)); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + double value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0.0); + USE(value); + + Instr op = vop; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + op |= NEON_Q | NEONScalar; + } else { + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); + } + + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcmeq(const VRegister& vd, + const VRegister& vn, + double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value); +} + + +void Assembler::fcmge(const VRegister& vd, + const VRegister& vn, + double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value); +} + + +void Assembler::fcmgt(const VRegister& vd, + const VRegister& vn, + double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value); +} + + +void Assembler::fcmle(const VRegister& vd, + const VRegister& vn, + double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value); +} + + +void Assembler::fcmlt(const VRegister& vd, + const VRegister& vn, + double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value); +} + + +void Assembler::frecpx(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(vd.IsScalar()); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd)); +} + + +#define NEON_3SAME_LIST(V) \ + V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \ + V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \ + V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \ + V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \ + V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \ + V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \ + V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \ + V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \ + V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \ + V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \ + V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \ + V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \ + V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \ + V(sqdmulh, NEON_SQDMULH, 
vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \ + V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \ + V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \ + V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \ + V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \ + V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \ + V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \ + V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \ + V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \ + V(uqadd, NEON_UQADD, true) \ + V(sqadd, NEON_SQADD, true) \ + V(uqsub, NEON_UQSUB, true) \ + V(sqsub, NEON_SQSUB, true) \ + V(sqshl, NEON_SQSHL, true) \ + V(uqshl, NEON_UQSHL, true) \ + V(sqrshl, NEON_SQRSHL, true) \ + V(uqrshl, NEON_UQRSHL, true) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(AS); \ + NEON3Same(vd, vn, vm, OP); \ +} +NEON_3SAME_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +#define NEON_FP3SAME_OP_LIST(V) \ + V(fadd, NEON_FADD, FADD) \ + V(fsub, NEON_FSUB, FSUB) \ + V(fmul, NEON_FMUL, FMUL) \ + V(fdiv, NEON_FDIV, FDIV) \ + V(fmax, NEON_FMAX, FMAX) \ + V(fmaxnm, NEON_FMAXNM, FMAXNM) \ + V(fmin, NEON_FMIN, FMIN) \ + V(fminnm, NEON_FMINNM, FMINNM) \ + V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \ + V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \ + V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \ + V(fabd, NEON_FABD, NEON_FABD_scalar) \ + V(fmla, NEON_FMLA, 0) \ + V(fmls, NEON_FMLS, 0) \ + V(facge, NEON_FACGE, NEON_FACGE_scalar) \ + V(facgt, NEON_FACGT, NEON_FACGT_scalar) \ + V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \ + V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \ + V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \ + V(faddp, NEON_FADDP, 0) \ + V(fmaxp, NEON_FMAXP, 0) \ + V(fminp, NEON_FMINP, 0) \ + V(fmaxnmp, NEON_FMAXNMP, 0) \ + V(fminnmp, NEON_FMINNMP, 0) + +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + Instr op; \ + if ((SCA_OP != 0) && vd.IsScalar()) { \ + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \ + op = SCA_OP; \ + } else { \ + VIXL_ASSERT(vd.IsVector()); \ + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ + op = VEC_OP; \ + } \ + 
NEONFP3Same(vd, vn, vm, op); \ +} +NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::addp(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is1D() && vn.Is2D())); + Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::faddp(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || + (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::fmaxp(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || + (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::fminp(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || + (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::fmaxnmp(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || + (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::fminnmp(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || + (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::orr(const VRegister& vd, + const int imm8, + const int left_shift) { + NEONModifiedImmShiftLsl(vd, imm8, left_shift, + NEONModifiedImmediate_ORR); +} + + +void Assembler::mov(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + if (vd.IsD()) { + orr(vd.V8B(), vn.V8B(), vn.V8B()); + } else { + VIXL_ASSERT(vd.IsQ()); + orr(vd.V16B(), vn.V16B(), vn.V16B()); + } +} + + +void Assembler::bic(const VRegister& vd, + const int imm8, + const int left_shift) { + NEONModifiedImmShiftLsl(vd, imm8, left_shift, + NEONModifiedImmediate_BIC); +} + + +void Assembler::movi(const VRegister& vd, + const uint64_t imm, + Shift shift, + const int shift_amount) { + VIXL_ASSERT((shift == LSL) || (shift == MSL)); + if (vd.Is2D() || vd.Is1D()) { + VIXL_ASSERT(shift_amount == 0); + int imm8 = 0; + for (int i = 0; i < 8; ++i) { + int byte = (imm >> (i * 8)) & 0xff; + VIXL_ASSERT((byte == 0) || (byte == 0xff)); + if (byte == 0xff) { + imm8 |= (1 << i); + } + } + int q = vd.Is2D() ? 
NEON_Q : 0; + Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI | + ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd)); + } else if (shift == LSL) { + VIXL_ASSERT(is_uint8(imm)); + NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount, + NEONModifiedImmediate_MOVI); + } else { + VIXL_ASSERT(is_uint8(imm)); + NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount, + NEONModifiedImmediate_MOVI); + } +} + + +void Assembler::mvn(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + if (vd.IsD()) { + not_(vd.V8B(), vn.V8B()); + } else { + VIXL_ASSERT(vd.IsQ()); + not_(vd.V16B(), vn.V16B()); + } +} + + +void Assembler::mvni(const VRegister& vd, + const int imm8, + Shift shift, + const int shift_amount) { + VIXL_ASSERT((shift == LSL) || (shift == MSL)); + if (shift == LSL) { + NEONModifiedImmShiftLsl(vd, imm8, shift_amount, + NEONModifiedImmediate_MVNI); + } else { + NEONModifiedImmShiftMsl(vd, imm8, shift_amount, + NEONModifiedImmediate_MVNI); + } +} + + +void Assembler::NEONFPByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || + (vd.Is4S() && vm.Is1S()) || + (vd.Is1S() && vm.Is1S()) || + (vd.Is2D() && vm.Is1D()) || + (vd.Is1D() && vm.Is1D())); + VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) || + (vm.Is1D() && (vm_index < 2))); + + Instr op = vop; + int index_num_bits = vm.Is1S() ? 2 : 1; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + } + + Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | + Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || + (vd.Is8H() && vm.Is1H()) || + (vd.Is1H() && vm.Is1H()) || + (vd.Is2S() && vm.Is1S()) || + (vd.Is4S() && vm.Is1S()) || + (vd.Is1S() && vm.Is1S())); + VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | + Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONByElementL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) || + (vd.Is4S() && vn.Is8H() && vm.Is1H()) || + (vd.Is1S() && vn.Is1H() && vm.Is1H()) || + (vd.Is2D() && vn.Is2S() && vm.Is1S()) || + (vd.Is2D() && vn.Is4S() && vm.Is1S()) || + (vd.Is1D() && vn.Is1S() && vm.Is1S())); + + VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 
3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | + Rm(vm) | Rn(vn) | Rd(vd)); +} + + +#define NEON_BYELEMENT_LIST(V) \ + V(mul, NEON_MUL_byelement, vn.IsVector()) \ + V(mla, NEON_MLA_byelement, vn.IsVector()) \ + V(mls, NEON_MLS_byelement, vn.IsVector()) \ + V(sqdmulh, NEON_SQDMULH_byelement, true) \ + V(sqrdmulh, NEON_SQRDMULH_byelement, true) + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(AS); \ + NEONByElement(vd, vn, vm, vm_index, OP); \ +} +NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +#define NEON_FPBYELEMENT_LIST(V) \ + V(fmul, NEON_FMUL_byelement) \ + V(fmla, NEON_FMLA_byelement) \ + V(fmls, NEON_FMLS_byelement) \ + V(fmulx, NEON_FMULX_byelement) + + +#define DEFINE_ASM_FUNC(FN, OP) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + NEONFPByElement(vd, vn, vm, vm_index, OP); \ +} +NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +#define NEON_BYELEMENT_LONG_LIST(V) \ + V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ()) + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(AS); \ + NEONByElementL(vd, vn, vm, vm_index, OP); \ +} +NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::suqadd(const VRegister& vd, + const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_SUQADD); +} + + +void Assembler::usqadd(const VRegister& vd, + const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_USQADD); +} + + +void Assembler::abs(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_ABS); +} + + +void Assembler::sqabs(const VRegister& vd, + const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_SQABS); +} + + +void Assembler::neg(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_NEG); +} + + +void Assembler::sqneg(const VRegister& vd, + const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_SQNEG); +} + + +void Assembler::NEONXtn(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop) { + Instr format, 
op = vop;
+  if (vd.IsScalar()) {
+    VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+                (vd.Is1H() && vn.Is1S()) ||
+                (vd.Is1S() && vn.Is1D()));
+    op |= NEON_Q | NEONScalar;
+    format = SFormat(vd);
+  } else {
+    VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+                (vd.Is4H() && vn.Is4S()) ||
+                (vd.Is2S() && vn.Is2D()) ||
+                (vd.Is16B() && vn.Is8H()) ||
+                (vd.Is8H() && vn.Is4S()) ||
+                (vd.Is4S() && vn.Is2D()));
+    format = VFormat(vd);
+  }
+  Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::xtn(const VRegister& vd,
+                    const VRegister& vn) {
+  VIXL_ASSERT(vd.IsVector() && vd.IsD());
+  NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::xtn2(const VRegister& vd,
+                     const VRegister& vn) {
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::sqxtn(const VRegister& vd,
+                      const VRegister& vn) {
+  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+  NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtn2(const VRegister& vd,
+                       const VRegister& vn) {
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtun(const VRegister& vd,
+                       const VRegister& vn) {
+  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+  NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::sqxtun2(const VRegister& vd,
+                        const VRegister& vn) {
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::uqxtn(const VRegister& vd,
+                      const VRegister& vn) {
+  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+  NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+void Assembler::uqxtn2(const VRegister& vd,
+                       const VRegister& vn) {
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
+void Assembler::not_(const VRegister& vd,
+                     const VRegister& vn) {
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rbit(const VRegister& vd,
+                     const VRegister& vn) {
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ext(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vm,
+                    int index) {
+  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  VIXL_ASSERT((0 <= index) && (index < vd.lanes()));
+  Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::dup(const VRegister& vd,
+                    const VRegister& vn,
+                    int vn_index) {
+  Instr q, scalar;
+
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  int lane_size = vn.LaneSizeInBytes();
+  NEONFormatField format;
+  switch (lane_size) {
+    case 1: format = NEON_16B; break;
+    case 2: format = NEON_8H; break;
+    case 4: format = NEON_4S; break;
+    default:
+      VIXL_ASSERT(lane_size == 8);
+      format = NEON_2D;
+      break;
+  }
+
+  if (vd.IsScalar()) {
+    q = NEON_Q;
+    scalar = NEONScalar;
+  } else {
+    VIXL_ASSERT(!vd.Is1D());
+    q = vd.IsD() ? 0 : NEON_Q;
+    scalar = 0;
+  }
+  Emit(q | scalar | NEON_DUP_ELEMENT |
+       ImmNEON5(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+                    const VRegister& vn,
+                    int vn_index) {
+  VIXL_ASSERT(vn.IsScalar());
+  dup(vd, vn, vn_index);
+}
+
+
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+  VIXL_ASSERT(!vd.Is1D());
+  VIXL_ASSERT(vd.Is2D() == rn.IsX());
+  int q = vd.IsD() ?
0 : NEON_Q; + Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd)); +} + + +void Assembler::ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vd.LaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: format = NEON_16B; break; + case 2: format = NEON_8H; break; + case 4: format = NEON_4S; break; + default: + VIXL_ASSERT(lane_size == 8); + format = NEON_2D; + break; + } + + VIXL_ASSERT((0 <= vd_index) && + (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); + VIXL_ASSERT((0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); + Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) | + ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd)); +} + + +void Assembler::mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + ins(vd, vd_index, vn, vn_index); +} + + +void Assembler::ins(const VRegister& vd, + int vd_index, + const Register& rn) { + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vd.LaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: format = NEON_16B; VIXL_ASSERT(rn.IsW()); break; + case 2: format = NEON_8H; VIXL_ASSERT(rn.IsW()); break; + case 4: format = NEON_4S; VIXL_ASSERT(rn.IsW()); break; + default: + VIXL_ASSERT(lane_size == 8); + VIXL_ASSERT(rn.IsX()); + format = NEON_2D; + break; + } + + VIXL_ASSERT((0 <= vd_index) && + (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); + Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd)); +} + + +void Assembler::mov(const VRegister& vd, + int vd_index, + const Register& rn) { + ins(vd, vd_index, rn); +} + + +void Assembler::umov(const Register& rd, + const VRegister& vn, + int vn_index) { + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vn.LaneSizeInBytes(); + NEONFormatField format; + Instr q = 0; + switch (lane_size) { + case 1: format = NEON_16B; VIXL_ASSERT(rd.IsW()); break; + case 2: format = NEON_8H; VIXL_ASSERT(rd.IsW()); break; + case 4: format = NEON_4S; VIXL_ASSERT(rd.IsW()); break; + default: + VIXL_ASSERT(lane_size == 8); + VIXL_ASSERT(rd.IsX()); + format = NEON_2D; + q = NEON_Q; + break; + } + + VIXL_ASSERT((0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); + Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); +} + + +void Assembler::mov(const Register& rd, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(vn.SizeInBytes() >= 4); + umov(rd, vn, vn_index); +} + + +void Assembler::smov(const Register& rd, + const VRegister& vn, + int vn_index) { + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s. + int lane_size = vn.LaneSizeInBytes(); + NEONFormatField format; + Instr q = 0; + VIXL_ASSERT(lane_size != 8); + switch (lane_size) { + case 1: format = NEON_16B; break; + case 2: format = NEON_8H; break; + default: + VIXL_ASSERT(lane_size == 4); + VIXL_ASSERT(rd.IsX()); + format = NEON_4S; + break; + } + q = rd.IsW() ? 
0 : NEON_Q; + VIXL_ASSERT((0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); + Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); +} + + +void Assembler::cls(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd)); +} + + +void Assembler::clz(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd)); +} + + +void Assembler::cnt(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd)); +} + + +void Assembler::rev16(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd)); +} + + +void Assembler::rev32(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H()); + Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd)); +} + + +void Assembler::rev64(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd)); +} + + +void Assembler::ursqrte(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is2S() || vd.Is4S()); + Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd)); +} + + +void Assembler::urecpe(const VRegister& vd, + const VRegister& vn) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is2S() || vd.Is4S()); + Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONAddlp(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp op) { + VIXL_ASSERT((op == NEON_SADDLP) || + (op == NEON_UADDLP) || + (op == NEON_SADALP) || + (op == NEON_UADALP)); + + VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || + (vn.Is4H() && vd.Is2S()) || + (vn.Is2S() && vd.Is1D()) || + (vn.Is16B() && vd.Is8H())|| + (vn.Is8H() && vd.Is4S()) || + (vn.Is4S() && vd.Is2D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::saddlp(const VRegister& vd, + const VRegister& vn) { + NEONAddlp(vd, vn, NEON_SADDLP); +} + + +void Assembler::uaddlp(const VRegister& vd, + const VRegister& vn) { + NEONAddlp(vd, vn, NEON_UADDLP); +} + + +void Assembler::sadalp(const VRegister& vd, + const VRegister& vn) { + NEONAddlp(vd, vn, NEON_SADALP); +} + + +void Assembler::uadalp(const VRegister& vd, + const VRegister& vn) { + NEONAddlp(vd, vn, NEON_UADALP); +} + + +void Assembler::NEONAcrossLanesL(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op) { + VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || + (vn.Is16B() && vd.Is1H()) || + (vn.Is4H() && vd.Is1S()) || + (vn.Is8H() && vd.Is1S()) || + (vn.Is4S() && vd.Is1D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::saddlv(const VRegister& vd, + const VRegister& vn) { + NEONAcrossLanesL(vd, vn, NEON_SADDLV); +} + + +void Assembler::uaddlv(const VRegister& vd, + const VRegister& vn) { + NEONAcrossLanesL(vd, vn, NEON_UADDLV); +} + + +void Assembler::NEONAcrossLanes(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op) { + VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || + (vn.Is16B() && vd.Is1B()) || + (vn.Is4H() && vd.Is1H()) || + (vn.Is8H() && vd.Is1H()) || + 
(vn.Is4S() && vd.Is1S())); + if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); + } else { + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); + } +} + + +#define NEON_ACROSSLANES_LIST(V) \ + V(fmaxv, NEON_FMAXV, vd.Is1S()) \ + V(fminv, NEON_FMINV, vd.Is1S()) \ + V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \ + V(fminnmv, NEON_FMINNMV, vd.Is1S()) \ + V(addv, NEON_ADDV, true) \ + V(smaxv, NEON_SMAXV, true) \ + V(sminv, NEON_SMINV, true) \ + V(umaxv, NEON_UMAXV, true) \ + V(uminv, NEON_UMINV, true) + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn) { \ + VIXL_ASSERT(AS); \ + NEONAcrossLanes(vd, vn, OP); \ +} +NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::NEONPerm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONPermOp op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(!vd.Is1D()); + Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::trn1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_TRN1); +} + + +void Assembler::trn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_TRN2); +} + + +void Assembler::uzp1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_UZP1); +} + + +void Assembler::uzp2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_UZP2); +} + + +void Assembler::zip1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_ZIP1); +} + + +void Assembler::zip2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_ZIP2); +} + + +void Assembler::NEONShiftImmediate(const VRegister& vd, + const VRegister& vn, + NEONShiftImmediateOp op, + int immh_immb) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Instr q, scalar; + if (vn.IsScalar()) { + q = NEON_Q; + scalar = NEONScalar; + } else { + q = vd.IsD() ? 0 : NEON_Q; + scalar = 0; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONShiftLeftImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.LaneSizeInBits(); + VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16); +} + + +void Assembler::NEONShiftRightImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.LaneSizeInBits(); + VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16); +} + + +void Assembler::NEONShiftImmediateL(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.LaneSizeInBits(); + VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits)); + int immh_immb = (laneSizeInBits + shift) << 16; + + VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || + (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || + (vn.Is16B() && vd.Is8H())|| + (vn.Is8H() && vd.Is4S()) || + (vn.Is4S() && vd.Is2D())); + Instr q; + q = vn.IsD() ? 
0 : NEON_Q; + Emit(q | op | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONShiftImmediateN(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + Instr q, scalar; + int laneSizeInBits = vd.LaneSizeInBits(); + VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits)); + int immh_immb = (2 * laneSizeInBits - shift) << 16; + + if (vn.IsScalar()) { + VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || + (vd.Is1H() && vn.Is1S()) || + (vd.Is1S() && vn.Is1D())); + q = NEON_Q; + scalar = NEONScalar; + } else { + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || + (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || + (vd.Is16B() && vn.Is8H())|| + (vd.Is8H() && vn.Is4S()) || + (vd.Is4S() && vn.Is2D())); + scalar = 0; + q = vd.IsD() ? 0 : NEON_Q; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::shl(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL); +} + + +void Assembler::sli(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI); +} + + +void Assembler::sqshl(const VRegister& vd, + const VRegister& vn, + int shift) { + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm); +} + + +void Assembler::sqshlu(const VRegister& vd, + const VRegister& vn, + int shift) { + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU); +} + + +void Assembler::uqshl(const VRegister& vd, + const VRegister& vn, + int shift) { + NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm); +} + + +void Assembler::sshll(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} + + +void Assembler::sshll2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} + + +void Assembler::sxtl(const VRegister& vd, + const VRegister& vn) { + sshll(vd, vn, 0); +} + + +void Assembler::sxtl2(const VRegister& vd, + const VRegister& vn) { + sshll2(vd, vn, 0); +} + + +void Assembler::ushll(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} + + +void Assembler::ushll2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} + + +void Assembler::uxtl(const VRegister& vd, + const VRegister& vn) { + ushll(vd, vn, 0); +} + + +void Assembler::uxtl2(const VRegister& vd, + const VRegister& vn) { + ushll2(vd, vn, 0); +} + + +void Assembler::sri(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRI); +} + + +void Assembler::sshr(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR); +} + + +void Assembler::ushr(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USHR); +} + + +void Assembler::srshr(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR); +} + + +void Assembler::urshr(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + 
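(Editorial aside, not part of the original patch: the right-shift forms above all funnel into NEONShiftRightImmediate, which packs the lane size and shift amount into the immh:immb field at bit 16 of the instruction. A minimal standalone mirror of that arithmetic; the helper name is invented here.)

#include <cassert>
#include <cstdint>

static uint32_t RightShiftImmhImmb(int lane_size_in_bits, int shift) {
  assert((shift >= 1) && (shift <= lane_size_in_bits));
  // Right shifts encode (2 * lane_size - shift); the left-shift helper above
  // encodes (lane_size + shift) instead.
  return static_cast<uint32_t>((2 * lane_size_in_bits - shift) << 16);
}
// Example: 8-bit lanes shifted right by 3 give immh:immb = 0b0001101
// (16 - 3 = 13), where immh = 0001 selects byte lanes.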
NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR); +} + + +void Assembler::ssra(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA); +} + + +void Assembler::usra(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USRA); +} + + +void Assembler::srsra(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA); +} + + +void Assembler::ursra(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA); +} + + +void Assembler::shrn(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} + + +void Assembler::shrn2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} + + +void Assembler::rshrn(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} + + +void Assembler::rshrn2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} + + +void Assembler::sqshrn(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} + + +void Assembler::sqshrn2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} + + +void Assembler::sqrshrn(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} + + +void Assembler::sqrshrn2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} + + +void Assembler::sqshrun(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} + + +void Assembler::sqshrun2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} + + +void Assembler::sqrshrun(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); +} + + +void Assembler::sqrshrun2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); +} + + +void Assembler::uqshrn(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); +} + + +void Assembler::uqshrn2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); +} + + +void Assembler::uqrshrn(const VRegister& vd, + const VRegister& vn, + int shift) { + 
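(Editorial aside, not part of the original patch: a rough scalar model of one 32-to-16-bit lane of the rounded, saturating narrowing shift emitted by this function, assuming the usual AArch64 semantics of adding 2^(shift-1) before shifting and then saturating to the narrower unsigned type; the helper name is invented here.)

#include <algorithm>
#include <cstdint>

static uint16_t UqrshrnLaneModel(uint32_t src, int shift) {
  // shift is 1..32 for a 32-bit source lane (see NEONShiftImmediateN above).
  uint64_t rounded =
      (static_cast<uint64_t>(src) + (UINT64_C(1) << (shift - 1))) >> shift;
  return static_cast<uint16_t>(std::min<uint64_t>(rounded, 0xffff));
}
// Examples: UqrshrnLaneModel(0x12345678, 16) == 0x1234, while
// UqrshrnLaneModel(0xffff0000, 8) saturates to 0xffff. The plain form writes
// the low half of the destination; uqrshrn2 fills the upper half.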
VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); +} + + +void Assembler::uqrshrn2(const VRegister& vd, + const VRegister& vn, + int shift) { + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); +} + + +// Note: +// Below, a difference in case for the same letter indicates a +// negated bit. +// If b is 1, then B is 0. +uint32_t Assembler::FP32ToImm8(float imm) { + VIXL_ASSERT(IsImmFP32(imm)); + // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 + uint32_t bits = float_to_rawbits(imm); + // bit7: a000.0000 + uint32_t bit7 = ((bits >> 31) & 0x1) << 7; + // bit6: 0b00.0000 + uint32_t bit6 = ((bits >> 29) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint32_t bit5_to_0 = (bits >> 19) & 0x3f; + + return bit7 | bit6 | bit5_to_0; +} + + +Instr Assembler::ImmFP32(float imm) { + return FP32ToImm8(imm) << ImmFP_offset; +} + + +uint32_t Assembler::FP64ToImm8(double imm) { + VIXL_ASSERT(IsImmFP64(imm)); + // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 + uint64_t bits = double_to_rawbits(imm); + // bit7: a000.0000 + uint64_t bit7 = ((bits >> 63) & 0x1) << 7; + // bit6: 0b00.0000 + uint64_t bit6 = ((bits >> 61) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint64_t bit5_to_0 = (bits >> 48) & 0x3f; + + return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0); +} + + +Instr Assembler::ImmFP64(double imm) { + return FP64ToImm8(imm) << ImmFP_offset; +} + + +// Code generation helpers. +void Assembler::MoveWide(const Register& rd, + uint64_t imm, + int shift, + MoveWideImmediateOp mov_op) { + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are zero (a positive 32-bit number) or top + // 33 bits are one (a negative 32-bit number, sign extended to 64 bits). + VIXL_ASSERT(((imm >> kWRegSize) == 0) || + ((imm >> (kWRegSize - 1)) == 0x1ffffffff)); + imm &= kWRegMask; + } + + if (shift >= 0) { + // Explicit shift specified. + VIXL_ASSERT((shift == 0) || (shift == 16) || + (shift == 32) || (shift == 48)); + VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); + shift /= 16; + } else { + // Calculate a new immediate and shift combination to encode the immediate + // argument. + shift = 0; + if ((imm & 0xffffffffffff0000) == 0) { + // Nothing to do. + } else if ((imm & 0xffffffff0000ffff) == 0) { + imm >>= 16; + shift = 1; + } else if ((imm & 0xffff0000ffffffff) == 0) { + VIXL_ASSERT(rd.Is64Bits()); + imm >>= 32; + shift = 2; + } else if ((imm & 0x0000ffffffffffff) == 0) { + VIXL_ASSERT(rd.Is64Bits()); + imm >>= 48; + shift = 3; + } + } + + VIXL_ASSERT(is_uint16(imm)); + + Emit(SF(rd) | MoveWideImmediateFixed | mov_op | + Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift)); +} + + +void Assembler::AddSub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op) { + VIXL_ASSERT(rd.size() == rn.size()); + if (operand.IsImmediate()) { + int64_t immediate = operand.immediate(); + VIXL_ASSERT(IsImmAddSub(immediate)); + Instr dest_reg = (S == SetFlags) ? 
Rd(rd) : RdSP(rd); + Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | + ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn)); + } else if (operand.IsShiftedRegister()) { + VIXL_ASSERT(operand.reg().size() == rd.size()); + VIXL_ASSERT(operand.shift() != ROR); + + // For instructions of the form: + // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] + // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ] + // add/sub wsp, wsp, <Wm> [, LSL #0-3 ] + // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ] + // or their 64-bit register equivalents, convert the operand from shifted to + // extended register mode, and emit an add/sub extended instruction. + if (rn.IsSP() || rd.IsSP()) { + VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags))); + DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S, + AddSubExtendedFixed | op); + } else { + DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); + } + } else { + VIXL_ASSERT(operand.IsExtendedRegister()); + DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); + } +} + + +void Assembler::AddSubWithCarry(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op) { + VIXL_ASSERT(rd.size() == rn.size()); + VIXL_ASSERT(rd.size() == operand.reg().size()); + VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); + Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::hlt(int code) { + VIXL_ASSERT(is_uint16(code)); + Emit(HLT | ImmException(code)); +} + + +void Assembler::brk(int code) { + VIXL_ASSERT(is_uint16(code)); + Emit(BRK | ImmException(code)); +} + + +void Assembler::svc(int code) { + Emit(SVC | ImmException(code)); +} + + +void Assembler::ConditionalCompare(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op) { + Instr ccmpop; + if (operand.IsImmediate()) { + int64_t immediate = operand.immediate(); + VIXL_ASSERT(IsImmConditionalCompare(immediate)); + ccmpop = ConditionalCompareImmediateFixed | op | + ImmCondCmp(static_cast<unsigned>(immediate)); + } else { + VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); + ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); + } + Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); +} + + +void Assembler::DataProcessing1Source(const Register& rd, + const Register& rn, + DataProcessing1SourceOp op) { + VIXL_ASSERT(rd.size() == rn.size()); + Emit(SF(rn) | op | Rn(rn) | Rd(rd)); +} + + +void Assembler::FPDataProcessing1Source(const VRegister& vd, + const VRegister& vn, + FPDataProcessing1SourceOp op) { + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + Emit(FPType(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::FPDataProcessing3Source(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va, + FPDataProcessing3SourceOp op) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va)); + Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va)); +} + + +void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, + const int imm8, + const int left_shift, + NEONModifiedImmediateOp op) { + VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || + vd.Is2S() || vd.Is4S()); + VIXL_ASSERT((left_shift == 0) || (left_shift == 8) || + (left_shift == 16) || (left_shift == 24)); + VIXL_ASSERT(is_uint8(imm8)); + + int cmode_1, cmode_2, cmode_3; + if (vd.Is8B() || vd.Is16B()) { + VIXL_ASSERT(op == NEONModifiedImmediate_MOVI); + 
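(Editorial aside, not part of the original patch: the byte-lane case immediately below forces cmode to 0b1110, while the else branch derives cmode from the left shift. A minimal standalone mirror of that else-branch arithmetic for 32-bit lanes; the helper name is invented here.)

static int MoviLslCmodeForWordLanes(int left_shift) {
  // left_shift is 0, 8, 16 or 24; cmode<2:1> selects the byte position of imm8.
  int cmode_1 = (left_shift >> 3) & 1;
  int cmode_2 = left_shift >> 4;
  return (cmode_2 << 2) | (cmode_1 << 1);
}
// Gives cmode 0b0000, 0b0010, 0b0100 and 0b0110 for shifts 0, 8, 16 and 24,
// matching the 32-bit MOVI (shifted immediate) rows of the architecture
// tables; halfword lanes additionally set cmode<3>.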
cmode_1 = 1; + cmode_2 = 1; + cmode_3 = 1; + } else { + cmode_1 = (left_shift >> 3) & 1; + cmode_2 = left_shift >> 4; + cmode_3 = 0; + if (vd.Is4H() || vd.Is8H()) { + VIXL_ASSERT((left_shift == 0) || (left_shift == 8)); + cmode_3 = 1; + } + } + int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1); + + int q = vd.IsQ() ? NEON_Q : 0; + + Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); +} + + +void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, + const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op) { + VIXL_ASSERT(vd.Is2S() || vd.Is4S()); + VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16)); + VIXL_ASSERT(is_uint8(imm8)); + + int cmode_0 = (shift_amount >> 4) & 1; + int cmode = 0xc | cmode_0; + + int q = vd.IsQ() ? NEON_Q : 0; + + Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); +} + + +void Assembler::EmitShift(const Register& rd, + const Register& rn, + Shift shift, + unsigned shift_amount) { + switch (shift) { + case LSL: + lsl(rd, rn, shift_amount); + break; + case LSR: + lsr(rd, rn, shift_amount); + break; + case ASR: + asr(rd, rn, shift_amount); + break; + case ROR: + ror(rd, rn, shift_amount); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Assembler::EmitExtendShift(const Register& rd, + const Register& rn, + Extend extend, + unsigned left_shift) { + VIXL_ASSERT(rd.size() >= rn.size()); + unsigned reg_size = rd.size(); + // Use the correct size of register. + Register rn_ = Register(rn.code(), rd.size()); + // Bits extracted are high_bit:0. + unsigned high_bit = (8 << (extend & 0x3)) - 1; + // Number of bits left in the result that are not introduced by the shift. + unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1); + + if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { + switch (extend) { + case UXTB: + case UXTH: + case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break; + case SXTB: + case SXTH: + case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; + case UXTX: + case SXTX: { + VIXL_ASSERT(rn.size() == kXRegSize); + // Nothing to extend. Just shift. + lsl(rd, rn_, left_shift); + break; + } + default: VIXL_UNREACHABLE(); + } + } else { + // No need to extend as the extended bits would be shifted away. + lsl(rd, rn_, left_shift); + } +} + + +void Assembler::DataProcExtendedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op) { + Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); + Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | + ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) | + dest_reg | RnSP(rn)); +} + + +Instr Assembler::LoadStoreMemOperand(const MemOperand& addr, + unsigned access_size, + LoadStoreScalingOption option) { + Instr base = RnSP(addr.base()); + int64_t offset = addr.offset(); + + if (addr.IsImmediateOffset()) { + bool prefer_unscaled = (option == PreferUnscaledOffset) || + (option == RequireUnscaledOffset); + if (prefer_unscaled && IsImmLSUnscaled(offset)) { + // Use the unscaled addressing mode. + return base | LoadStoreUnscaledOffsetFixed | + ImmLS(static_cast<int>(offset)); + } + + if ((option != RequireUnscaledOffset) && + IsImmLSScaled(offset, access_size)) { + // Use the scaled addressing mode. + return base | LoadStoreUnsignedOffsetFixed | + ImmLSUnsigned(static_cast<int>(offset) >> access_size); + } + + if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) { + // Use the unscaled addressing mode. 
+ return base | LoadStoreUnscaledOffsetFixed | + ImmLS(static_cast<int>(offset)); + } + } + + // All remaining addressing modes are register-offset, pre-indexed or + // post-indexed modes. + VIXL_ASSERT((option != RequireUnscaledOffset) && + (option != RequireScaledOffset)); + + if (addr.IsRegisterOffset()) { + Extend ext = addr.extend(); + Shift shift = addr.shift(); + unsigned shift_amount = addr.shift_amount(); + + // LSL is encoded in the option field as UXTX. + if (shift == LSL) { + ext = UXTX; + } + + // Shifts are encoded in one bit, indicating a left shift by the memory + // access size. + VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size)); + return base | LoadStoreRegisterOffsetFixed | Rm(addr.regoffset()) | + ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0); + } + + if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) { + return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset)); + } + + if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) { + return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset)); + } + + // If this point is reached, the MemOperand (addr) cannot be encoded. + VIXL_UNREACHABLE(); + return 0; +} + + +void Assembler::LoadStore(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op, + LoadStoreScalingOption option) { + Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option)); +} + + +void Assembler::Prefetch(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option) { + VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset()); + + Instr prfop = ImmPrefetchOperation(op); + Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option)); +} + + +bool Assembler::IsImmAddSub(int64_t immediate) { + return is_uint12(immediate) || + (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0)); +} + + +bool Assembler::IsImmConditionalCompare(int64_t immediate) { + return is_uint5(immediate); +} + + +bool Assembler::IsImmFP32(float imm) { + // Valid values will have the form: + // aBbb.bbbc.defg.h000.0000.0000.0000.0000 + uint32_t bits = float_to_rawbits(imm); + // bits[19..0] are cleared. + if ((bits & 0x7ffff) != 0) { + return false; + } + + // bits[29..25] are all set or all cleared. + uint32_t b_pattern = (bits >> 16) & 0x3e00; + if (b_pattern != 0 && b_pattern != 0x3e00) { + return false; + } + + // bit[30] and bit[29] are opposite. + if (((bits ^ (bits << 1)) & 0x40000000) == 0) { + return false; + } + + return true; +} + + +bool Assembler::IsImmFP64(double imm) { + // Valid values will have the form: + // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 + uint64_t bits = double_to_rawbits(imm); + // bits[47..0] are cleared. + if ((bits & 0x0000ffffffffffff) != 0) { + return false; + } + + // bits[61..54] are all set or all cleared. + uint32_t b_pattern = (bits >> 48) & 0x3fc0; + if ((b_pattern != 0) && (b_pattern != 0x3fc0)) { + return false; + } + + // bit[62] and bit[61] are opposite. 
+ if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) { + return false; + } + + return true; +} + + +bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) { + VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2); + bool offset_is_size_multiple = + (((offset >> access_size) << access_size) == offset); + return offset_is_size_multiple && is_int7(offset >> access_size); +} + + +bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) { + VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2); + bool offset_is_size_multiple = + (((offset >> access_size) << access_size) == offset); + return offset_is_size_multiple && is_uint12(offset >> access_size); +} + + +bool Assembler::IsImmLSUnscaled(int64_t offset) { + return is_int9(offset); +} + + +// The movn instruction can generate immediates containing an arbitrary 16-bit +// value, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff. +bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) { + return IsImmMovz(~imm, reg_size); +} + + +// The movz instruction can generate immediates containing an arbitrary 16-bit +// value, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. +bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) { + VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); + return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); +} + + +// Test if a given value can be encoded in the immediate field of a logical +// instruction. +// If it can be encoded, the function returns true, and values pointed to by n, +// imm_s and imm_r are updated with immediates encoded in the format required +// by the corresponding fields in the logical instruction. +// If it can not be encoded, the function returns false, and the values pointed +// to by n, imm_s and imm_r are undefined. +bool Assembler::IsImmLogical(uint64_t value, + unsigned width, + unsigned* n, + unsigned* imm_s, + unsigned* imm_r) { + VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize)); + + bool negate = false; + + // Logical immediates are encoded using parameters n, imm_s and imm_r using + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 bits + // are set. The pattern is rotated right by R, and repeated across a 32 or + // 64-bit value, depending on destination register width. + // + // Put another way: the basic format of a logical immediate is a single + // contiguous stretch of 1 bits, repeated across the whole word at intervals + // given by a power of 2. To identify them quickly, we first locate the + // lowest stretch of 1 bits, then the next 1 bit above that; that combination + // is different for every logical immediate, so it gives us all the + // information we need to identify the only logical immediate that our input + // could be, and then we simply check if that's the value we actually have. + // + // (The rotation parameter does give the possibility of the stretch of 1 bits + // going 'round the end' of the word. To deal with that, we observe that in + // any situation where that happens the bitwise NOT of the value is also a + // valid logical immediate. 
So we simply invert the input whenever its low bit + // is set, and then we know that the rotated case can't arise.) + + if (value & 1) { + // If the low bit is 1, negate the value, and set a flag to remember that we + // did (so that we can adjust the return values appropriately). + negate = true; + value = ~value; + } + + if (width == kWRegSize) { + // To handle 32-bit logical immediates, the very easiest thing is to repeat + // the input value twice to make a 64-bit word. The correct encoding of that + // as a logical immediate will also be the correct encoding of the 32-bit + // value. + + // Avoid making the assumption that the most-significant 32 bits are zero by + // shifting the value left and duplicating it. + value <<= kWRegSize; + value |= value >> kWRegSize; + } + + // The basic analysis idea: imagine our input word looks like this. + // + // 0011111000111110001111100011111000111110001111100011111000111110 + // c b a + // |<--d-->| + // + // We find the lowest set bit (as an actual power-of-2 value, not its index) + // and call it a. Then we add a to our original number, which wipes out the + // bottommost stretch of set bits and replaces it with a 1 carried into the + // next zero bit. Then we look for the new lowest set bit, which is in + // position b, and subtract it, so now our number is just like the original + // but with the lowest stretch of set bits completely gone. Now we find the + // lowest set bit again, which is position c in the diagram above. Then we'll + // measure the distance d between bit positions a and c (using CLZ), and that + // tells us that the only valid logical immediate that could possibly be equal + // to this number is the one in which a stretch of bits running from a to just + // below b is replicated every d bits. + uint64_t a = LowestSetBit(value); + uint64_t value_plus_a = value + a; + uint64_t b = LowestSetBit(value_plus_a); + uint64_t value_plus_a_minus_b = value_plus_a - b; + uint64_t c = LowestSetBit(value_plus_a_minus_b); + + int d, clz_a, out_n; + uint64_t mask; + + if (c != 0) { + // The general case, in which there is more than one stretch of set bits. + // Compute the repeat distance d, and set up a bitmask covering the basic + // unit of repetition (i.e. a word with the bottom d bits set). Also, in all + // of these cases the N bit of the output will be zero. + clz_a = CountLeadingZeros(a, kXRegSize); + int clz_c = CountLeadingZeros(c, kXRegSize); + d = clz_a - clz_c; + mask = ((UINT64_C(1) << d) - 1); + out_n = 0; + } else { + // Handle degenerate cases. + // + // If any of those 'find lowest set bit' operations didn't find a set bit at + // all, then the word will have been zero thereafter, so in particular the + // last lowest_set_bit operation will have returned zero. So we can test for + // all the special case conditions in one go by seeing if c is zero. + if (a == 0) { + // The input was zero (or all 1 bits, which will come to here too after we + // inverted it at the start of the function), for which we just return + // false. + return false; + } else { + // Otherwise, if c was zero but a was not, then there's just one stretch + // of set bits in our word, meaning that we have the trivial case of + // d == 64 and only one 'repetition'. Set up all the same variables as in + // the general case above, and set the N bit in the output. + clz_a = CountLeadingZeros(a, kXRegSize); + d = 64; + mask = ~UINT64_C(0); + out_n = 1; + } + } + + // If the repeat period d is not a power of two, it can't be encoded. 
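(Editorial aside, not part of the original patch: a concrete worked example may help here. 0xff00ff00ff00ff00 is a run of eight set bits, rotated right by 8 inside a 16-bit element and then replicated across the word, so following the encoding table above this routine should report N = 0, imm_s = 0b100111 and imm_r = 8 for it. A minimal standalone sketch that rebuilds such a value from its (element size, run length, rotation) description; the helper name is invented here.)

#include <cstdint>

static uint64_t ReplicateRotatedOnes(int d, int s, int r) {
  // d = element size in bits (a power of two up to 64), s = number of set
  // bits in the run (0 < s < d), r = rotate-right amount within the element.
  uint64_t mask = (d == 64) ? ~UINT64_C(0) : ((UINT64_C(1) << d) - 1);
  uint64_t element = (UINT64_C(1) << s) - 1;
  if (r != 0) {
    element = ((element >> r) | (element << (d - r))) & mask;
  }
  uint64_t value = 0;
  for (int bit = 0; bit < 64; bit += d) {
    value |= element << bit;
  }
  return value;
}
// ReplicateRotatedOnes(16, 8, 8) == 0xff00ff00ff00ff00.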
+ if (!IsPowerOf2(d)) { + return false; + } + + if (((b - a) & ~mask) != 0) { + // If the bit stretch (b - a) does not fit within the mask derived from the + // repeat period, then fail. + return false; + } + + // The only possible option is b - a repeated every d bits. Now we're going to + // actually construct the valid logical immediate derived from that + // specification, and see if it equals our original input. + // + // To repeat a value every d bits, we multiply it by a number of the form + // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can + // be derived using a table lookup on CLZ(d). + static const uint64_t multipliers[] = { + 0x0000000000000001UL, + 0x0000000100000001UL, + 0x0001000100010001UL, + 0x0101010101010101UL, + 0x1111111111111111UL, + 0x5555555555555555UL, + }; + uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57]; + uint64_t candidate = (b - a) * multiplier; + + if (value != candidate) { + // The candidate pattern doesn't match our input value, so fail. + return false; + } + + // We have a match! This is a valid logical immediate, so now we have to + // construct the bits and pieces of the instruction encoding that generates + // it. + + // Count the set bits in our basic stretch. The special case of clz(0) == -1 + // makes the answer come out right for stretches that reach the very top of + // the word (e.g. numbers like 0xffffc00000000000). + int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize); + int s = clz_a - clz_b; + + // Decide how many bits to rotate right by, to put the low bit of that basic + // stretch in position a. + int r; + if (negate) { + // If we inverted the input right at the start of this function, here's + // where we compensate: the number of set bits becomes the number of clear + // bits, and the rotation count is based on position b rather than position + // a (since b is the location of the 'lowest' 1 bit after inversion). + s = d - s; + r = (clz_b + 1) & (d - 1); + } else { + r = (clz_a + 1) & (d - 1); + } + + // Now we're done, except for having to encode the S output in such a way that + // it gives both the number of set bits and the length of the repeated + // segment. The s field is encoded like this: + // + // imms size S + // ssssss 64 UInt(ssssss) + // 0sssss 32 UInt(sssss) + // 10ssss 16 UInt(ssss) + // 110sss 8 UInt(sss) + // 1110ss 4 UInt(ss) + // 11110s 2 UInt(s) + // + // So we 'or' (-d << 1) with our computed s to form imms. + if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) { + *n = out_n; + *imm_s = ((-d << 1) | (s - 1)) & 0x3f; + *imm_r = r; + } + + return true; +} + + +LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) { + VIXL_ASSERT(rt.IsValid()); + if (rt.IsRegister()) { + return rt.Is64Bits() ? LDR_x : LDR_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.SizeInBits()) { + case kBRegSize: return LDR_b; + case kHRegSize: return LDR_h; + case kSRegSize: return LDR_s; + case kDRegSize: return LDR_d; + default: + VIXL_ASSERT(rt.IsQ()); + return LDR_q; + } + } +} + + +LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) { + VIXL_ASSERT(rt.IsValid()); + if (rt.IsRegister()) { + return rt.Is64Bits() ? 
STR_x : STR_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.SizeInBits()) { + case kBRegSize: return STR_b; + case kHRegSize: return STR_h; + case kSRegSize: return STR_s; + case kDRegSize: return STR_d; + default: + VIXL_ASSERT(rt.IsQ()); + return STR_q; + } + } +} + + +LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt, + const CPURegister& rt2) { + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + USE(rt2); + if (rt.IsRegister()) { + return rt.Is64Bits() ? STP_x : STP_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.SizeInBytes()) { + case kSRegSizeInBytes: return STP_s; + case kDRegSizeInBytes: return STP_d; + default: + VIXL_ASSERT(rt.IsQ()); + return STP_q; + } + } +} + + +LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2) { + VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w); + return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) | + LoadStorePairLBit); +} + + +LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2) { + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + USE(rt2); + if (rt.IsRegister()) { + return rt.Is64Bits() ? STNP_x : STNP_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.SizeInBytes()) { + case kSRegSizeInBytes: return STNP_s; + case kDRegSizeInBytes: return STNP_d; + default: + VIXL_ASSERT(rt.IsQ()); + return STNP_q; + } + } +} + + +LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2) { + VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w); + return static_cast<LoadStorePairNonTemporalOp>( + StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit); +} + + +LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) { + if (rt.IsRegister()) { + return rt.IsX() ? 
LDR_x_lit : LDR_w_lit; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.SizeInBytes()) { + case kSRegSizeInBytes: return LDR_s_lit; + case kDRegSizeInBytes: return LDR_d_lit; + default: + VIXL_ASSERT(rt.IsQ()); + return LDR_q_lit; + } + } +} + + +bool AreAliased(const CPURegister& reg1, const CPURegister& reg2, + const CPURegister& reg3, const CPURegister& reg4, + const CPURegister& reg5, const CPURegister& reg6, + const CPURegister& reg7, const CPURegister& reg8) { + int number_of_valid_regs = 0; + int number_of_valid_fpregs = 0; + + RegList unique_regs = 0; + RegList unique_fpregs = 0; + + const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; + + for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) { + if (regs[i].IsRegister()) { + number_of_valid_regs++; + unique_regs |= regs[i].Bit(); + } else if (regs[i].IsVRegister()) { + number_of_valid_fpregs++; + unique_fpregs |= regs[i].Bit(); + } else { + VIXL_ASSERT(!regs[i].IsValid()); + } + } + + int number_of_unique_regs = CountSetBits(unique_regs); + int number_of_unique_fpregs = CountSetBits(unique_fpregs); + + VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs); + VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs); + + return (number_of_valid_regs != number_of_unique_regs) || + (number_of_valid_fpregs != number_of_unique_fpregs); +} + + +bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2, + const CPURegister& reg3, const CPURegister& reg4, + const CPURegister& reg5, const CPURegister& reg6, + const CPURegister& reg7, const CPURegister& reg8) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1); + match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); + match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1); + match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1); + match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1); + match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1); + match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1); + return match; +} + + +bool AreSameFormat(const VRegister& reg1, const VRegister& reg2, + const VRegister& reg3, const VRegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || reg2.IsSameFormat(reg1); + match &= !reg3.IsValid() || reg3.IsSameFormat(reg1); + match &= !reg4.IsValid() || reg4.IsSameFormat(reg1); + return match; +} + + +bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, + const VRegister& reg3, const VRegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || + (reg2.code() == ((reg1.code() + 1) % kNumberOfVRegisters)); + match &= !reg3.IsValid() || + (reg3.code() == ((reg1.code() + 2) % kNumberOfVRegisters)); + match &= !reg4.IsValid() || + (reg4.code() == ((reg1.code() + 3) % kNumberOfVRegisters)); + return match; +} +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.h b/js/src/jit/arm64/vixl/Assembler-vixl.h new file mode 100644 index 000000000..d209f8b57 --- /dev/null +++ b/js/src/jit/arm64/vixl/Assembler-vixl.h @@ -0,0 +1,4257 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_ASSEMBLER_A64_H_ +#define VIXL_A64_ASSEMBLER_A64_H_ + +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" +#include "jit/arm64/vixl/MozBaseAssembler-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +#include "jit/JitSpewer.h" + +#include "jit/shared/Assembler-shared.h" +#include "jit/shared/IonAssemblerBufferWithConstantPools.h" + +namespace vixl { + +using js::jit::BufferOffset; +using js::jit::Label; +using js::jit::Address; +using js::jit::BaseIndex; + +typedef uint64_t RegList; +static const int kRegListSizeInBits = sizeof(RegList) * 8; + + +// Registers. + +// Some CPURegister methods can return Register or VRegister types, so we need +// to declare them in advance. +class Register; +class VRegister; + +class CPURegister { + public: + enum RegisterType { + // The kInvalid value is used to detect uninitialized static instances, + // which are always zero-initialized before any constructors are called. + kInvalid = 0, + kRegister, + kVRegister, + kFPRegister = kVRegister, + kNoRegister + }; + + constexpr CPURegister() : code_(0), size_(0), type_(kNoRegister) { + } + + constexpr CPURegister(unsigned code, unsigned size, RegisterType type) + : code_(code), size_(size), type_(type) { + } + + unsigned code() const { + VIXL_ASSERT(IsValid()); + return code_; + } + + RegisterType type() const { + VIXL_ASSERT(IsValidOrNone()); + return type_; + } + + RegList Bit() const { + VIXL_ASSERT(code_ < (sizeof(RegList) * 8)); + return IsValid() ? 
(static_cast<RegList>(1) << code_) : 0; + } + + unsigned size() const { + VIXL_ASSERT(IsValid()); + return size_; + } + + int SizeInBytes() const { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(size() % 8 == 0); + return size_ / 8; + } + + int SizeInBits() const { + VIXL_ASSERT(IsValid()); + return size_; + } + + bool Is8Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 8; + } + + bool Is16Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 16; + } + + bool Is32Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 32; + } + + bool Is64Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 64; + } + + bool Is128Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 128; + } + + bool IsValid() const { + if (IsValidRegister() || IsValidVRegister()) { + VIXL_ASSERT(!IsNone()); + return true; + } else { + // This assert is hit when the register has not been properly initialized. + // One cause for this can be an initialisation order fiasco. See + // https://isocpp.org/wiki/faq/ctors#static-init-order for some details. + VIXL_ASSERT(IsNone()); + return false; + } + } + + bool IsValidRegister() const { + return IsRegister() && + ((size_ == kWRegSize) || (size_ == kXRegSize)) && + ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode)); + } + + bool IsValidVRegister() const { + return IsVRegister() && + ((size_ == kBRegSize) || (size_ == kHRegSize) || + (size_ == kSRegSize) || (size_ == kDRegSize) || + (size_ == kQRegSize)) && + (code_ < kNumberOfVRegisters); + } + + bool IsValidFPRegister() const { + return IsFPRegister() && (code_ < kNumberOfVRegisters); + } + + bool IsNone() const { + // kNoRegister types should always have size 0 and code 0. + VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0)); + VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0)); + + return type_ == kNoRegister; + } + + bool Aliases(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return (code_ == other.code_) && (type_ == other.type_); + } + + bool Is(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return Aliases(other) && (size_ == other.size_); + } + + bool IsZero() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kZeroRegCode); + } + + bool IsSP() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kSPRegInternalCode); + } + + bool IsRegister() const { + return type_ == kRegister; + } + + bool IsVRegister() const { + return type_ == kVRegister; + } + + bool IsFPRegister() const { + return IsS() || IsD(); + } + + bool IsW() const { return IsValidRegister() && Is32Bits(); } + bool IsX() const { return IsValidRegister() && Is64Bits(); } + + // These assertions ensure that the size and type of the register are as + // described. They do not consider the number of lanes that make up a vector. + // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD() + // does not imply Is1D() or Is8B(). + // Check the number of lanes, ie. the format of the vector, using methods such + // as Is8B(), Is1D(), etc. in the VRegister class. 
+ bool IsV() const { return IsVRegister(); } + bool IsB() const { return IsV() && Is8Bits(); } + bool IsH() const { return IsV() && Is16Bits(); } + bool IsS() const { return IsV() && Is32Bits(); } + bool IsD() const { return IsV() && Is64Bits(); } + bool IsQ() const { return IsV() && Is128Bits(); } + + const Register& W() const; + const Register& X() const; + const VRegister& V() const; + const VRegister& B() const; + const VRegister& H() const; + const VRegister& S() const; + const VRegister& D() const; + const VRegister& Q() const; + + bool IsSameSizeAndType(const CPURegister& other) const { + return (size_ == other.size_) && (type_ == other.type_); + } + + protected: + unsigned code_; + unsigned size_; + RegisterType type_; + + private: + bool IsValidOrNone() const { + return IsValid() || IsNone(); + } +}; + + +class Register : public CPURegister { + public: + Register() : CPURegister() {} + explicit Register(const CPURegister& other) + : CPURegister(other.code(), other.size(), other.type()) { + VIXL_ASSERT(IsValidRegister()); + } + constexpr Register(unsigned code, unsigned size) + : CPURegister(code, size, kRegister) {} + + constexpr Register(js::jit::Register r, unsigned size) + : CPURegister(r.code(), size, kRegister) {} + + bool IsValid() const { + VIXL_ASSERT(IsRegister() || IsNone()); + return IsValidRegister(); + } + + js::jit::Register asUnsized() const { + if (code_ == kSPRegInternalCode) + return js::jit::Register::FromCode((js::jit::Register::Code)kZeroRegCode); + return js::jit::Register::FromCode((js::jit::Register::Code)code_); + } + + + static const Register& WRegFromCode(unsigned code); + static const Register& XRegFromCode(unsigned code); + + private: + static const Register wregisters[]; + static const Register xregisters[]; +}; + + +class VRegister : public CPURegister { + public: + VRegister() : CPURegister(), lanes_(1) {} + explicit VRegister(const CPURegister& other) + : CPURegister(other.code(), other.size(), other.type()), lanes_(1) { + VIXL_ASSERT(IsValidVRegister()); + VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + constexpr VRegister(unsigned code, unsigned size, unsigned lanes = 1) + : CPURegister(code, size, kVRegister), lanes_(lanes) { + // VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + constexpr VRegister(js::jit::FloatRegister r) + : CPURegister(r.code_, r.size() * 8, kVRegister), lanes_(1) { + } + constexpr VRegister(js::jit::FloatRegister r, unsigned size) + : CPURegister(r.code_, size, kVRegister), lanes_(1) { + } + VRegister(unsigned code, VectorFormat format) + : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister), + lanes_(IsVectorFormat(format) ? 
LaneCountFromFormat(format) : 1) { + VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + + bool IsValid() const { + VIXL_ASSERT(IsVRegister() || IsNone()); + return IsValidVRegister(); + } + + static const VRegister& BRegFromCode(unsigned code); + static const VRegister& HRegFromCode(unsigned code); + static const VRegister& SRegFromCode(unsigned code); + static const VRegister& DRegFromCode(unsigned code); + static const VRegister& QRegFromCode(unsigned code); + static const VRegister& VRegFromCode(unsigned code); + + VRegister V8B() const { return VRegister(code_, kDRegSize, 8); } + VRegister V16B() const { return VRegister(code_, kQRegSize, 16); } + VRegister V4H() const { return VRegister(code_, kDRegSize, 4); } + VRegister V8H() const { return VRegister(code_, kQRegSize, 8); } + VRegister V2S() const { return VRegister(code_, kDRegSize, 2); } + VRegister V4S() const { return VRegister(code_, kQRegSize, 4); } + VRegister V2D() const { return VRegister(code_, kQRegSize, 2); } + VRegister V1D() const { return VRegister(code_, kDRegSize, 1); } + + bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); } + bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); } + bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); } + bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); } + bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); } + bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); } + bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); } + bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); } + + // For consistency, we assert the number of lanes of these scalar registers, + // even though there are no vectors of equivalent total size with which they + // could alias. + bool Is1B() const { + VIXL_ASSERT(!(Is8Bits() && IsVector())); + return Is8Bits(); + } + bool Is1H() const { + VIXL_ASSERT(!(Is16Bits() && IsVector())); + return Is16Bits(); + } + bool Is1S() const { + VIXL_ASSERT(!(Is32Bits() && IsVector())); + return Is32Bits(); + } + + bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; } + bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; } + bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; } + bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; } + + int lanes() const { + return lanes_; + } + + bool IsScalar() const { + return lanes_ == 1; + } + + bool IsVector() const { + return lanes_ > 1; + } + + bool IsSameFormat(const VRegister& other) const { + return (size_ == other.size_) && (lanes_ == other.lanes_); + } + + unsigned LaneSizeInBytes() const { + return SizeInBytes() / lanes_; + } + + unsigned LaneSizeInBits() const { + return LaneSizeInBytes() * 8; + } + + private: + static const VRegister bregisters[]; + static const VRegister hregisters[]; + static const VRegister sregisters[]; + static const VRegister dregisters[]; + static const VRegister qregisters[]; + static const VRegister vregisters[]; + int lanes_; +}; + + +// Backward compatibility for FPRegisters. +typedef VRegister FPRegister; + +// No*Reg is used to indicate an unused argument, or an error case. Note that +// these all compare equal (using the Is() method). The Register and VRegister +// variants are provided for convenience. +const Register NoReg; +const VRegister NoVReg; +const FPRegister NoFPReg; // For backward compatibility. 
+const CPURegister NoCPUReg; + + +#define DEFINE_REGISTERS(N) \ +constexpr Register w##N(N, kWRegSize); \ +constexpr Register x##N(N, kXRegSize); +REGISTER_CODE_LIST(DEFINE_REGISTERS) +#undef DEFINE_REGISTERS +constexpr Register wsp(kSPRegInternalCode, kWRegSize); +constexpr Register sp(kSPRegInternalCode, kXRegSize); + + +#define DEFINE_VREGISTERS(N) \ +constexpr VRegister b##N(N, kBRegSize); \ +constexpr VRegister h##N(N, kHRegSize); \ +constexpr VRegister s##N(N, kSRegSize); \ +constexpr VRegister d##N(N, kDRegSize); \ +constexpr VRegister q##N(N, kQRegSize); \ +constexpr VRegister v##N(N, kQRegSize); +REGISTER_CODE_LIST(DEFINE_VREGISTERS) +#undef DEFINE_VREGISTERS + + +// Registers aliases. +constexpr Register ip0 = x16; +constexpr Register ip1 = x17; +constexpr Register lr = x30; +constexpr Register xzr = x31; +constexpr Register wzr = w31; + + +// AreAliased returns true if any of the named registers overlap. Arguments +// set to NoReg are ignored. The system stack pointer may be specified. +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + + +// AreSameSizeAndType returns true if all of the specified registers have the +// same size, and are of the same type. The system stack pointer may be +// specified. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). +bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg, + const CPURegister& reg5 = NoCPUReg, + const CPURegister& reg6 = NoCPUReg, + const CPURegister& reg7 = NoCPUReg, + const CPURegister& reg8 = NoCPUReg); + + +// AreSameFormat returns true if all of the specified VRegisters have the same +// vector format. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoVReg). +bool AreSameFormat(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + +// AreConsecutive returns true if all of the specified VRegisters are +// consecutive in the register file. Arguments set to NoReg are ignored, as are +// any subsequent arguments. At least one argument (reg1) must be valid +// (not NoVReg). +bool AreConsecutive(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + +// Lists of registers. 
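+// A CPURegList packs registers of one type and size into a RegList bitmask,
+// e.g. (illustrative) CPURegList saved(x19, x20, x21); or, using the range
+// form below, CPURegList(CPURegister::kRegister, kXRegSize, 19, 28).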
+class CPURegList { + public: + explicit CPURegList(CPURegister reg1, + CPURegister reg2 = NoCPUReg, + CPURegister reg3 = NoCPUReg, + CPURegister reg4 = NoCPUReg) + : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()), + size_(reg1.size()), type_(reg1.type()) { + VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) + : list_(list), size_(size), type_(type) { + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, unsigned size, + unsigned first_reg, unsigned last_reg) + : size_(size), type_(type) { + VIXL_ASSERT(((type == CPURegister::kRegister) && + (last_reg < kNumberOfRegisters)) || + ((type == CPURegister::kVRegister) && + (last_reg < kNumberOfVRegisters))); + VIXL_ASSERT(last_reg >= first_reg); + list_ = (UINT64_C(1) << (last_reg + 1)) - 1; + list_ &= ~((UINT64_C(1) << first_reg) - 1); + VIXL_ASSERT(IsValid()); + } + + CPURegister::RegisterType type() const { + VIXL_ASSERT(IsValid()); + return type_; + } + + // Combine another CPURegList into this one. Registers that already exist in + // this list are left unchanged. The type and size of the registers in the + // 'other' list must match those in this list. + void Combine(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.RegisterSizeInBits() == size_); + list_ |= other.list(); + } + + // Remove every register in the other CPURegList from this one. Registers that + // do not exist in this list are ignored. The type and size of the registers + // in the 'other' list must match those in this list. + void Remove(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.RegisterSizeInBits() == size_); + list_ &= ~other.list(); + } + + // Variants of Combine and Remove which take a single register. + void Combine(const CPURegister& other) { + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.size() == size_); + Combine(other.code()); + } + + void Remove(const CPURegister& other) { + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.size() == size_); + Remove(other.code()); + } + + // Variants of Combine and Remove which take a single register by its code; + // the type and size of the register is inferred from this list. 
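+ // (Illustratively, on a list of X registers, Combine(0) adds x0 and
+ // Remove(30) drops lr.)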
+ void Combine(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ |= (UINT64_C(1) << code); + } + + void Remove(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ &= ~(UINT64_C(1) << code); + } + + static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_); + } + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_); + } + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + bool Overlaps(const CPURegList& other) const { + return (type_ == other.type_) && ((list_ & other.list_) != 0); + } + + RegList list() const { + VIXL_ASSERT(IsValid()); + return list_; + } + + void set_list(RegList new_list) { + VIXL_ASSERT(IsValid()); + list_ = new_list; + } + + // Remove all callee-saved registers from the list. This can be useful when + // preparing registers for an AAPCS64 function call, for example. + void RemoveCalleeSaved(); + + CPURegister PopLowestIndex(); + CPURegister PopHighestIndex(); + + // AAPCS64 callee-saved registers. + static CPURegList GetCalleeSaved(unsigned size = kXRegSize); + static CPURegList GetCalleeSavedV(unsigned size = kDRegSize); + + // AAPCS64 caller-saved registers. Note that this includes lr. + // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top + // 64-bits being caller-saved. + static CPURegList GetCallerSaved(unsigned size = kXRegSize); + static CPURegList GetCallerSavedV(unsigned size = kDRegSize); + + bool IsEmpty() const { + VIXL_ASSERT(IsValid()); + return list_ == 0; + } + + bool IncludesAliasOf(const CPURegister& other) const { + VIXL_ASSERT(IsValid()); + return (type_ == other.type()) && ((other.Bit() & list_) != 0); + } + + bool IncludesAliasOf(int code) const { + VIXL_ASSERT(IsValid()); + return ((code & list_) != 0); + } + + int Count() const { + VIXL_ASSERT(IsValid()); + return CountSetBits(list_); + } + + unsigned RegisterSizeInBits() const { + VIXL_ASSERT(IsValid()); + return size_; + } + + unsigned RegisterSizeInBytes() const { + int size_in_bits = RegisterSizeInBits(); + VIXL_ASSERT((size_in_bits % 8) == 0); + return size_in_bits / 8; + } + + unsigned TotalSizeInBytes() const { + VIXL_ASSERT(IsValid()); + return RegisterSizeInBytes() * Count(); + } + + private: + RegList list_; + unsigned size_; + CPURegister::RegisterType type_; + + bool IsValid() const; +}; + + +// AAPCS64 callee-saved registers. +extern const CPURegList kCalleeSaved; +extern const CPURegList kCalleeSavedV; + + +// AAPCS64 caller-saved registers. Note that this includes lr. +extern const CPURegList kCallerSaved; +extern const CPURegList kCallerSavedV; + + +// Operand. 
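+// An Operand wraps the flexible second source of a data-processing
+// instruction: an immediate, a shifted register or an extended register,
+// e.g. (illustrative) Operand(0x3f), Operand(x1, LSL, 4), Operand(w2, UXTW).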
+class Operand { + public: + // #<immediate> + // where <immediate> is int64_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(int64_t immediate = 0); // NOLINT(runtime/explicit) + + // rm, {<shift> #<shift_amount>} + // where <shift> is one of {LSL, LSR, ASR, ROR}. + // <shift_amount> is uint6_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(Register reg, + Shift shift = LSL, + unsigned shift_amount = 0); // NOLINT(runtime/explicit) + + // rm, {<extend> {#<shift_amount>}} + // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}. + // <shift_amount> is uint2_t. + explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0); + + // FIXME: Temporary constructors for compilation. + // FIXME: These should be removed -- Operand should not leak into shared code. + // FIXME: Something like an LAllocationUnion for {gpreg, fpreg, Address} is wanted. + explicit Operand(js::jit::Register) { + MOZ_CRASH("Operand with Register"); + } + explicit Operand(js::jit::FloatRegister) { + MOZ_CRASH("Operand with FloatRegister"); + } + explicit Operand(js::jit::Register, int32_t) { + MOZ_CRASH("Operand with implicit Address"); + } + + bool IsImmediate() const; + bool IsShiftedRegister() const; + bool IsExtendedRegister() const; + bool IsZero() const; + + // This returns an LSL shift (<= 4) operand as an equivalent extend operand, + // which helps in the encoding of instructions that use the stack pointer. + Operand ToExtendedRegister() const; + + int64_t immediate() const { + VIXL_ASSERT(IsImmediate()); + return immediate_; + } + + Register reg() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return reg_; + } + + CPURegister maybeReg() const { + if (IsShiftedRegister() || IsExtendedRegister()) + return reg_; + return NoCPUReg; + } + + Shift shift() const { + VIXL_ASSERT(IsShiftedRegister()); + return shift_; + } + + Extend extend() const { + VIXL_ASSERT(IsExtendedRegister()); + return extend_; + } + + unsigned shift_amount() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return shift_amount_; + } + + private: + int64_t immediate_; + Register reg_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + + +// MemOperand represents the addressing mode of a load or store instruction. +class MemOperand { + public: + explicit MemOperand(Register base, + int64_t offset = 0, + AddrMode addrmode = Offset); + MemOperand(Register base, + Register regoffset, + Shift shift = LSL, + unsigned shift_amount = 0); + MemOperand(Register base, + Register regoffset, + Extend extend, + unsigned shift_amount = 0); + MemOperand(Register base, + const Operand& offset, + AddrMode addrmode = Offset); + + // Adapter constructors using C++11 delegating. + // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary. + explicit MemOperand(js::jit::Address addr) + : MemOperand(addr.base.code() == 31 ? 
sp : Register(addr.base, 64), + (ptrdiff_t)addr.offset) { + } + + const Register& base() const { return base_; } + const Register& regoffset() const { return regoffset_; } + int64_t offset() const { return offset_; } + AddrMode addrmode() const { return addrmode_; } + Shift shift() const { return shift_; } + Extend extend() const { return extend_; } + unsigned shift_amount() const { return shift_amount_; } + bool IsImmediateOffset() const; + bool IsRegisterOffset() const; + bool IsPreIndex() const; + bool IsPostIndex() const; + + void AddOffset(int64_t offset); + + private: + Register base_; + Register regoffset_; + int64_t offset_; + AddrMode addrmode_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + + +// Control whether or not position-independent code should be emitted. +enum PositionIndependentCodeOption { + // All code generated will be position-independent; all branches and + // references to labels generated with the Label class will use PC-relative + // addressing. + PositionIndependentCode, + + // Allow VIXL to generate code that refers to absolute addresses. With this + // option, it will not be possible to copy the code buffer and run it from a + // different address; code must be generated in its final location. + PositionDependentCode, + + // Allow VIXL to assume that the bottom 12 bits of the address will be + // constant, but that the top 48 bits may change. This allows `adrp` to + // function in systems which copy code between pages, but otherwise maintain + // 4KB page alignment. + PageOffsetDependentCode +}; + + +// Control how scaled- and unscaled-offset loads and stores are generated. +enum LoadStoreScalingOption { + // Prefer scaled-immediate-offset instructions, but emit unscaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferScaledOffset, + + // Prefer unscaled-immediate-offset instructions, but emit scaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferUnscaledOffset, + + // Require scaled-immediate-offset instructions. + RequireScaledOffset, + + // Require unscaled-immediate-offset instructions. + RequireUnscaledOffset +}; + + +// Assembler. +class Assembler : public MozBaseAssembler { + public: + Assembler(PositionIndependentCodeOption pic = PositionIndependentCode); + + // System functions. + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. + void FinalizeCode(); + +#define COPYENUM(v) static const Condition v = vixl::v +#define COPYENUM_(v) static const Condition v = vixl::v##_ + COPYENUM(Equal); + COPYENUM(Zero); + COPYENUM(NotEqual); + COPYENUM(NonZero); + COPYENUM(AboveOrEqual); + COPYENUM(CarrySet); + COPYENUM(Below); + COPYENUM(CarryClear); + COPYENUM(Signed); + COPYENUM(NotSigned); + COPYENUM(Overflow); + COPYENUM(NoOverflow); + COPYENUM(Above); + COPYENUM(BelowOrEqual); + COPYENUM_(GreaterThanOrEqual); + COPYENUM_(LessThan); + COPYENUM_(GreaterThan); + COPYENUM_(LessThanOrEqual); + COPYENUM(Always); + COPYENUM(Never); +#undef COPYENUM +#undef COPYENUM_ + + // Bit set when a DoubleCondition does not map to a single ARM condition. + // The MacroAssembler must special-case these conditions, or else + // ConditionFromDoubleCondition will complain. 
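+ // (For example, DoubleEqualOrUnordered below carries this bit: no single
+ // AArch64 condition covers "equal or unordered", so callers typically emit
+ // one test on eq and a second on vs instead.)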
+ static const int DoubleConditionBitSpecial = 0x100; + + enum DoubleCondition { + DoubleOrdered = Condition::vc, + DoubleEqual = Condition::eq, + DoubleNotEqual = Condition::ne | DoubleConditionBitSpecial, + DoubleGreaterThan = Condition::gt, + DoubleGreaterThanOrEqual = Condition::ge, + DoubleLessThan = Condition::lo, // Could also use Condition::mi. + DoubleLessThanOrEqual = Condition::ls, + + // If either operand is NaN, these conditions always evaluate to true. + DoubleUnordered = Condition::vs, + DoubleEqualOrUnordered = Condition::eq | DoubleConditionBitSpecial, + DoubleNotEqualOrUnordered = Condition::ne, + DoubleGreaterThanOrUnordered = Condition::hi, + DoubleGreaterThanOrEqualOrUnordered = Condition::hs, + DoubleLessThanOrUnordered = Condition::lt, + DoubleLessThanOrEqualOrUnordered = Condition::le + }; + + static inline Condition InvertCondition(Condition cond) { + // Conditions al and nv behave identically, as "always true". They can't be + // inverted, because there is no "always false" condition. + VIXL_ASSERT((cond != al) && (cond != nv)); + return static_cast<Condition>(cond ^ 1); + } + + // This is chaging the condition codes for cmp a, b to the same codes for cmp b, a. + static inline Condition InvertCmpCondition(Condition cond) { + // Conditions al and nv behave identically, as "always true". They can't be + // inverted, because there is no "always false" condition. + switch (cond) { + case eq: + case ne: + return cond; + case gt: + return le; + case le: + return gt; + case ge: + return lt; + case lt: + return ge; + case hi: + return lo; + case lo: + return hi; + case hs: + return ls; + case ls: + return hs; + case mi: + return pl; + case pl: + return mi; + default: + MOZ_CRASH("TODO: figure this case out."); + } + return static_cast<Condition>(cond ^ 1); + } + + static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) { + VIXL_ASSERT(!(cond & DoubleConditionBitSpecial)); + return static_cast<Condition>(cond); + } + + // Instruction set functions. + + // Branch / Jump instructions. + // Branch to register. + void br(const Register& xn); + static void br(Instruction* at, const Register& xn); + + // Branch with link to register. + void blr(const Register& xn); + static void blr(Instruction* at, const Register& blr); + + // Branch to register with return hint. + void ret(const Register& xn = lr); + + // Unconditional branch to label. + BufferOffset b(Label* label); + + // Conditional branch to label. + BufferOffset b(Label* label, Condition cond); + + // Unconditional branch to PC offset. + BufferOffset b(int imm26); + static void b(Instruction* at, int imm26); + + // Conditional branch to PC offset. + BufferOffset b(int imm19, Condition cond); + static void b(Instruction*at, int imm19, Condition cond); + + // Branch with link to label. + void bl(Label* label); + + // Branch with link to PC offset. + void bl(int imm26); + static void bl(Instruction* at, int imm26); + + // Compare and branch to label if zero. + void cbz(const Register& rt, Label* label); + + // Compare and branch to PC offset if zero. + void cbz(const Register& rt, int imm19); + static void cbz(Instruction* at, const Register& rt, int imm19); + + // Compare and branch to label if not zero. + void cbnz(const Register& rt, Label* label); + + // Compare and branch to PC offset if not zero. + void cbnz(const Register& rt, int imm19); + static void cbnz(Instruction* at, const Register& rt, int imm19); + + // Table lookup from one register. 
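+ // (Each index byte of vm selects a byte of the table register(s) for vd;
+ // out-of-range indices yield zero for tbl, whereas tbx below leaves the
+ // destination byte unchanged.)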
+ void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Table lookup from two registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm); + + // Table lookup from three registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm); + + // Table lookup from four registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm); + + // Table lookup extension from one register. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Table lookup extension from two registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm); + + // Table lookup extension from three registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm); + + // Table lookup extension from four registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm); + + // Test bit and branch to label if zero. + void tbz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if zero. + void tbz(const Register& rt, unsigned bit_pos, int imm14); + static void tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14); + + // Test bit and branch to label if not zero. + void tbnz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if not zero. + void tbnz(const Register& rt, unsigned bit_pos, int imm14); + static void tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14); + + // Address calculation instructions. + // Calculate a PC-relative address. Unlike for branches the offset in adr is + // unscaled (i.e. the result can be unaligned). + + // Calculate the address of a label. + void adr(const Register& rd, Label* label); + + // Calculate the address of a PC offset. + void adr(const Register& rd, int imm21); + static void adr(Instruction* at, const Register& rd, int imm21); + + // Calculate the page address of a label. + void adrp(const Register& rd, Label* label); + + // Calculate the page address of a PC offset. + void adrp(const Register& rd, int imm21); + static void adrp(Instruction* at, const Register& rd, int imm21); + + // Data Processing instructions. + // Add. + void add(const Register& rd, + const Register& rn, + const Operand& operand); + + // Add and update status flags. + void adds(const Register& rd, + const Register& rn, + const Operand& operand); + + // Compare negative. + void cmn(const Register& rn, const Operand& operand); + + // Subtract. + void sub(const Register& rd, + const Register& rn, + const Operand& operand); + + // Subtract and update status flags. + void subs(const Register& rd, + const Register& rn, + const Operand& operand); + + // Compare. + void cmp(const Register& rn, const Operand& operand); + + // Negate. + void neg(const Register& rd, + const Operand& operand); + + // Negate and update status flags. + void negs(const Register& rd, + const Operand& operand); + + // Add with carry bit. + void adc(const Register& rd, + const Register& rn, + const Operand& operand); + + // Add with carry bit and update status flags. 
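+ // (Illustrative multi-word use, assuming operands live in {x1:x0} and {x3:x2}:
+ //   adds(x0, x0, Operand(x2));  // low halves, sets the carry flag
+ //   adcs(x1, x1, Operand(x3));  // high halves, consumes the carry
+ // together form a 128-bit addition.)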
+ void adcs(const Register& rd, + const Register& rn, + const Operand& operand); + + // Subtract with carry bit. + void sbc(const Register& rd, + const Register& rn, + const Operand& operand); + + // Subtract with carry bit and update status flags. + void sbcs(const Register& rd, + const Register& rn, + const Operand& operand); + + // Negate with carry bit. + void ngc(const Register& rd, + const Operand& operand); + + // Negate with carry bit and update status flags. + void ngcs(const Register& rd, + const Operand& operand); + + // Logical instructions. + // Bitwise and (A & B). + void and_(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bitwise and (A & B) and update status flags. + BufferOffset ands(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bit test and set flags. + BufferOffset tst(const Register& rn, const Operand& operand); + + // Bit clear (A & ~B). + void bic(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bit clear (A & ~B) and update status flags. + void bics(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bitwise or (A | B). + void orr(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise nor (A | ~B). + void orn(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise eor/xor (A ^ B). + void eor(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise enor/xnor (A ^ ~B). + void eon(const Register& rd, const Register& rn, const Operand& operand); + + // Logical shift left by variable. + void lslv(const Register& rd, const Register& rn, const Register& rm); + + // Logical shift right by variable. + void lsrv(const Register& rd, const Register& rn, const Register& rm); + + // Arithmetic shift right by variable. + void asrv(const Register& rd, const Register& rn, const Register& rm); + + // Rotate right by variable. + void rorv(const Register& rd, const Register& rn, const Register& rm); + + // Bitfield instructions. + // Bitfield move. + void bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms); + + // Signed bitfield move. + void sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms); + + // Unsigned bitfield move. + void ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms); + + // Bfm aliases. + // Bitfield insert. + void bfi(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); + } + + // Bitfield extract and insert low. + void bfxil(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + bfm(rd, rn, lsb, lsb + width - 1); + } + + // Sbfm aliases. + // Arithmetic shift right. + void asr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(shift < rd.size()); + sbfm(rd, rn, shift, rd.size() - 1); + } + + // Signed bitfield insert with zero at right. + void sbfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); + } + + // Signed bitfield extract. 
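+ // (rd receives bits <lsb + width - 1:lsb> of rn, sign-extended; this is an
+ // alias of sbfm(rd, rn, lsb, lsb + width - 1), as the body below shows.)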
+ void sbfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + sbfm(rd, rn, lsb, lsb + width - 1); + } + + // Signed extend byte. + void sxtb(const Register& rd, const Register& rn) { + sbfm(rd, rn, 0, 7); + } + + // Signed extend halfword. + void sxth(const Register& rd, const Register& rn) { + sbfm(rd, rn, 0, 15); + } + + // Signed extend word. + void sxtw(const Register& rd, const Register& rn) { + sbfm(rd, rn, 0, 31); + } + + // Ubfm aliases. + // Logical shift left. + void lsl(const Register& rd, const Register& rn, unsigned shift) { + unsigned reg_size = rd.size(); + VIXL_ASSERT(shift < reg_size); + ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1); + } + + // Logical shift right. + void lsr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(shift < rd.size()); + ubfm(rd, rn, shift, rd.size() - 1); + } + + // Unsigned bitfield insert with zero at right. + void ubfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); + } + + // Unsigned bitfield extract. + void ubfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + ubfm(rd, rn, lsb, lsb + width - 1); + } + + // Unsigned extend byte. + void uxtb(const Register& rd, const Register& rn) { + ubfm(rd, rn, 0, 7); + } + + // Unsigned extend halfword. + void uxth(const Register& rd, const Register& rn) { + ubfm(rd, rn, 0, 15); + } + + // Unsigned extend word. + void uxtw(const Register& rd, const Register& rn) { + ubfm(rd, rn, 0, 31); + } + + // Extract. + void extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb); + + // Conditional select: rd = cond ? rn : rm. + void csel(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional select increment: rd = cond ? rn : rm + 1. + void csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional select inversion: rd = cond ? rn : ~rm. + void csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional select negation: rd = cond ? rn : -rm. + void csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional set: rd = cond ? 1 : 0. + void cset(const Register& rd, Condition cond); + + // Conditional set mask: rd = cond ? -1 : 0. + void csetm(const Register& rd, Condition cond); + + // Conditional increment: rd = cond ? rn + 1 : rn. + void cinc(const Register& rd, const Register& rn, Condition cond); + + // Conditional invert: rd = cond ? ~rn : rn. + void cinv(const Register& rd, const Register& rn, Condition cond); + + // Conditional negate: rd = cond ? -rn : rn. + void cneg(const Register& rd, const Register& rn, Condition cond); + + // Rotate right. + void ror(const Register& rd, const Register& rs, unsigned shift) { + extr(rd, rs, rs, shift); + } + + // Conditional comparison. + // Conditional compare negative. + void ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + + // Conditional compare. + void ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + + // CRC-32 checksum from byte. 
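+ // (The crc32* forms fold the low byte/half-word/word/double-word of rm into
+ // the accumulator rn and write the result to rd, using the standard CRC-32
+ // polynomial; the crc32c* forms below use the Castagnoli polynomial.)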
+ void crc32b(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32 checksum from half-word. + void crc32h(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32 checksum from word. + void crc32w(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32 checksum from double word. + void crc32x(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32 C checksum from byte. + void crc32cb(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32 C checksum from half-word. + void crc32ch(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32 C checksum from word. + void crc32cw(const Register& rd, + const Register& rn, + const Register& rm); + + // CRC-32C checksum from double word. + void crc32cx(const Register& rd, + const Register& rn, + const Register& rm); + + // Multiply. + void mul(const Register& rd, const Register& rn, const Register& rm); + + // Negated multiply. + void mneg(const Register& rd, const Register& rn, const Register& rm); + + // Signed long multiply: 32 x 32 -> 64-bit. + void smull(const Register& rd, const Register& rn, const Register& rm); + + // Signed multiply high: 64 x 64 -> 64-bit <127:64>. + void smulh(const Register& xd, const Register& xn, const Register& xm); + + // Multiply and accumulate. + void madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Multiply and subtract. + void msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit. + void smaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit. + void umaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Unsigned long multiply: 32 x 32 -> 64-bit. + void umull(const Register& rd, + const Register& rn, + const Register& rm) { + umaddl(rd, rn, rm, xzr); + } + + // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>. + void umulh(const Register& xd, + const Register& xn, + const Register& xm); + + // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit. + void smsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit. + void umsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Signed integer divide. + void sdiv(const Register& rd, const Register& rn, const Register& rm); + + // Unsigned integer divide. + void udiv(const Register& rd, const Register& rn, const Register& rm); + + // Bit reverse. + void rbit(const Register& rd, const Register& rn); + + // Reverse bytes in 16-bit half words. + void rev16(const Register& rd, const Register& rn); + + // Reverse bytes in 32-bit words. + void rev32(const Register& rd, const Register& rn); + + // Reverse bytes. + void rev(const Register& rd, const Register& rn); + + // Count leading zeroes. + void clz(const Register& rd, const Register& rn); + + // Count leading sign bits. + void cls(const Register& rd, const Register& rn); + + // Memory instructions. + // Load integer or FP register. + void ldr(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store integer or FP register. 
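+ // (With the default PreferScaledOffset, an in-range, size-aligned immediate
+ // offset uses the scaled 12-bit encoding (see IsImmLSScaled); otherwise the
+ // assembler falls back to unscaled-offset, register-offset or pre/post-index
+ // forms, as described by LoadStoreScalingOption above.)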
+ void str(const CPURegister& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load word with sign extension. + void ldrsw(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte. + void ldrb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store byte. + void strb(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte with sign extension. + void ldrsb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word. + void ldrh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store half-word. + void strh(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word with sign extension. + void ldrsh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load integer or FP register (with unscaled offset). + void ldur(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store integer or FP register (with unscaled offset). + void stur(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load word with sign extension. + void ldursw(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte (with unscaled offset). + void ldurb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store byte (with unscaled offset). + void sturb(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte with sign extension (and unscaled offset). + void ldursb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word (with unscaled offset). + void ldurh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store half-word (with unscaled offset). + void sturh(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word with sign extension (and unscaled offset). + void ldursh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load integer or FP register pair. + void ldp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair. + void stp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& dst); + + // Load word pair with sign extension. + void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src); + + // Load integer or FP register pair, non-temporal. + void ldnp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair, non-temporal. + void stnp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& dst); + + // Load integer or FP register from pc + imm19 << 2. + void ldr(const CPURegister& rt, int imm19); + static void ldr(Instruction* at, const CPURegister& rt, int imm19); + + // Load word with sign extension from pc + imm19 << 2. + void ldrsw(const Register& rt, int imm19); + + // Store exclusive byte. 
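+ // (For the st*xr* family, rs is a status register: it is set to 0 if the
+ // store succeeded and to 1 if it failed because exclusive access was lost.)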
+ void stxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive half-word. + void stxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive register. + void stxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load exclusive byte. + void ldxrb(const Register& rt, const MemOperand& src); + + // Load exclusive half-word. + void ldxrh(const Register& rt, const MemOperand& src); + + // Load exclusive register. + void ldxr(const Register& rt, const MemOperand& src); + + // Store exclusive register pair. + void stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load exclusive register pair. + void ldxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release exclusive byte. + void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive half-word. + void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive register. + void stlxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load-acquire exclusive byte. + void ldaxrb(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive half-word. + void ldaxrh(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive register. + void ldaxr(const Register& rt, const MemOperand& src); + + // Store-release exclusive register pair. + void stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load-acquire exclusive register pair. + void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release byte. + void stlrb(const Register& rt, const MemOperand& dst); + + // Store-release half-word. + void stlrh(const Register& rt, const MemOperand& dst); + + // Store-release register. + void stlr(const Register& rt, const MemOperand& dst); + + // Load-acquire byte. + void ldarb(const Register& rt, const MemOperand& src); + + // Load-acquire half-word. + void ldarh(const Register& rt, const MemOperand& src); + + // Load-acquire register. + void ldar(const Register& rt, const MemOperand& src); + + // Prefetch memory. + void prfm(PrefetchOperation op, const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // Prefetch memory (with unscaled offset). + void prfum(PrefetchOperation op, const MemOperand& addr, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Prefetch from pc + imm19 << 2. + void prfm(PrefetchOperation op, int imm19); + + // Move instructions. The default shift of -1 indicates that the move + // instruction will calculate an appropriate 16-bit immediate and left shift + // that is equal to the 64-bit immediate argument. If an explicit left shift + // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value. + // + // For movk, an explicit shift can be used to indicate which half word should + // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant + // half word with zero, whereas movk(x0, 0, 48) will overwrite the + // most-significant. + + // Move immediate and keep. + void movk(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVK); + } + + // Move inverted immediate. + void movn(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVN); + } + + // Move immediate. 
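+ // (Illustratively, movz(x0, 0x1234, 16) sets x0 to 0x12340000, while
+ // movz(x0, 0x1234) lets the assembler choose the shift itself.)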
+ void movz(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVZ); + } + + // Misc instructions. + // Monitor debug-mode breakpoint. + void brk(int code); + + // Halting debug-mode breakpoint. + void hlt(int code); + + // Generate exception targeting EL1. + void svc(int code); + static void svc(Instruction* at, int code); + + // Move register to register. + void mov(const Register& rd, const Register& rn); + + // Move inverted operand to register. + void mvn(const Register& rd, const Operand& operand); + + // System instructions. + // Move to register from system register. + void mrs(const Register& rt, SystemRegister sysreg); + + // Move from register to system register. + void msr(SystemRegister sysreg, const Register& rt); + + // System instruction. + void sys(int op1, int crn, int crm, int op2, const Register& rt = xzr); + + // System instruction with pre-encoded op (op1:crn:crm:op2). + void sys(int op, const Register& rt = xzr); + + // System data cache operation. + void dc(DataCacheOp op, const Register& rt); + + // System instruction cache operation. + void ic(InstructionCacheOp op, const Register& rt); + + // System hint. + BufferOffset hint(SystemHint code); + static void hint(Instruction* at, SystemHint code); + + // Clear exclusive monitor. + void clrex(int imm4 = 0xf); + + // Data memory barrier. + void dmb(BarrierDomain domain, BarrierType type); + + // Data synchronization barrier. + void dsb(BarrierDomain domain, BarrierType type); + + // Instruction synchronization barrier. + void isb(); + + // Alias for system instructions. + // No-op. + BufferOffset nop() { + return hint(NOP); + } + static void nop(Instruction* at); + + // FP and NEON instructions. + // Move double precision immediate to FP register. + void fmov(const VRegister& vd, double imm); + + // Move single precision immediate to FP register. + void fmov(const VRegister& vd, float imm); + + // Move FP register to register. + void fmov(const Register& rd, const VRegister& fn); + + // Move register to FP register. + void fmov(const VRegister& vd, const Register& rn); + + // Move FP register to FP register. + void fmov(const VRegister& vd, const VRegister& fn); + + // Move 64-bit register to top half of 128-bit FP register. + void fmov(const VRegister& vd, int index, const Register& rn); + + // Move top half of 128-bit FP register to 64-bit register. + void fmov(const Register& rd, const VRegister& vn, int index); + + // FP add. + void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP subtract. + void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP multiply. + void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add. + void fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract. + void fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-add and negate. + void fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract and negate. + void fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP multiply-negate scalar. + void fnmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP reciprocal exponent scalar. + void frecpx(const VRegister& vd, + const VRegister& vn); + + // FP divide. 
+ void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP maximum. + void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP minimum. + void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP maximum number. + void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP minimum number. + void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP absolute. + void fabs(const VRegister& vd, const VRegister& vn); + + // FP negate. + void fneg(const VRegister& vd, const VRegister& vn); + + // FP square root. + void fsqrt(const VRegister& vd, const VRegister& vn); + + // FP round to integer, nearest with ties to away. + void frinta(const VRegister& vd, const VRegister& vn); + + // FP round to integer, implicit rounding. + void frinti(const VRegister& vd, const VRegister& vn); + + // FP round to integer, toward minus infinity. + void frintm(const VRegister& vd, const VRegister& vn); + + // FP round to integer, nearest with ties to even. + void frintn(const VRegister& vd, const VRegister& vn); + + // FP round to integer, toward plus infinity. + void frintp(const VRegister& vd, const VRegister& vn); + + // FP round to integer, exact, implicit rounding. + void frintx(const VRegister& vd, const VRegister& vn); + + // FP round to integer, towards zero. + void frintz(const VRegister& vd, const VRegister& vn); + + void FPCompareMacro(const VRegister& vn, + double value, + FPTrapFlags trap); + + void FPCompareMacro(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap); + + // FP compare registers. + void fcmp(const VRegister& vn, const VRegister& vm); + + // FP compare immediate. + void fcmp(const VRegister& vn, double value); + + void FPCCompareMacro(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap); + + // FP conditional compare. + void fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond); + + // FP signaling compare registers. + void fcmpe(const VRegister& vn, const VRegister& vm); + + // FP signaling compare immediate. + void fcmpe(const VRegister& vn, double value); + + // FP conditional signaling compare. + void fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond); + + // FP conditional select. + void fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond); + + // Common FP Convert functions. + void NEONFPConvertToInt(const Register& rd, + const VRegister& vn, + Instr op); + void NEONFPConvertToInt(const VRegister& vd, + const VRegister& vn, + Instr op); + + // FP convert between precisions. + void fcvt(const VRegister& vd, const VRegister& vn); + + // FP convert to higher precision. + void fcvtl(const VRegister& vd, const VRegister& vn); + + // FP convert to higher precision (second part). + void fcvtl2(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision. + void fcvtn(const VRegister& vd, const VRegister& vn); + + // FP convert to lower prevision (second part). + void fcvtn2(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision, rounding to odd. + void fcvtxn(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision, rounding to odd (second part). + void fcvtxn2(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to away. 
+ void fcvtas(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to away. + void fcvtau(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to away. + void fcvtas(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to away. + void fcvtau(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, round towards -infinity. + void fcvtms(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, round towards -infinity. + void fcvtmu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, round towards -infinity. + void fcvtms(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, round towards -infinity. + void fcvtmu(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to even. + void fcvtns(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to even. + void fcvtnu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to even. + void fcvtns(const VRegister& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to even. + void fcvtnu(const VRegister& rd, const VRegister& vn); + + // FP convert to signed integer or fixed-point, round towards zero. + void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0); + + // FP convert to unsigned integer or fixed-point, round towards zero. + void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0); + + // FP convert to signed integer or fixed-point, round towards zero. + void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0); + + // FP convert to unsigned integer or fixed-point, round towards zero. + void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0); + + // FP convert to signed integer, round towards +infinity. + void fcvtps(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, round towards +infinity. + void fcvtps(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const VRegister& vd, const VRegister& vn); + + // Convert signed integer or fixed point to FP. + void scvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert unsigned integer or fixed point to FP. + void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert signed integer or fixed-point to FP. + void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Convert unsigned integer or fixed-point to FP. + void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Unsigned absolute difference. + void uabd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed absolute difference. + void sabd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned absolute difference and accumulate. + void uaba(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed absolute difference and accumulate. + void saba(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Add. + void add(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Subtract. 
+ void sub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned halving add. + void uhadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed halving add. + void shadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned rounding halving add. + void urhadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed rounding halving add. + void srhadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned halving sub. + void uhsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed halving sub. + void shsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned saturating add. + void uqadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating add. + void sqadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned saturating subtract. + void uqsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating subtract. + void sqsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Add pairwise. + void addp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Add pair of elements scalar. + void addp(const VRegister& vd, + const VRegister& vn); + + // Multiply-add to accumulator. + void mla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Multiply-subtract to accumulator. + void mls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Multiply. + void mul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Multiply by scalar element. + void mul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Multiply-add by scalar element. + void mla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Multiply-subtract by scalar element. + void mls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element. + void smlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element (second part). + void smlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-add by scalar element. + void umlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-add by scalar element (second part). + void umlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-sub by scalar element. + void smlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-sub by scalar element (second part). + void smlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-sub by scalar element. + void umlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-sub by scalar element (second part). + void umlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply by scalar element. 
+ void smull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply by scalar element (second part). + void smull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply by scalar element. + void umull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply by scalar element (second part). + void umull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating double long multiply by element. + void sqdmull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating double long multiply by element (second part). + void sqdmull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-add by element. + void sqdmlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-add by element (second part). + void sqdmlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-sub by element. + void sqdmlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-sub by element (second part). + void sqdmlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Compare equal. + void cmeq(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Compare signed greater than or equal. + void cmge(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Compare signed greater than. + void cmgt(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Compare unsigned higher. + void cmhi(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Compare unsigned higher or same. + void cmhs(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Compare bitwise test bits nonzero. + void cmtst(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Compare bitwise to zero. + void cmeq(const VRegister& vd, + const VRegister& vn, + int value); + + // Compare signed greater than or equal to zero. + void cmge(const VRegister& vd, + const VRegister& vn, + int value); + + // Compare signed greater than zero. + void cmgt(const VRegister& vd, + const VRegister& vn, + int value); + + // Compare signed less than or equal to zero. + void cmle(const VRegister& vd, + const VRegister& vn, + int value); + + // Compare signed less than zero. + void cmlt(const VRegister& vd, + const VRegister& vn, + int value); + + // Signed shift left by register. + void sshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned shift left by register. + void ushl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating shift left by register. + void sqshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned saturating shift left by register. + void uqshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed rounding shift left by register. + void srshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned rounding shift left by register. 
+ void urshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating rounding shift left by register. + void sqrshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned saturating rounding shift left by register. + void uqrshl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise and. + void and_(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise or. + void orr(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise or immediate. + void orr(const VRegister& vd, + const int imm8, + const int left_shift = 0); + + // Move register to register. + void mov(const VRegister& vd, + const VRegister& vn); + + // Bitwise orn. + void orn(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise eor. + void eor(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bit clear immediate. + void bic(const VRegister& vd, + const int imm8, + const int left_shift = 0); + + // Bit clear. + void bic(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise insert if false. + void bif(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise insert if true. + void bit(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Bitwise select. + void bsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Polynomial multiply. + void pmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Vector move immediate. + void movi(const VRegister& vd, + const uint64_t imm, + Shift shift = LSL, + const int shift_amount = 0); + + // Bitwise not. + void mvn(const VRegister& vd, + const VRegister& vn); + + // Vector move inverted immediate. + void mvni(const VRegister& vd, + const int imm8, + Shift shift = LSL, + const int shift_amount = 0); + + // Signed saturating accumulate of unsigned value. + void suqadd(const VRegister& vd, + const VRegister& vn); + + // Unsigned saturating accumulate of signed value. + void usqadd(const VRegister& vd, + const VRegister& vn); + + // Absolute value. + void abs(const VRegister& vd, + const VRegister& vn); + + // Signed saturating absolute value. + void sqabs(const VRegister& vd, + const VRegister& vn); + + // Negate. + void neg(const VRegister& vd, + const VRegister& vn); + + // Signed saturating negate. + void sqneg(const VRegister& vd, + const VRegister& vn); + + // Bitwise not. + void not_(const VRegister& vd, + const VRegister& vn); + + // Extract narrow. + void xtn(const VRegister& vd, + const VRegister& vn); + + // Extract narrow (second part). + void xtn2(const VRegister& vd, + const VRegister& vn); + + // Signed saturating extract narrow. + void sqxtn(const VRegister& vd, + const VRegister& vn); + + // Signed saturating extract narrow (second part). + void sqxtn2(const VRegister& vd, + const VRegister& vn); + + // Unsigned saturating extract narrow. + void uqxtn(const VRegister& vd, + const VRegister& vn); + + // Unsigned saturating extract narrow (second part). + void uqxtn2(const VRegister& vd, + const VRegister& vn); + + // Signed saturating extract unsigned narrow. + void sqxtun(const VRegister& vd, + const VRegister& vn); + + // Signed saturating extract unsigned narrow (second part). + void sqxtun2(const VRegister& vd, + const VRegister& vn); + + // Extract vector from pair of vectors. 
+ void ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index); + + // Duplicate vector element to vector or scalar. + void dup(const VRegister& vd, + const VRegister& vn, + int vn_index); + + // Move vector element to scalar. + void mov(const VRegister& vd, + const VRegister& vn, + int vn_index); + + // Duplicate general-purpose register to vector. + void dup(const VRegister& vd, + const Register& rn); + + // Insert vector element from another vector element. + void ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index); + + // Move vector element to another vector element. + void mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index); + + // Insert vector element from general-purpose register. + void ins(const VRegister& vd, + int vd_index, + const Register& rn); + + // Move general-purpose register to a vector element. + void mov(const VRegister& vd, + int vd_index, + const Register& rn); + + // Unsigned move vector element to general-purpose register. + void umov(const Register& rd, + const VRegister& vn, + int vn_index); + + // Move vector element to general-purpose register. + void mov(const Register& rd, + const VRegister& vn, + int vn_index); + + // Signed move vector element to general-purpose register. + void smov(const Register& rd, + const VRegister& vn, + int vn_index); + + // One-element structure load to one register. + void ld1(const VRegister& vt, + const MemOperand& src); + + // One-element structure load to two registers. + void ld1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src); + + // One-element structure load to three registers. + void ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // One-element structure load to four registers. + void ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // One-element single structure load to one lane. + void ld1(const VRegister& vt, + int lane, + const MemOperand& src); + + // One-element single structure load to all lanes. + void ld1r(const VRegister& vt, + const MemOperand& src); + + // Two-element structure load. + void ld2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src); + + // Two-element single structure load to one lane. + void ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src); + + // Two-element single structure load to all lanes. + void ld2r(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src); + + // Three-element structure load. + void ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure load to one lane. + void ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src); + + // Three-element single structure load to all lanes. + void ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Four-element structure load. + void ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Four-element single structure load to one lane. + void ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src); + + // Four-element single structure load to all lanes. 
+ void ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Count leading sign bits. + void cls(const VRegister& vd, + const VRegister& vn); + + // Count leading zero bits (vector). + void clz(const VRegister& vd, + const VRegister& vn); + + // Population count per byte. + void cnt(const VRegister& vd, + const VRegister& vn); + + // Reverse bit order. + void rbit(const VRegister& vd, + const VRegister& vn); + + // Reverse elements in 16-bit halfwords. + void rev16(const VRegister& vd, + const VRegister& vn); + + // Reverse elements in 32-bit words. + void rev32(const VRegister& vd, + const VRegister& vn); + + // Reverse elements in 64-bit doublewords. + void rev64(const VRegister& vd, + const VRegister& vn); + + // Unsigned reciprocal square root estimate. + void ursqrte(const VRegister& vd, + const VRegister& vn); + + // Unsigned reciprocal estimate. + void urecpe(const VRegister& vd, + const VRegister& vn); + + // Signed pairwise long add. + void saddlp(const VRegister& vd, + const VRegister& vn); + + // Unsigned pairwise long add. + void uaddlp(const VRegister& vd, + const VRegister& vn); + + // Signed pairwise long add and accumulate. + void sadalp(const VRegister& vd, + const VRegister& vn); + + // Unsigned pairwise long add and accumulate. + void uadalp(const VRegister& vd, + const VRegister& vn); + + // Shift left by immediate. + void shl(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating shift left by immediate. + void sqshl(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating shift left unsigned by immediate. + void sqshlu(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned saturating shift left by immediate. + void uqshl(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed shift left long by immediate. + void sshll(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed shift left long by immediate (second part). + void sshll2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed extend long. + void sxtl(const VRegister& vd, + const VRegister& vn); + + // Signed extend long (second part). + void sxtl2(const VRegister& vd, + const VRegister& vn); + + // Unsigned shift left long by immediate. + void ushll(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned shift left long by immediate (second part). + void ushll2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Shift left long by element size. + void shll(const VRegister& vd, + const VRegister& vn, + int shift); + + // Shift left long by element size (second part). + void shll2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned extend long. + void uxtl(const VRegister& vd, + const VRegister& vn); + + // Unsigned extend long (second part). + void uxtl2(const VRegister& vd, + const VRegister& vn); + + // Shift left by immediate and insert. + void sli(const VRegister& vd, + const VRegister& vn, + int shift); + + // Shift right by immediate and insert. + void sri(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed maximum. + void smax(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed pairwise maximum. + void smaxp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Add across vector. + void addv(const VRegister& vd, + const VRegister& vn); + + // Signed add long across vector. 
+ void saddlv(const VRegister& vd, + const VRegister& vn); + + // Unsigned add long across vector. + void uaddlv(const VRegister& vd, + const VRegister& vn); + + // FP maximum number across vector. + void fmaxnmv(const VRegister& vd, + const VRegister& vn); + + // FP maximum across vector. + void fmaxv(const VRegister& vd, + const VRegister& vn); + + // FP minimum number across vector. + void fminnmv(const VRegister& vd, + const VRegister& vn); + + // FP minimum across vector. + void fminv(const VRegister& vd, + const VRegister& vn); + + // Signed maximum across vector. + void smaxv(const VRegister& vd, + const VRegister& vn); + + // Signed minimum. + void smin(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed minimum pairwise. + void sminp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed minimum across vector. + void sminv(const VRegister& vd, + const VRegister& vn); + + // One-element structure store from one register. + void st1(const VRegister& vt, + const MemOperand& src); + + // One-element structure store from two registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src); + + // One-element structure store from three registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // One-element structure store from four registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // One-element single structure store from one lane. + void st1(const VRegister& vt, + int lane, + const MemOperand& src); + + // Two-element structure store from two registers. + void st2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src); + + // Two-element single structure store from two lanes. + void st2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src); + + // Three-element structure store from three registers. + void st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure store from three lanes. + void st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src); + + // Four-element structure store from four registers. + void st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Four-element single structure store from four lanes. + void st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src); + + // Unsigned add long. + void uaddl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned add long (second part). + void uaddl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned add wide. + void uaddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned add wide (second part). + void uaddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed add long. + void saddl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed add long (second part). + void saddl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed add wide. + void saddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed add wide (second part). 
+ void saddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned subtract long. + void usubl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned subtract long (second part). + void usubl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned subtract wide. + void usubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned subtract wide (second part). + void usubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed subtract long. + void ssubl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed subtract long (second part). + void ssubl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed integer subtract wide. + void ssubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed integer subtract wide (second part). + void ssubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned maximum. + void umax(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned pairwise maximum. + void umaxp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned maximum across vector. + void umaxv(const VRegister& vd, + const VRegister& vn); + + // Unsigned minimum. + void umin(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned pairwise minimum. + void uminp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned minimum across vector. + void uminv(const VRegister& vd, + const VRegister& vn); + + // Transpose vectors (primary). + void trn1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Transpose vectors (secondary). + void trn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unzip vectors (primary). + void uzp1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unzip vectors (secondary). + void uzp2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Zip vectors (primary). + void zip1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Zip vectors (secondary). + void zip2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed shift right by immediate. + void sshr(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned shift right by immediate. + void ushr(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed rounding shift right by immediate. + void srshr(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned rounding shift right by immediate. + void urshr(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed shift right by immediate and accumulate. + void ssra(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned shift right by immediate and accumulate. + void usra(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed rounding shift right by immediate and accumulate. + void srsra(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned rounding shift right by immediate and accumulate. + void ursra(const VRegister& vd, + const VRegister& vn, + int shift); + + // Shift right narrow by immediate. + void shrn(const VRegister& vd, + const VRegister& vn, + int shift); + + // Shift right narrow by immediate (second part). 
+ void shrn2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Rounding shift right narrow by immediate. + void rshrn(const VRegister& vd, + const VRegister& vn, + int shift); + + // Rounding shift right narrow by immediate (second part). + void rshrn2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned saturating shift right narrow by immediate. + void uqshrn(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned saturating shift right narrow by immediate (second part). + void uqshrn2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned saturating rounding shift right narrow by immediate. + void uqrshrn(const VRegister& vd, + const VRegister& vn, + int shift); + + // Unsigned saturating rounding shift right narrow by immediate (second part). + void uqrshrn2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating shift right narrow by immediate. + void sqshrn(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating shift right narrow by immediate (second part). + void sqshrn2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating rounded shift right narrow by immediate. + void sqrshrn(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating rounded shift right narrow by immediate (second part). + void sqrshrn2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating shift right unsigned narrow by immediate. + void sqshrun(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed saturating shift right unsigned narrow by immediate (second part). + void sqshrun2(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed sat rounded shift right unsigned narrow by immediate. + void sqrshrun(const VRegister& vd, + const VRegister& vn, + int shift); + + // Signed sat rounded shift right unsigned narrow by immediate (second part). + void sqrshrun2(const VRegister& vd, + const VRegister& vn, + int shift); + + // FP reciprocal step. + void frecps(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP reciprocal estimate. + void frecpe(const VRegister& vd, + const VRegister& vn); + + // FP reciprocal square root estimate. + void frsqrte(const VRegister& vd, + const VRegister& vn); + + // FP reciprocal square root step. + void frsqrts(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed absolute difference and accumulate long. + void sabal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed absolute difference and accumulate long (second part). + void sabal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned absolute difference and accumulate long. + void uabal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned absolute difference and accumulate long (second part). + void uabal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed absolute difference long. + void sabdl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed absolute difference long (second part). + void sabdl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned absolute difference long. + void uabdl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned absolute difference long (second part). 
+ void uabdl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Polynomial multiply long. + void pmull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Polynomial multiply long (second part). + void pmull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed long multiply-add. + void smlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed long multiply-add (second part). + void smlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned long multiply-add. + void umlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned long multiply-add (second part). + void umlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed long multiply-sub. + void smlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed long multiply-sub (second part). + void smlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned long multiply-sub. + void umlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned long multiply-sub (second part). + void umlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed long multiply. + void smull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed long multiply (second part). + void smull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling long multiply-add. + void sqdmlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling long multiply-add (second part). + void sqdmlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling long multiply-subtract. + void sqdmlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling long multiply-subtract (second part). + void sqdmlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling long multiply. + void sqdmull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling long multiply (second part). + void sqdmull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling multiply returning high half. + void sqdmulh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating rounding doubling multiply returning high half. + void sqrdmulh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Signed saturating doubling multiply element returning high half. + void sqdmulh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating rounding doubling multiply element returning high half. + void sqrdmulh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply long. + void umull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Unsigned long multiply (second part). + void umull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Add narrow returning high half. + void addhn(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Add narrow returning high half (second part). 
+ void addhn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Rounding add narrow returning high half. + void raddhn(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Rounding add narrow returning high half (second part). + void raddhn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Subtract narrow returning high half. + void subhn(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Subtract narrow returning high half (second part). + void subhn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Rounding subtract narrow returning high half. + void rsubhn(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // Rounding subtract narrow returning high half (second part). + void rsubhn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP vector multiply accumulate. + void fmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP vector multiply subtract. + void fmls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP vector multiply extended. + void fmulx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP absolute greater than or equal. + void facge(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP absolute greater than. + void facgt(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP multiply by element. + void fmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-add to accumulator by element. + void fmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-sub from accumulator by element. + void fmls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP multiply extended by element. + void fmulx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP compare equal. + void fcmeq(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP greater than. + void fcmgt(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP greater than or equal. + void fcmge(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP compare equal to zero. + void fcmeq(const VRegister& vd, + const VRegister& vn, + double imm); + + // FP greater than zero. + void fcmgt(const VRegister& vd, + const VRegister& vn, + double imm); + + // FP greater than or equal to zero. + void fcmge(const VRegister& vd, + const VRegister& vn, + double imm); + + // FP less than or equal to zero. + void fcmle(const VRegister& vd, + const VRegister& vn, + double imm); + + // FP less than to zero. + void fcmlt(const VRegister& vd, + const VRegister& vn, + double imm); + + // FP absolute difference. + void fabd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP pairwise add vector. + void faddp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP pairwise add scalar. + void faddp(const VRegister& vd, + const VRegister& vn); + + // FP pairwise maximum vector. + void fmaxp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP pairwise maximum scalar. + void fmaxp(const VRegister& vd, + const VRegister& vn); + + // FP pairwise minimum vector. 
+ void fminp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP pairwise minimum scalar. + void fminp(const VRegister& vd, + const VRegister& vn); + + // FP pairwise maximum number vector. + void fmaxnmp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP pairwise maximum number scalar. + void fmaxnmp(const VRegister& vd, + const VRegister& vn); + + // FP pairwise minimum number vector. + void fminnmp(const VRegister& vd, + const VRegister& vn, + const VRegister& vm); + + // FP pairwise minimum number scalar. + void fminnmp(const VRegister& vd, + const VRegister& vn); + + // Emit generic instructions. + // Emit raw instructions into the instruction stream. + void dci(Instr raw_inst) { Emit(raw_inst); } + + // Emit 32 bits of data into the instruction stream. + void dc32(uint32_t data) { + EmitData(&data, sizeof(data)); + } + + // Emit 64 bits of data into the instruction stream. + void dc64(uint64_t data) { + EmitData(&data, sizeof(data)); + } + + // Code generation helpers. + + // Register encoding. + static Instr Rd(CPURegister rd) { + VIXL_ASSERT(rd.code() != kSPRegInternalCode); + return rd.code() << Rd_offset; + } + + static Instr Rn(CPURegister rn) { + VIXL_ASSERT(rn.code() != kSPRegInternalCode); + return rn.code() << Rn_offset; + } + + static Instr Rm(CPURegister rm) { + VIXL_ASSERT(rm.code() != kSPRegInternalCode); + return rm.code() << Rm_offset; + } + + static Instr RmNot31(CPURegister rm) { + VIXL_ASSERT(rm.code() != kSPRegInternalCode); + VIXL_ASSERT(!rm.IsZero()); + return Rm(rm); + } + + static Instr Ra(CPURegister ra) { + VIXL_ASSERT(ra.code() != kSPRegInternalCode); + return ra.code() << Ra_offset; + } + + static Instr Rt(CPURegister rt) { + VIXL_ASSERT(rt.code() != kSPRegInternalCode); + return rt.code() << Rt_offset; + } + + static Instr Rt2(CPURegister rt2) { + VIXL_ASSERT(rt2.code() != kSPRegInternalCode); + return rt2.code() << Rt2_offset; + } + + static Instr Rs(CPURegister rs) { + VIXL_ASSERT(rs.code() != kSPRegInternalCode); + return rs.code() << Rs_offset; + } + + // These encoding functions allow the stack pointer to be encoded, and + // disallow the zero register. + static Instr RdSP(Register rd) { + VIXL_ASSERT(!rd.IsZero()); + return (rd.code() & kRegCodeMask) << Rd_offset; + } + + static Instr RnSP(Register rn) { + VIXL_ASSERT(!rn.IsZero()); + return (rn.code() & kRegCodeMask) << Rn_offset; + } + + // Flags encoding. + static Instr Flags(FlagsUpdate S) { + if (S == SetFlags) { + return 1 << FlagsUpdate_offset; + } else if (S == LeaveFlags) { + return 0 << FlagsUpdate_offset; + } + VIXL_UNREACHABLE(); + return 0; + } + + static Instr Cond(Condition cond) { + return cond << Condition_offset; + } + + // PC-relative address encoding. + static Instr ImmPCRelAddress(int imm21) { + VIXL_ASSERT(is_int21(imm21)); + Instr imm = static_cast<Instr>(truncate_to_int21(imm21)); + Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset; + Instr immlo = imm << ImmPCRelLo_offset; + return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask); + } + + // Branch encoding. 
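The ImmUncondBranch/ImmCondBranch/ImmCmpBranch/ImmTestBranch helpers that follow all take instruction-word offsets (byte distance divided by 4, matching the "pc + imm19 << 2" convention noted for the literal loads earlier in this header) and place them in fixed-width signed fields, while ImmTestBranchBit splits the tested bit position across two fields. A minimal sketch of that arithmetic, with hypothetical helper names and example values; only the field widths and the bit split are taken from the declarations below:

#include <cassert>
#include <cstdint>

// Derive the imm19 operand for a conditional branch from a byte distance.
int32_t CondBranchImm19(int64_t byte_offset) {
    assert((byte_offset & 3) == 0);        // branch targets are word-aligned
    int64_t imm19 = byte_offset >> 2;      // the field holds a word offset
    assert(imm19 >= -(INT64_C(1) << 18) && imm19 < (INT64_C(1) << 18));  // is_int19
    return static_cast<int32_t>(imm19);
}

// tbz/tbnz encode the tested bit position in two pieces, as ImmTestBranchBit
// does below: bit 5 in one field ("b5"), bits 4:0 in another ("b40").
void SplitTestBranchBit(unsigned bit_pos, unsigned* b5, unsigned* b40) {
    *b5 = (bit_pos >> 5) & 1;
    *b40 = bit_pos & 0x1f;
}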
+ static Instr ImmUncondBranch(int imm26) { + VIXL_ASSERT(is_int26(imm26)); + return truncate_to_int26(imm26) << ImmUncondBranch_offset; + } + + static Instr ImmCondBranch(int imm19) { + VIXL_ASSERT(is_int19(imm19)); + return truncate_to_int19(imm19) << ImmCondBranch_offset; + } + + static Instr ImmCmpBranch(int imm19) { + VIXL_ASSERT(is_int19(imm19)); + return truncate_to_int19(imm19) << ImmCmpBranch_offset; + } + + static Instr ImmTestBranch(int imm14) { + VIXL_ASSERT(is_int14(imm14)); + return truncate_to_int14(imm14) << ImmTestBranch_offset; + } + + static Instr ImmTestBranchBit(unsigned bit_pos) { + VIXL_ASSERT(is_uint6(bit_pos)); + // Subtract five from the shift offset, as we need bit 5 from bit_pos. + unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5); + unsigned b40 = bit_pos << ImmTestBranchBit40_offset; + b5 &= ImmTestBranchBit5_mask; + b40 &= ImmTestBranchBit40_mask; + return b5 | b40; + } + + // Data Processing encoding. + static Instr SF(Register rd) { + return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits; + } + + static Instr ImmAddSub(int imm) { + VIXL_ASSERT(IsImmAddSub(imm)); + if (is_uint12(imm)) { // No shift required. + imm <<= ImmAddSub_offset; + } else { + imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset); + } + return imm; + } + + static Instr ImmS(unsigned imms, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) || + ((reg_size == kWRegSize) && is_uint5(imms))); + USE(reg_size); + return imms << ImmS_offset; + } + + static Instr ImmR(unsigned immr, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) || + ((reg_size == kWRegSize) && is_uint5(immr))); + USE(reg_size); + VIXL_ASSERT(is_uint6(immr)); + return immr << ImmR_offset; + } + + static Instr ImmSetBits(unsigned imms, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(is_uint6(imms)); + VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3)); + USE(reg_size); + return imms << ImmSetBits_offset; + } + + static Instr ImmRotate(unsigned immr, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) || + ((reg_size == kWRegSize) && is_uint5(immr))); + USE(reg_size); + return immr << ImmRotate_offset; + } + + static Instr ImmLLiteral(int imm19) { + VIXL_ASSERT(is_int19(imm19)); + return truncate_to_int19(imm19) << ImmLLiteral_offset; + } + + static Instr BitN(unsigned bitn, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0)); + USE(reg_size); + return bitn << BitN_offset; + } + + static Instr ShiftDP(Shift shift) { + VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR); + return shift << ShiftDP_offset; + } + + static Instr ImmDPShift(unsigned amount) { + VIXL_ASSERT(is_uint6(amount)); + return amount << ImmDPShift_offset; + } + + static Instr ExtendMode(Extend extend) { + return extend << ExtendMode_offset; + } + + static Instr ImmExtendShift(unsigned left_shift) { + VIXL_ASSERT(left_shift <= 4); + return left_shift << ImmExtendShift_offset; + } + + static Instr ImmCondCmp(unsigned imm) { + VIXL_ASSERT(is_uint5(imm)); + return imm << ImmCondCmp_offset; + } + + static Instr Nzcv(StatusFlags nzcv) { + return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset; + } + + // MemOperand offset encoding. 
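The offset encoders that follow mirror the scaled/unscaled split visible in the ldr/str versus ldur/stur declarations earlier in this header: the scaled forms need an unsigned 12-bit multiple of the access size, the unscaled forms take any signed 9-bit byte offset, and the pair forms take a signed 7-bit multiple of the access size. A sketch of those range checks, assuming access_size is log2 of the access width in bytes as in ImmLSPair below; the function names are illustrative, not part of the patch:

#include <cstdint>

// Scaled loads/stores (e.g. ldr) need a non-negative byte offset that is a
// multiple of the access size and whose scaled value fits in 12 bits.
bool FitsScaledOffset(int64_t offset, unsigned access_size) {
    if (offset < 0 || (offset & ((INT64_C(1) << access_size) - 1)) != 0)
        return false;
    return (offset >> access_size) < (INT64_C(1) << 12);
}

// Unscaled forms (ldur/stur) accept any signed 9-bit byte offset.
bool FitsUnscaledOffset(int64_t offset) {
    return offset >= -256 && offset <= 255;
}

// For an 8-byte access (access_size == 3): offset 32 scales to imm12 == 4,
// while offset -17 only fits the unscaled (ldur/stur) encoding, imm9 == -17.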
+ static Instr ImmLSUnsigned(int imm12) { + VIXL_ASSERT(is_uint12(imm12)); + return imm12 << ImmLSUnsigned_offset; + } + + static Instr ImmLS(int imm9) { + VIXL_ASSERT(is_int9(imm9)); + return truncate_to_int9(imm9) << ImmLS_offset; + } + + static Instr ImmLSPair(int imm7, unsigned access_size) { + VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7); + int scaled_imm7 = imm7 >> access_size; + VIXL_ASSERT(is_int7(scaled_imm7)); + return truncate_to_int7(scaled_imm7) << ImmLSPair_offset; + } + + static Instr ImmShiftLS(unsigned shift_amount) { + VIXL_ASSERT(is_uint1(shift_amount)); + return shift_amount << ImmShiftLS_offset; + } + + static Instr ImmPrefetchOperation(int imm5) { + VIXL_ASSERT(is_uint5(imm5)); + return imm5 << ImmPrefetchOperation_offset; + } + + static Instr ImmException(int imm16) { + VIXL_ASSERT(is_uint16(imm16)); + return imm16 << ImmException_offset; + } + + static Instr ImmSystemRegister(int imm15) { + VIXL_ASSERT(is_uint15(imm15)); + return imm15 << ImmSystemRegister_offset; + } + + static Instr ImmHint(int imm7) { + VIXL_ASSERT(is_uint7(imm7)); + return imm7 << ImmHint_offset; + } + + static Instr CRm(int imm4) { + VIXL_ASSERT(is_uint4(imm4)); + return imm4 << CRm_offset; + } + + static Instr CRn(int imm4) { + VIXL_ASSERT(is_uint4(imm4)); + return imm4 << CRn_offset; + } + + static Instr SysOp(int imm14) { + VIXL_ASSERT(is_uint14(imm14)); + return imm14 << SysOp_offset; + } + + static Instr ImmSysOp1(int imm3) { + VIXL_ASSERT(is_uint3(imm3)); + return imm3 << SysOp1_offset; + } + + static Instr ImmSysOp2(int imm3) { + VIXL_ASSERT(is_uint3(imm3)); + return imm3 << SysOp2_offset; + } + + static Instr ImmBarrierDomain(int imm2) { + VIXL_ASSERT(is_uint2(imm2)); + return imm2 << ImmBarrierDomain_offset; + } + + static Instr ImmBarrierType(int imm2) { + VIXL_ASSERT(is_uint2(imm2)); + return imm2 << ImmBarrierType_offset; + } + + // Move immediates encoding. + static Instr ImmMoveWide(uint64_t imm) { + VIXL_ASSERT(is_uint16(imm)); + return static_cast<Instr>(imm << ImmMoveWide_offset); + } + + static Instr ShiftMoveWide(int64_t shift) { + VIXL_ASSERT(is_uint2(shift)); + return static_cast<Instr>(shift << ShiftMoveWide_offset); + } + + // FP Immediates. + static Instr ImmFP32(float imm); + static Instr ImmFP64(double imm); + + // FP register type. + static Instr FPType(FPRegister fd) { + return fd.Is64Bits() ? FP64 : FP32; + } + + static Instr FPScale(unsigned scale) { + VIXL_ASSERT(is_uint6(scale)); + return scale << FPScale_offset; + } + + // Immediate field checking helpers. + static bool IsImmAddSub(int64_t immediate); + static bool IsImmConditionalCompare(int64_t immediate); + static bool IsImmFP32(float imm); + static bool IsImmFP64(double imm); + static bool IsImmLogical(uint64_t value, + unsigned width, + unsigned* n = NULL, + unsigned* imm_s = NULL, + unsigned* imm_r = NULL); + static bool IsImmLSPair(int64_t offset, unsigned access_size); + static bool IsImmLSScaled(int64_t offset, unsigned access_size); + static bool IsImmLSUnscaled(int64_t offset); + static bool IsImmMovn(uint64_t imm, unsigned reg_size); + static bool IsImmMovz(uint64_t imm, unsigned reg_size); + + // Instruction bits for vector format in data processing operations. 
+ static Instr VFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.lanes()) { + case 2: return NEON_2S; + case 4: return NEON_4H; + case 8: return NEON_8B; + default: return 0xffffffff; + } + } else { + VIXL_ASSERT(vd.Is128Bits()); + switch (vd.lanes()) { + case 2: return NEON_2D; + case 4: return NEON_4S; + case 8: return NEON_8H; + case 16: return NEON_16B; + default: return 0xffffffff; + } + } + } + + // Instruction bits for vector format in floating point data processing + // operations. + static Instr FPFormat(VRegister vd) { + if (vd.lanes() == 1) { + // Floating point scalar formats. + VIXL_ASSERT(vd.Is32Bits() || vd.Is64Bits()); + return vd.Is64Bits() ? FP64 : FP32; + } + + // Two lane floating point vector formats. + if (vd.lanes() == 2) { + VIXL_ASSERT(vd.Is64Bits() || vd.Is128Bits()); + return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S; + } + + // Four lane floating point vector format. + VIXL_ASSERT((vd.lanes() == 4) && vd.Is128Bits()); + return NEON_FP_4S; + } + + // Instruction bits for vector format in load and store operations. + static Instr LSVFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.lanes()) { + case 1: return LS_NEON_1D; + case 2: return LS_NEON_2S; + case 4: return LS_NEON_4H; + case 8: return LS_NEON_8B; + default: return 0xffffffff; + } + } else { + VIXL_ASSERT(vd.Is128Bits()); + switch (vd.lanes()) { + case 2: return LS_NEON_2D; + case 4: return LS_NEON_4S; + case 8: return LS_NEON_8H; + case 16: return LS_NEON_16B; + default: return 0xffffffff; + } + } + } + + // Instruction bits for scalar format in data processing operations. + static Instr SFormat(VRegister vd) { + VIXL_ASSERT(vd.lanes() == 1); + switch (vd.SizeInBytes()) { + case 1: return NEON_B; + case 2: return NEON_H; + case 4: return NEON_S; + case 8: return NEON_D; + default: return 0xffffffff; + } + } + + static Instr ImmNEONHLM(int index, int num_bits) { + int h, l, m; + if (num_bits == 3) { + VIXL_ASSERT(is_uint3(index)); + h = (index >> 2) & 1; + l = (index >> 1) & 1; + m = (index >> 0) & 1; + } else if (num_bits == 2) { + VIXL_ASSERT(is_uint2(index)); + h = (index >> 1) & 1; + l = (index >> 0) & 1; + m = 0; + } else { + VIXL_ASSERT(is_uint1(index) && (num_bits == 1)); + h = (index >> 0) & 1; + l = 0; + m = 0; + } + return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset); + } + + static Instr ImmNEONExt(int imm4) { + VIXL_ASSERT(is_uint4(imm4)); + return imm4 << ImmNEONExt_offset; + } + + static Instr ImmNEON5(Instr format, int index) { + VIXL_ASSERT(is_uint4(index)); + int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format)); + int imm5 = (index << (s + 1)) | (1 << s); + return imm5 << ImmNEON5_offset; + } + + static Instr ImmNEON4(Instr format, int index) { + VIXL_ASSERT(is_uint4(index)); + int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format)); + int imm4 = index << s; + return imm4 << ImmNEON4_offset; + } + + static Instr ImmNEONabcdefgh(int imm8) { + VIXL_ASSERT(is_uint8(imm8)); + Instr instr; + instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset; + instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset; + return instr; + } + + static Instr NEONCmode(int cmode) { + VIXL_ASSERT(is_uint4(cmode)); + return cmode << NEONCmode_offset; + } + + static Instr NEONModImmOp(int op) { + VIXL_ASSERT(is_uint1(op)); + return op << NEONModImmOp_offset; + } + + size_t size() const { + return SizeOfCodeGenerated(); + } + + size_t SizeOfCodeGenerated() const { + return armbuffer_.size(); + } + + PositionIndependentCodeOption pic() const { + return pic_; + } 
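A worked example of the lane-index packing performed by ImmNEON5 and ImmNEON4 above: the log2 lane size s decides how far the index is shifted, and the 5-bit form also plants a marker bit at position s. The helper names below are illustrative and omit the final shift into the instruction field; the arithmetic is the same as in the two functions above.

// Mirror of ImmNEON5()/ImmNEON4() before the final field shift.
int Neon5(int lane_size_log2, int index) {
    int s = lane_size_log2;            // 0 = B, 1 = H, 2 = S, 3 = D lanes
    return (index << (s + 1)) | (1 << s);
}

int Neon4(int lane_size_log2, int index) {
    return index << lane_size_log2;
}

// For 32-bit lanes (s == 2) and index 3:
//   Neon5(2, 3) == 0b11100 (28) and Neon4(2, 3) == 0b01100 (12).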
+ + bool AllowPageOffsetDependentCode() const { + return (pic() == PageOffsetDependentCode) || + (pic() == PositionDependentCode); + } + + static const Register& AppropriateZeroRegFor(const CPURegister& reg) { + return reg.Is64Bits() ? xzr : wzr; + } + + + protected: + void LoadStore(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op, + LoadStoreScalingOption option = PreferScaledOffset); + + void LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + void LoadStoreStruct(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreMultiStructOp op); + void LoadStoreStruct1(const VRegister& vt, + int reg_count, + const MemOperand& addr); + void LoadStoreStructSingle(const VRegister& vt, + uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, + Instr op); + + void Prefetch(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // TODO(all): The third parameter should be passed by reference but gcc 4.8.2 + // reports a bogus uninitialised warning then. + BufferOffset Logical(const Register& rd, + const Register& rn, + const Operand operand, + LogicalOp op); + BufferOffset LogicalImmediate(const Register& rd, + const Register& rn, + unsigned n, + unsigned imm_s, + unsigned imm_r, + LogicalOp op); + + void ConditionalCompare(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + + void AddSubWithCarry(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + + // Functions for emulating operands not directly supported by the instruction + // set. + void EmitShift(const Register& rd, + const Register& rn, + Shift shift, + unsigned amount); + void EmitExtendShift(const Register& rd, + const Register& rn, + Extend extend, + unsigned left_shift); + + void AddSub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + void NEONTable(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONTableOp op); + + // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified + // registers. Only simple loads are supported; sign- and zero-extension (such + // as in LDPSW_x or LDRB_w) are not supported. + static LoadStoreOp LoadOpFor(const CPURegister& rt); + static LoadStorePairOp LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStoreOp StoreOpFor(const CPURegister& rt); + static LoadStorePairOp StorePairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt); + + + private: + static uint32_t FP32ToImm8(float imm); + static uint32_t FP64ToImm8(double imm); + + // Instruction helpers. 
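  // Note (editorial): broadly, each private helper below emits a single
  // instruction of the named encoding class, and the public mnemonic methods
  // validate their operands before delegating to one of these. MoveWide, for
  // example, underlies movz/movn/movk with their 16-bit immediates.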
+ void MoveWide(const Register& rd, + uint64_t imm, + int shift, + MoveWideImmediateOp mov_op); + BufferOffset DataProcShiftedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void DataProcExtendedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void LoadStorePairNonTemporal(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairNonTemporalOp op); + void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op); + void ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op); + void DataProcessing1Source(const Register& rd, + const Register& rn, + DataProcessing1SourceOp op); + void DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp op); + void FPDataProcessing1Source(const VRegister& fd, + const VRegister& fn, + FPDataProcessing1SourceOp op); + void FPDataProcessing3Source(const VRegister& fd, + const VRegister& fn, + const VRegister& fm, + const VRegister& fa, + FPDataProcessing3SourceOp op); + void NEONAcrossLanesL(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op); + void NEONAcrossLanes(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op); + void NEONModifiedImmShiftLsl(const VRegister& vd, + const int imm8, + const int left_shift, + NEONModifiedImmediateOp op); + void NEONModifiedImmShiftMsl(const VRegister& vd, + const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op); + void NEONFP2Same(const VRegister& vd, + const VRegister& vn, + Instr vop); + void NEON3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3SameOp vop); + void NEONFP3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op); + void NEON3DifferentL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEON3DifferentW(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEON3DifferentHN(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + double value = 0.0); + void NEON2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + int value = 0); + void NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + Instr op); + void NEONAddlp(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp op); + void NEONPerm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONPermOp op); + void NEONFPByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp op); + void NEONByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp op); + void NEONByElementL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp op); + void NEONShiftImmediate(const VRegister& vd, + const VRegister& vn, + NEONShiftImmediateOp op, + int immh_immb); + void NEONShiftLeftImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONShiftRightImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONShiftImmediateL(const 
VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONShiftImmediateN(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONXtn(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop); + + Instr LoadStoreStructAddrModeField(const MemOperand& addr); + + // Encode the specified MemOperand for the specified access size and scaling + // preference. + Instr LoadStoreMemOperand(const MemOperand& addr, + unsigned access_size, + LoadStoreScalingOption option); + + protected: + // Prevent generation of a literal pool for the next |maxInst| instructions. + // Guarantees instruction linearity. + class AutoBlockLiteralPool { + ARMBuffer* armbuffer_; + + public: + AutoBlockLiteralPool(Assembler* assembler, size_t maxInst) + : armbuffer_(&assembler->armbuffer_) { + armbuffer_->enterNoPool(maxInst); + } + ~AutoBlockLiteralPool() { + armbuffer_->leaveNoPool(); + } + }; + + protected: + // Buffer where the code is emitted. + PositionIndependentCodeOption pic_; + +#ifdef DEBUG + bool finalized_; +#endif +}; + +} // namespace vixl + +#endif // VIXL_A64_ASSEMBLER_A64_H_ diff --git a/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h new file mode 100644 index 000000000..e13eef613 --- /dev/null +++ b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h @@ -0,0 +1,179 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#ifndef VIXL_COMPILER_INTRINSICS_H +#define VIXL_COMPILER_INTRINSICS_H + +#include "mozilla/MathAlgorithms.h" + +#include "jit/arm64/vixl/Globals-vixl.h" + +namespace vixl { + +// Helper to check whether the version of GCC used is greater than the specified +// requirement. 
+#define MAJOR 1000000 +#define MINOR 1000 +#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ + ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \ + ((major) * MAJOR + (minor) * MINOR + (patchlevel))) +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ + ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \ + ((major) * MAJOR + (minor) * MINOR + (patchlevel))) +#else +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0 +#endif + + +#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS) + +#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb)) +#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz)) +#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz)) +#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs)) +#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount)) + +#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS) +// The documentation for these builtins is available at: +// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html + +# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0)) +# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0)) + +#else +// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually +// implemented C++ methods. + +#define COMPILER_HAS_BUILTIN_BSWAP false +#define COMPILER_HAS_BUILTIN_CLRSB false +#define COMPILER_HAS_BUILTIN_CLZ false +#define COMPILER_HAS_BUILTIN_CTZ false +#define COMPILER_HAS_BUILTIN_FFS false +#define COMPILER_HAS_BUILTIN_POPCOUNT false + +#endif + + +template<typename V> +inline bool IsPowerOf2(V value) { + return (value != 0) && ((value & (value - 1)) == 0); +} + + +// Implementation of intrinsics functions. +// TODO: The implementations could be improved for sizes different from 32bit +// and 64bit: we could mask the values and call the appropriate builtin. + + +template<typename V> +inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_CLZ + if (width == 32) { + return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value)); + } else if (width == 64) { + return (value == 0) ? 
64 : __builtin_clzll(value); + } + MOZ_CRASH("Unhandled width."); +#else + if (width == 32) { + return mozilla::CountLeadingZeroes32(value); + } else if (width == 64) { + return mozilla::CountLeadingZeroes64(value); + } + MOZ_CRASH("Unhandled width."); +#endif +} + + +template<typename V> +inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_CLRSB + if (width == 32) { + return __builtin_clrsb(value); + } else if (width == 64) { + return __builtin_clrsbll(value); + } + MOZ_CRASH("Unhandled width."); +#else + VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); + if (value >= 0) { + return CountLeadingZeros(value, width) - 1; + } else { + return CountLeadingZeros(~value, width) - 1; + } +#endif +} + + +template<typename V> +inline int CountSetBits(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_POPCOUNT + if (width == 32) { + return __builtin_popcount(static_cast<unsigned>(value)); + } else if (width == 64) { + return __builtin_popcountll(value); + } + MOZ_CRASH("Unhandled width."); +#else + if (width == 32) { + return mozilla::CountPopulation32(value); + } else if (width == 64) { + return mozilla::CountPopulation64(value); + } + MOZ_CRASH("Unhandled width."); +#endif +} + + +template<typename V> +inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_CTZ + if (width == 32) { + return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value)); + } else if (width == 64) { + return (value == 0) ? 64 : __builtin_ctzll(value); + } + MOZ_CRASH("Unhandled width."); +#else + if (width == 32) { + return mozilla::CountTrailingZeroes32(value); + } else if (width == 64) { + return mozilla::CountTrailingZeroes64(value); + } + MOZ_CRASH("Unhandled width."); +#endif +} + +} // namespace vixl + +#endif // VIXL_COMPILER_INTRINSICS_H + diff --git a/js/src/jit/arm64/vixl/Constants-vixl.h b/js/src/jit/arm64/vixl/Constants-vixl.h new file mode 100644 index 000000000..3724cf4b3 --- /dev/null +++ b/js/src/jit/arm64/vixl/Constants-vixl.h @@ -0,0 +1,2148 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_CONSTANTS_A64_H_ +#define VIXL_A64_CONSTANTS_A64_H_ + +#include <stdint.h> + +#include "jit/arm64/vixl/Globals-vixl.h" + +namespace vixl { + +// Supervisor Call (svc) specific support. +// +// The SVC instruction encodes an optional 16-bit immediate value. +// The simulator understands the codes below. +enum SVCSimulatorCodes { + kCallRtRedirected = 0x10, // Transition to x86_64 C code. + kMarkStackPointer = 0x11, // Push the current SP on a special Simulator stack. + kCheckStackPointer = 0x12 // Pop from the special Simulator stack and compare to SP. +}; + +const unsigned kNumberOfRegisters = 32; +const unsigned kNumberOfVRegisters = 32; +const unsigned kNumberOfFPRegisters = kNumberOfVRegisters; +// Callee saved registers are x21-x30(lr). +const int kNumberOfCalleeSavedRegisters = 10; +const int kFirstCalleeSavedRegisterIndex = 21; +// Callee saved FP registers are d8-d15. +const int kNumberOfCalleeSavedFPRegisters = 8; +const int kFirstCalleeSavedFPRegisterIndex = 8; + +#define REGISTER_CODE_LIST(R) \ +R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ +R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ +R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ +R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) + +#define INSTRUCTION_FIELDS_LIST(V_) \ +/* Register fields */ \ +V_(Rd, 4, 0, Bits) /* Destination register. */ \ +V_(Rn, 9, 5, Bits) /* First source register. */ \ +V_(Rm, 20, 16, Bits) /* Second source register. */ \ +V_(Ra, 14, 10, Bits) /* Third source register. */ \ +V_(Rt, 4, 0, Bits) /* Load/store register. */ \ +V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \ +V_(Rs, 20, 16, Bits) /* Exclusive access status. 
*/ \ + \ +/* Common bits */ \ +V_(SixtyFourBits, 31, 31, Bits) \ +V_(FlagsUpdate, 29, 29, Bits) \ + \ +/* PC relative addressing */ \ +V_(ImmPCRelHi, 23, 5, SignedBits) \ +V_(ImmPCRelLo, 30, 29, Bits) \ + \ +/* Add/subtract/logical shift register */ \ +V_(ShiftDP, 23, 22, Bits) \ +V_(ImmDPShift, 15, 10, Bits) \ + \ +/* Add/subtract immediate */ \ +V_(ImmAddSub, 21, 10, Bits) \ +V_(ShiftAddSub, 23, 22, Bits) \ + \ +/* Add/substract extend */ \ +V_(ImmExtendShift, 12, 10, Bits) \ +V_(ExtendMode, 15, 13, Bits) \ + \ +/* Move wide */ \ +V_(ImmMoveWide, 20, 5, Bits) \ +V_(ShiftMoveWide, 22, 21, Bits) \ + \ +/* Logical immediate, bitfield and extract */ \ +V_(BitN, 22, 22, Bits) \ +V_(ImmRotate, 21, 16, Bits) \ +V_(ImmSetBits, 15, 10, Bits) \ +V_(ImmR, 21, 16, Bits) \ +V_(ImmS, 15, 10, Bits) \ + \ +/* Test and branch immediate */ \ +V_(ImmTestBranch, 18, 5, SignedBits) \ +V_(ImmTestBranchBit40, 23, 19, Bits) \ +V_(ImmTestBranchBit5, 31, 31, Bits) \ + \ +/* Conditionals */ \ +V_(Condition, 15, 12, Bits) \ +V_(ConditionBranch, 3, 0, Bits) \ +V_(Nzcv, 3, 0, Bits) \ +V_(ImmCondCmp, 20, 16, Bits) \ +V_(ImmCondBranch, 23, 5, SignedBits) \ + \ +/* Floating point */ \ +V_(FPType, 23, 22, Bits) \ +V_(ImmFP, 20, 13, Bits) \ +V_(FPScale, 15, 10, Bits) \ + \ +/* Load Store */ \ +V_(ImmLS, 20, 12, SignedBits) \ +V_(ImmLSUnsigned, 21, 10, Bits) \ +V_(ImmLSPair, 21, 15, SignedBits) \ +V_(ImmShiftLS, 12, 12, Bits) \ +V_(LSOpc, 23, 22, Bits) \ +V_(LSVector, 26, 26, Bits) \ +V_(LSSize, 31, 30, Bits) \ +V_(ImmPrefetchOperation, 4, 0, Bits) \ +V_(PrefetchHint, 4, 3, Bits) \ +V_(PrefetchTarget, 2, 1, Bits) \ +V_(PrefetchStream, 0, 0, Bits) \ + \ +/* Other immediates */ \ +V_(ImmUncondBranch, 25, 0, SignedBits) \ +V_(ImmCmpBranch, 23, 5, SignedBits) \ +V_(ImmLLiteral, 23, 5, SignedBits) \ +V_(ImmException, 20, 5, Bits) \ +V_(ImmHint, 11, 5, Bits) \ +V_(ImmBarrierDomain, 11, 10, Bits) \ +V_(ImmBarrierType, 9, 8, Bits) \ + \ +/* System (MRS, MSR, SYS) */ \ +V_(ImmSystemRegister, 19, 5, Bits) \ +V_(SysO0, 19, 19, Bits) \ +V_(SysOp, 18, 5, Bits) \ +V_(SysOp1, 18, 16, Bits) \ +V_(SysOp2, 7, 5, Bits) \ +V_(CRn, 15, 12, Bits) \ +V_(CRm, 11, 8, Bits) \ + \ +/* Load-/store-exclusive */ \ +V_(LdStXLoad, 22, 22, Bits) \ +V_(LdStXNotExclusive, 23, 23, Bits) \ +V_(LdStXAcquireRelease, 15, 15, Bits) \ +V_(LdStXSizeLog2, 31, 30, Bits) \ +V_(LdStXPair, 21, 21, Bits) \ + \ +/* NEON generic fields */ \ +V_(NEONQ, 30, 30, Bits) \ +V_(NEONSize, 23, 22, Bits) \ +V_(NEONLSSize, 11, 10, Bits) \ +V_(NEONS, 12, 12, Bits) \ +V_(NEONL, 21, 21, Bits) \ +V_(NEONM, 20, 20, Bits) \ +V_(NEONH, 11, 11, Bits) \ +V_(ImmNEONExt, 14, 11, Bits) \ +V_(ImmNEON5, 20, 16, Bits) \ +V_(ImmNEON4, 14, 11, Bits) \ + \ +/* NEON Modified Immediate fields */ \ +V_(ImmNEONabc, 18, 16, Bits) \ +V_(ImmNEONdefgh, 9, 5, Bits) \ +V_(NEONModImmOp, 29, 29, Bits) \ +V_(NEONCmode, 15, 12, Bits) \ + \ +/* NEON Shift Immediate fields */ \ +V_(ImmNEONImmhImmb, 22, 16, Bits) \ +V_(ImmNEONImmh, 22, 19, Bits) \ +V_(ImmNEONImmb, 18, 16, Bits) + +#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ +/* NZCV */ \ +V_(Flags, 31, 28, Bits) \ +V_(N, 31, 31, Bits) \ +V_(Z, 30, 30, Bits) \ +V_(C, 29, 29, Bits) \ +V_(V, 28, 28, Bits) \ +M_(NZCV, Flags_mask) \ +/* FPCR */ \ +V_(AHP, 26, 26, Bits) \ +V_(DN, 25, 25, Bits) \ +V_(FZ, 24, 24, Bits) \ +V_(RMode, 23, 22, Bits) \ +M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask) + +// Fields offsets. 
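// For each field declared above, the macro below generates three constants.
// For example, V_(Rd, 4, 0, Bits) expands to:
//   const int Rd_offset = 0;
//   const int Rd_width = 5;
//   const uint32_t Rd_mask = ((1 << 5) - 1) << 0;  // 0x0000001F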
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+const int Name##_offset = LowBit; \
+const int Name##_width = HighBit - LowBit + 1; \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+  eq = 0, // Z set Equal.
+  ne = 1, // Z clear Not equal.
+  cs = 2, // C set Carry set.
+  cc = 3, // C clear Carry clear.
+  mi = 4, // N set Negative.
+  pl = 5, // N clear Positive or zero.
+  vs = 6, // V set Overflow.
+  vc = 7, // V clear No overflow.
+  hi = 8, // C set, Z clear Unsigned higher.
+  ls = 9, // C clear or Z set Unsigned lower or same.
+  ge = 10, // N == V Greater or equal.
+  lt = 11, // N != V Less than.
+  gt = 12, // Z clear, N == V Greater than.
+  le = 13, // Z set or N != V Less than or equal.
+  al = 14, // Always.
+  nv = 15, // Behaves as always/al.
+
+  // Aliases.
+  hs = cs, // C set Unsigned higher or same.
+  lo = cc, // C clear Unsigned lower.
+
+  // Mozilla expanded aliases.
+  Equal = 0, Zero = 0,
+  NotEqual = 1, NonZero = 1,
+  AboveOrEqual = 2, CarrySet = 2,
+  Below = 3, CarryClear = 3,
+  Signed = 4,
+  NotSigned = 5,
+  Overflow = 6,
+  NoOverflow = 7,
+  Above = 8,
+  BelowOrEqual = 9,
+  GreaterThanOrEqual_ = 10,
+  LessThan_ = 11,
+  GreaterThan_ = 12,
+  LessThanOrEqual_ = 13,
+  Always = 14,
+  Never = 15
+};
+
+inline Condition InvertCondition(Condition cond) {
+  // Conditions al and nv behave identically, as "always true". They can't be
+  // inverted, because there is no "always false" condition.
+  VIXL_ASSERT((cond != al) && (cond != nv));
+  return static_cast<Condition>(cond ^ 1);
+}
+
+enum FPTrapFlags {
+  EnableTrap = 1,
+  DisableTrap = 0
+};
+
+enum FlagsUpdate {
+  SetFlags = 1,
+  LeaveFlags = 0
+};
+
+enum StatusFlags {
+  NoFlag = 0,
+
+  // Derive the flag combinations from the system register bit descriptions.
+  NFlag = N_mask,
+  ZFlag = Z_mask,
+  CFlag = C_mask,
+  VFlag = V_mask,
+  NZFlag = NFlag | ZFlag,
+  NCFlag = NFlag | CFlag,
+  NVFlag = NFlag | VFlag,
+  ZCFlag = ZFlag | CFlag,
+  ZVFlag = ZFlag | VFlag,
+  CVFlag = CFlag | VFlag,
+  NZCFlag = NFlag | ZFlag | CFlag,
+  NZVFlag = NFlag | ZFlag | VFlag,
+  NCVFlag = NFlag | CFlag | VFlag,
+  ZCVFlag = ZFlag | CFlag | VFlag,
+  NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+  // Floating-point comparison results.
+ FPEqualFlag = ZCFlag, + FPLessThanFlag = NFlag, + FPGreaterThanFlag = CFlag, + FPUnorderedFlag = CVFlag +}; + +enum Shift { + NO_SHIFT = -1, + LSL = 0x0, + LSR = 0x1, + ASR = 0x2, + ROR = 0x3, + MSL = 0x4 +}; + +enum Extend { + NO_EXTEND = -1, + UXTB = 0, + UXTH = 1, + UXTW = 2, + UXTX = 3, + SXTB = 4, + SXTH = 5, + SXTW = 6, + SXTX = 7 +}; + +enum SystemHint { + NOP = 0, + YIELD = 1, + WFE = 2, + WFI = 3, + SEV = 4, + SEVL = 5 +}; + +enum BarrierDomain { + OuterShareable = 0, + NonShareable = 1, + InnerShareable = 2, + FullSystem = 3 +}; + +enum BarrierType { + BarrierOther = 0, + BarrierReads = 1, + BarrierWrites = 2, + BarrierAll = 3 +}; + +enum PrefetchOperation { + PLDL1KEEP = 0x00, + PLDL1STRM = 0x01, + PLDL2KEEP = 0x02, + PLDL2STRM = 0x03, + PLDL3KEEP = 0x04, + PLDL3STRM = 0x05, + + PLIL1KEEP = 0x08, + PLIL1STRM = 0x09, + PLIL2KEEP = 0x0a, + PLIL2STRM = 0x0b, + PLIL3KEEP = 0x0c, + PLIL3STRM = 0x0d, + + PSTL1KEEP = 0x10, + PSTL1STRM = 0x11, + PSTL2KEEP = 0x12, + PSTL2STRM = 0x13, + PSTL3KEEP = 0x14, + PSTL3STRM = 0x15 +}; + +// System/special register names. +// This information is not encoded as one field but as the concatenation of +// multiple fields (Op0<0>, Op1, Crn, Crm, Op2). +enum SystemRegister { + NZCV = ((0x1 << SysO0_offset) | + (0x3 << SysOp1_offset) | + (0x4 << CRn_offset) | + (0x2 << CRm_offset) | + (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset, + FPCR = ((0x1 << SysO0_offset) | + (0x3 << SysOp1_offset) | + (0x4 << CRn_offset) | + (0x4 << CRm_offset) | + (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset +}; + +enum InstructionCacheOp { + IVAU = ((0x3 << SysOp1_offset) | + (0x7 << CRn_offset) | + (0x5 << CRm_offset) | + (0x1 << SysOp2_offset)) >> SysOp_offset +}; + +enum DataCacheOp { + CVAC = ((0x3 << SysOp1_offset) | + (0x7 << CRn_offset) | + (0xa << CRm_offset) | + (0x1 << SysOp2_offset)) >> SysOp_offset, + CVAU = ((0x3 << SysOp1_offset) | + (0x7 << CRn_offset) | + (0xb << CRm_offset) | + (0x1 << SysOp2_offset)) >> SysOp_offset, + CIVAC = ((0x3 << SysOp1_offset) | + (0x7 << CRn_offset) | + (0xe << CRm_offset) | + (0x1 << SysOp2_offset)) >> SysOp_offset, + ZVA = ((0x3 << SysOp1_offset) | + (0x7 << CRn_offset) | + (0x4 << CRm_offset) | + (0x1 << SysOp2_offset)) >> SysOp_offset +}; + +// Instruction enumerations. +// +// These are the masks that define a class of instructions, and the list of +// instructions within each class. Each enumeration has a Fixed, FMask and +// Mask value. +// +// Fixed: The fixed bits in this instruction class. +// FMask: The mask used to extract the fixed bits in the class. +// Mask: The mask used to identify the instructions within a class. +// +// The enumerations can be used like this: +// +// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed); +// switch(instr->Mask(PCRelAddressingMask)) { +// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break; +// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break; +// default: printf("Unknown instruction\n"); +// } + + +// Generic fields. 
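// Note (editorial): SixtyFourBits below is the sf bit (bit 31) that selects
// the X-register form of an integer instruction, and FP64 sets the FP type
// field (bit 22) to select double precision; both get OR-ed into the
// class-specific encodings that follow.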
+enum GenericInstrField { + SixtyFourBits = 0x80000000, + ThirtyTwoBits = 0x00000000, + FP32 = 0x00000000, + FP64 = 0x00400000 +}; + +enum NEONFormatField { + NEONFormatFieldMask = 0x40C00000, + NEON_Q = 0x40000000, + NEON_8B = 0x00000000, + NEON_16B = NEON_8B | NEON_Q, + NEON_4H = 0x00400000, + NEON_8H = NEON_4H | NEON_Q, + NEON_2S = 0x00800000, + NEON_4S = NEON_2S | NEON_Q, + NEON_1D = 0x00C00000, + NEON_2D = 0x00C00000 | NEON_Q +}; + +enum NEONFPFormatField { + NEONFPFormatFieldMask = 0x40400000, + NEON_FP_2S = FP32, + NEON_FP_4S = FP32 | NEON_Q, + NEON_FP_2D = FP64 | NEON_Q +}; + +enum NEONLSFormatField { + NEONLSFormatFieldMask = 0x40000C00, + LS_NEON_8B = 0x00000000, + LS_NEON_16B = LS_NEON_8B | NEON_Q, + LS_NEON_4H = 0x00000400, + LS_NEON_8H = LS_NEON_4H | NEON_Q, + LS_NEON_2S = 0x00000800, + LS_NEON_4S = LS_NEON_2S | NEON_Q, + LS_NEON_1D = 0x00000C00, + LS_NEON_2D = LS_NEON_1D | NEON_Q +}; + +enum NEONScalarFormatField { + NEONScalarFormatFieldMask = 0x00C00000, + NEONScalar = 0x10000000, + NEON_B = 0x00000000, + NEON_H = 0x00400000, + NEON_S = 0x00800000, + NEON_D = 0x00C00000 +}; + +// PC relative addressing. +enum PCRelAddressingOp { + PCRelAddressingFixed = 0x10000000, + PCRelAddressingFMask = 0x1F000000, + PCRelAddressingMask = 0x9F000000, + ADR = PCRelAddressingFixed | 0x00000000, + ADRP = PCRelAddressingFixed | 0x80000000 +}; + +// Add/sub (immediate, shifted and extended.) +const int kSFOffset = 31; +enum AddSubOp { + AddSubOpMask = 0x60000000, + AddSubSetFlagsBit = 0x20000000, + ADD = 0x00000000, + ADDS = ADD | AddSubSetFlagsBit, + SUB = 0x40000000, + SUBS = SUB | AddSubSetFlagsBit +}; + +#define ADD_SUB_OP_LIST(V) \ + V(ADD), \ + V(ADDS), \ + V(SUB), \ + V(SUBS) + +enum AddSubImmediateOp { + AddSubImmediateFixed = 0x11000000, + AddSubImmediateFMask = 0x1F000000, + AddSubImmediateMask = 0xFF000000, + #define ADD_SUB_IMMEDIATE(A) \ + A##_w_imm = AddSubImmediateFixed | A, \ + A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE) + #undef ADD_SUB_IMMEDIATE +}; + +enum AddSubShiftedOp { + AddSubShiftedFixed = 0x0B000000, + AddSubShiftedFMask = 0x1F200000, + AddSubShiftedMask = 0xFF200000, + #define ADD_SUB_SHIFTED(A) \ + A##_w_shift = AddSubShiftedFixed | A, \ + A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_SHIFTED) + #undef ADD_SUB_SHIFTED +}; + +enum AddSubExtendedOp { + AddSubExtendedFixed = 0x0B200000, + AddSubExtendedFMask = 0x1F200000, + AddSubExtendedMask = 0xFFE00000, + #define ADD_SUB_EXTENDED(A) \ + A##_w_ext = AddSubExtendedFixed | A, \ + A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_EXTENDED) + #undef ADD_SUB_EXTENDED +}; + +// Add/sub with carry. +enum AddSubWithCarryOp { + AddSubWithCarryFixed = 0x1A000000, + AddSubWithCarryFMask = 0x1FE00000, + AddSubWithCarryMask = 0xFFE0FC00, + ADC_w = AddSubWithCarryFixed | ADD, + ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits, + ADC = ADC_w, + ADCS_w = AddSubWithCarryFixed | ADDS, + ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits, + SBC_w = AddSubWithCarryFixed | SUB, + SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits, + SBC = SBC_w, + SBCS_w = AddSubWithCarryFixed | SUBS, + SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits +}; + + +// Logical (immediate and shifted register). 
+enum LogicalOp { + LogicalOpMask = 0x60200000, + NOT = 0x00200000, + AND = 0x00000000, + BIC = AND | NOT, + ORR = 0x20000000, + ORN = ORR | NOT, + EOR = 0x40000000, + EON = EOR | NOT, + ANDS = 0x60000000, + BICS = ANDS | NOT +}; + +// Logical immediate. +enum LogicalImmediateOp { + LogicalImmediateFixed = 0x12000000, + LogicalImmediateFMask = 0x1F800000, + LogicalImmediateMask = 0xFF800000, + AND_w_imm = LogicalImmediateFixed | AND, + AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits, + ORR_w_imm = LogicalImmediateFixed | ORR, + ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits, + EOR_w_imm = LogicalImmediateFixed | EOR, + EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits, + ANDS_w_imm = LogicalImmediateFixed | ANDS, + ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits +}; + +// Logical shifted register. +enum LogicalShiftedOp { + LogicalShiftedFixed = 0x0A000000, + LogicalShiftedFMask = 0x1F000000, + LogicalShiftedMask = 0xFF200000, + AND_w = LogicalShiftedFixed | AND, + AND_x = LogicalShiftedFixed | AND | SixtyFourBits, + AND_shift = AND_w, + BIC_w = LogicalShiftedFixed | BIC, + BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits, + BIC_shift = BIC_w, + ORR_w = LogicalShiftedFixed | ORR, + ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits, + ORR_shift = ORR_w, + ORN_w = LogicalShiftedFixed | ORN, + ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits, + ORN_shift = ORN_w, + EOR_w = LogicalShiftedFixed | EOR, + EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits, + EOR_shift = EOR_w, + EON_w = LogicalShiftedFixed | EON, + EON_x = LogicalShiftedFixed | EON | SixtyFourBits, + EON_shift = EON_w, + ANDS_w = LogicalShiftedFixed | ANDS, + ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits, + ANDS_shift = ANDS_w, + BICS_w = LogicalShiftedFixed | BICS, + BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits, + BICS_shift = BICS_w +}; + +// Move wide immediate. +enum MoveWideImmediateOp { + MoveWideImmediateFixed = 0x12800000, + MoveWideImmediateFMask = 0x1F800000, + MoveWideImmediateMask = 0xFF800000, + MOVN = 0x00000000, + MOVZ = 0x40000000, + MOVK = 0x60000000, + MOVN_w = MoveWideImmediateFixed | MOVN, + MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits, + MOVZ_w = MoveWideImmediateFixed | MOVZ, + MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits, + MOVK_w = MoveWideImmediateFixed | MOVK, + MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits +}; + +// Bitfield. +const int kBitfieldNOffset = 22; +enum BitfieldOp { + BitfieldFixed = 0x13000000, + BitfieldFMask = 0x1F800000, + BitfieldMask = 0xFF800000, + SBFM_w = BitfieldFixed | 0x00000000, + SBFM_x = BitfieldFixed | 0x80000000, + SBFM = SBFM_w, + BFM_w = BitfieldFixed | 0x20000000, + BFM_x = BitfieldFixed | 0xA0000000, + BFM = BFM_w, + UBFM_w = BitfieldFixed | 0x40000000, + UBFM_x = BitfieldFixed | 0xC0000000, + UBFM = UBFM_w + // Bitfield N field. +}; + +// Extract. +enum ExtractOp { + ExtractFixed = 0x13800000, + ExtractFMask = 0x1F800000, + ExtractMask = 0xFFA00000, + EXTR_w = ExtractFixed | 0x00000000, + EXTR_x = ExtractFixed | 0x80000000, + EXTR = EXTR_w +}; + +// Unconditional branch. +enum UnconditionalBranchOp { + UnconditionalBranchFixed = 0x14000000, + UnconditionalBranchFMask = 0x7C000000, + UnconditionalBranchMask = 0xFC000000, + B = UnconditionalBranchFixed | 0x00000000, + BL = UnconditionalBranchFixed | 0x80000000 +}; + +// Unconditional branch to register. 
+enum UnconditionalBranchToRegisterOp { + UnconditionalBranchToRegisterFixed = 0xD6000000, + UnconditionalBranchToRegisterFMask = 0xFE000000, + UnconditionalBranchToRegisterMask = 0xFFFFFC1F, + BR = UnconditionalBranchToRegisterFixed | 0x001F0000, + BLR = UnconditionalBranchToRegisterFixed | 0x003F0000, + RET = UnconditionalBranchToRegisterFixed | 0x005F0000 +}; + +// Compare and branch. +enum CompareBranchOp { + CompareBranchFixed = 0x34000000, + CompareBranchFMask = 0x7E000000, + CompareBranchMask = 0xFF000000, + CBZ_w = CompareBranchFixed | 0x00000000, + CBZ_x = CompareBranchFixed | 0x80000000, + CBZ = CBZ_w, + CBNZ_w = CompareBranchFixed | 0x01000000, + CBNZ_x = CompareBranchFixed | 0x81000000, + CBNZ = CBNZ_w +}; + +// Test and branch. +enum TestBranchOp { + TestBranchFixed = 0x36000000, + TestBranchFMask = 0x7E000000, + TestBranchMask = 0x7F000000, + TBZ = TestBranchFixed | 0x00000000, + TBNZ = TestBranchFixed | 0x01000000 +}; + +// Conditional branch. +enum ConditionalBranchOp { + ConditionalBranchFixed = 0x54000000, + ConditionalBranchFMask = 0xFE000000, + ConditionalBranchMask = 0xFF000010, + B_cond = ConditionalBranchFixed | 0x00000000 +}; + +// System. +// System instruction encoding is complicated because some instructions use op +// and CR fields to encode parameters. To handle this cleanly, the system +// instructions are split into more than one enum. + +enum SystemOp { + SystemFixed = 0xD5000000, + SystemFMask = 0xFFC00000 +}; + +enum SystemSysRegOp { + SystemSysRegFixed = 0xD5100000, + SystemSysRegFMask = 0xFFD00000, + SystemSysRegMask = 0xFFF00000, + MRS = SystemSysRegFixed | 0x00200000, + MSR = SystemSysRegFixed | 0x00000000 +}; + +enum SystemHintOp { + SystemHintFixed = 0xD503201F, + SystemHintFMask = 0xFFFFF01F, + SystemHintMask = 0xFFFFF01F, + HINT = SystemHintFixed | 0x00000000 +}; + +enum SystemSysOp { + SystemSysFixed = 0xD5080000, + SystemSysFMask = 0xFFF80000, + SystemSysMask = 0xFFF80000, + SYS = SystemSysFixed | 0x00000000 +}; + +// Exception. +enum ExceptionOp { + ExceptionFixed = 0xD4000000, + ExceptionFMask = 0xFF000000, + ExceptionMask = 0xFFE0001F, + HLT = ExceptionFixed | 0x00400000, + BRK = ExceptionFixed | 0x00200000, + SVC = ExceptionFixed | 0x00000001, + HVC = ExceptionFixed | 0x00000002, + SMC = ExceptionFixed | 0x00000003, + DCPS1 = ExceptionFixed | 0x00A00001, + DCPS2 = ExceptionFixed | 0x00A00002, + DCPS3 = ExceptionFixed | 0x00A00003 +}; + +enum MemBarrierOp { + MemBarrierFixed = 0xD503309F, + MemBarrierFMask = 0xFFFFF09F, + MemBarrierMask = 0xFFFFF0FF, + DSB = MemBarrierFixed | 0x00000000, + DMB = MemBarrierFixed | 0x00000020, + ISB = MemBarrierFixed | 0x00000040 +}; + +enum SystemExclusiveMonitorOp { + SystemExclusiveMonitorFixed = 0xD503305F, + SystemExclusiveMonitorFMask = 0xFFFFF0FF, + SystemExclusiveMonitorMask = 0xFFFFF0FF, + CLREX = SystemExclusiveMonitorFixed +}; + +// Any load or store. +enum LoadStoreAnyOp { + LoadStoreAnyFMask = 0x0a000000, + LoadStoreAnyFixed = 0x08000000 +}; + +// Any load pair or store pair. +enum LoadStorePairAnyOp { + LoadStorePairAnyFMask = 0x3a000000, + LoadStorePairAnyFixed = 0x28000000 +}; + +#define LOAD_STORE_PAIR_OP_LIST(V) \ + V(STP, w, 0x00000000), \ + V(LDP, w, 0x00400000), \ + V(LDPSW, x, 0x40400000), \ + V(STP, x, 0x80000000), \ + V(LDP, x, 0x80400000), \ + V(STP, s, 0x04000000), \ + V(LDP, s, 0x04400000), \ + V(STP, d, 0x44000000), \ + V(LDP, d, 0x44400000), \ + V(STP, q, 0x84000000), \ + V(LDP, q, 0x84400000) + +// Load/store pair (post, pre and offset.) 
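// Illustrative expansion (editorial): each LOAD_STORE_PAIR_OP_LIST entry above
// is OR-ed with the Fixed value of the addressing mode defined below, e.g.
//   STP_x_post = LoadStorePairPostIndexFixed | STP_x
//              = 0x28800000 | 0x80000000 = 0xA8800000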
+enum LoadStorePairOp { + LoadStorePairMask = 0xC4400000, + LoadStorePairLBit = 1 << 22, + #define LOAD_STORE_PAIR(A, B, C) \ + A##_##B = C + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR) + #undef LOAD_STORE_PAIR +}; + +enum LoadStorePairPostIndexOp { + LoadStorePairPostIndexFixed = 0x28800000, + LoadStorePairPostIndexFMask = 0x3B800000, + LoadStorePairPostIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \ + A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX) + #undef LOAD_STORE_PAIR_POST_INDEX +}; + +enum LoadStorePairPreIndexOp { + LoadStorePairPreIndexFixed = 0x29800000, + LoadStorePairPreIndexFMask = 0x3B800000, + LoadStorePairPreIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \ + A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX) + #undef LOAD_STORE_PAIR_PRE_INDEX +}; + +enum LoadStorePairOffsetOp { + LoadStorePairOffsetFixed = 0x29000000, + LoadStorePairOffsetFMask = 0x3B800000, + LoadStorePairOffsetMask = 0xFFC00000, + #define LOAD_STORE_PAIR_OFFSET(A, B, C) \ + A##_##B##_off = LoadStorePairOffsetFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET) + #undef LOAD_STORE_PAIR_OFFSET +}; + +enum LoadStorePairNonTemporalOp { + LoadStorePairNonTemporalFixed = 0x28000000, + LoadStorePairNonTemporalFMask = 0x3B800000, + LoadStorePairNonTemporalMask = 0xFFC00000, + LoadStorePairNonTemporalLBit = 1 << 22, + STNP_w = LoadStorePairNonTemporalFixed | STP_w, + LDNP_w = LoadStorePairNonTemporalFixed | LDP_w, + STNP_x = LoadStorePairNonTemporalFixed | STP_x, + LDNP_x = LoadStorePairNonTemporalFixed | LDP_x, + STNP_s = LoadStorePairNonTemporalFixed | STP_s, + LDNP_s = LoadStorePairNonTemporalFixed | LDP_s, + STNP_d = LoadStorePairNonTemporalFixed | STP_d, + LDNP_d = LoadStorePairNonTemporalFixed | LDP_d, + STNP_q = LoadStorePairNonTemporalFixed | STP_q, + LDNP_q = LoadStorePairNonTemporalFixed | LDP_q +}; + +// Load literal. +enum LoadLiteralOp { + LoadLiteralFixed = 0x18000000, + LoadLiteralFMask = 0x3B000000, + LoadLiteralMask = 0xFF000000, + LDR_w_lit = LoadLiteralFixed | 0x00000000, + LDR_x_lit = LoadLiteralFixed | 0x40000000, + LDRSW_x_lit = LoadLiteralFixed | 0x80000000, + PRFM_lit = LoadLiteralFixed | 0xC0000000, + LDR_s_lit = LoadLiteralFixed | 0x04000000, + LDR_d_lit = LoadLiteralFixed | 0x44000000, + LDR_q_lit = LoadLiteralFixed | 0x84000000 +}; + +#define LOAD_STORE_OP_LIST(V) \ + V(ST, RB, w, 0x00000000), \ + V(ST, RH, w, 0x40000000), \ + V(ST, R, w, 0x80000000), \ + V(ST, R, x, 0xC0000000), \ + V(LD, RB, w, 0x00400000), \ + V(LD, RH, w, 0x40400000), \ + V(LD, R, w, 0x80400000), \ + V(LD, R, x, 0xC0400000), \ + V(LD, RSB, x, 0x00800000), \ + V(LD, RSH, x, 0x40800000), \ + V(LD, RSW, x, 0x80800000), \ + V(LD, RSB, w, 0x00C00000), \ + V(LD, RSH, w, 0x40C00000), \ + V(ST, R, b, 0x04000000), \ + V(ST, R, h, 0x44000000), \ + V(ST, R, s, 0x84000000), \ + V(ST, R, d, 0xC4000000), \ + V(ST, R, q, 0x04800000), \ + V(LD, R, b, 0x04400000), \ + V(LD, R, h, 0x44400000), \ + V(LD, R, s, 0x84400000), \ + V(LD, R, d, 0xC4400000), \ + V(LD, R, q, 0x04C00000) + +// Load/store (post, pre, offset and unsigned.) +enum LoadStoreOp { + LoadStoreMask = 0xC4C00000, + LoadStoreVMask = 0x04000000, + #define LOAD_STORE(A, B, C, D) \ + A##B##_##C = D + LOAD_STORE_OP_LIST(LOAD_STORE), + #undef LOAD_STORE + PRFM = 0xC0800000 +}; + +// Load/store unscaled offset. 
+enum LoadStoreUnscaledOffsetOp { + LoadStoreUnscaledOffsetFixed = 0x38000000, + LoadStoreUnscaledOffsetFMask = 0x3B200C00, + LoadStoreUnscaledOffsetMask = 0xFFE00C00, + PRFUM = LoadStoreUnscaledOffsetFixed | PRFM, + #define LOAD_STORE_UNSCALED(A, B, C, D) \ + A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED) + #undef LOAD_STORE_UNSCALED +}; + +// Load/store post index. +enum LoadStorePostIndex { + LoadStorePostIndexFixed = 0x38000400, + LoadStorePostIndexFMask = 0x3B200C00, + LoadStorePostIndexMask = 0xFFE00C00, + #define LOAD_STORE_POST_INDEX(A, B, C, D) \ + A##B##_##C##_post = LoadStorePostIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX) + #undef LOAD_STORE_POST_INDEX +}; + +// Load/store pre index. +enum LoadStorePreIndex { + LoadStorePreIndexFixed = 0x38000C00, + LoadStorePreIndexFMask = 0x3B200C00, + LoadStorePreIndexMask = 0xFFE00C00, + #define LOAD_STORE_PRE_INDEX(A, B, C, D) \ + A##B##_##C##_pre = LoadStorePreIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX) + #undef LOAD_STORE_PRE_INDEX +}; + +// Load/store unsigned offset. +enum LoadStoreUnsignedOffset { + LoadStoreUnsignedOffsetFixed = 0x39000000, + LoadStoreUnsignedOffsetFMask = 0x3B000000, + LoadStoreUnsignedOffsetMask = 0xFFC00000, + PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM, + #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \ + A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET) + #undef LOAD_STORE_UNSIGNED_OFFSET +}; + +// Load/store register offset. +enum LoadStoreRegisterOffset { + LoadStoreRegisterOffsetFixed = 0x38200800, + LoadStoreRegisterOffsetFMask = 0x3B200C00, + LoadStoreRegisterOffsetMask = 0xFFE00C00, + PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM, + #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \ + A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET) + #undef LOAD_STORE_REGISTER_OFFSET +}; + +enum LoadStoreExclusive { + LoadStoreExclusiveFixed = 0x08000000, + LoadStoreExclusiveFMask = 0x3F000000, + LoadStoreExclusiveMask = 0xFFE08000, + STXRB_w = LoadStoreExclusiveFixed | 0x00000000, + STXRH_w = LoadStoreExclusiveFixed | 0x40000000, + STXR_w = LoadStoreExclusiveFixed | 0x80000000, + STXR_x = LoadStoreExclusiveFixed | 0xC0000000, + LDXRB_w = LoadStoreExclusiveFixed | 0x00400000, + LDXRH_w = LoadStoreExclusiveFixed | 0x40400000, + LDXR_w = LoadStoreExclusiveFixed | 0x80400000, + LDXR_x = LoadStoreExclusiveFixed | 0xC0400000, + STXP_w = LoadStoreExclusiveFixed | 0x80200000, + STXP_x = LoadStoreExclusiveFixed | 0xC0200000, + LDXP_w = LoadStoreExclusiveFixed | 0x80600000, + LDXP_x = LoadStoreExclusiveFixed | 0xC0600000, + STLXRB_w = LoadStoreExclusiveFixed | 0x00008000, + STLXRH_w = LoadStoreExclusiveFixed | 0x40008000, + STLXR_w = LoadStoreExclusiveFixed | 0x80008000, + STLXR_x = LoadStoreExclusiveFixed | 0xC0008000, + LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000, + LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000, + LDAXR_w = LoadStoreExclusiveFixed | 0x80408000, + LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000, + STLXP_w = LoadStoreExclusiveFixed | 0x80208000, + STLXP_x = LoadStoreExclusiveFixed | 0xC0208000, + LDAXP_w = LoadStoreExclusiveFixed | 0x80608000, + LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000, + STLRB_w = LoadStoreExclusiveFixed | 0x00808000, + STLRH_w = LoadStoreExclusiveFixed | 0x40808000, + STLR_w = LoadStoreExclusiveFixed | 0x80808000, + STLR_x = LoadStoreExclusiveFixed | 0xC0808000, + LDARB_w = LoadStoreExclusiveFixed | 
0x00C08000, + LDARH_w = LoadStoreExclusiveFixed | 0x40C08000, + LDAR_w = LoadStoreExclusiveFixed | 0x80C08000, + LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000 +}; + +// Conditional compare. +enum ConditionalCompareOp { + ConditionalCompareMask = 0x60000000, + CCMN = 0x20000000, + CCMP = 0x60000000 +}; + +// Conditional compare register. +enum ConditionalCompareRegisterOp { + ConditionalCompareRegisterFixed = 0x1A400000, + ConditionalCompareRegisterFMask = 0x1FE00800, + ConditionalCompareRegisterMask = 0xFFE00C10, + CCMN_w = ConditionalCompareRegisterFixed | CCMN, + CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN, + CCMP_w = ConditionalCompareRegisterFixed | CCMP, + CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP +}; + +// Conditional compare immediate. +enum ConditionalCompareImmediateOp { + ConditionalCompareImmediateFixed = 0x1A400800, + ConditionalCompareImmediateFMask = 0x1FE00800, + ConditionalCompareImmediateMask = 0xFFE00C10, + CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN, + CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN, + CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP, + CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP +}; + +// Conditional select. +enum ConditionalSelectOp { + ConditionalSelectFixed = 0x1A800000, + ConditionalSelectFMask = 0x1FE00000, + ConditionalSelectMask = 0xFFE00C00, + CSEL_w = ConditionalSelectFixed | 0x00000000, + CSEL_x = ConditionalSelectFixed | 0x80000000, + CSEL = CSEL_w, + CSINC_w = ConditionalSelectFixed | 0x00000400, + CSINC_x = ConditionalSelectFixed | 0x80000400, + CSINC = CSINC_w, + CSINV_w = ConditionalSelectFixed | 0x40000000, + CSINV_x = ConditionalSelectFixed | 0xC0000000, + CSINV = CSINV_w, + CSNEG_w = ConditionalSelectFixed | 0x40000400, + CSNEG_x = ConditionalSelectFixed | 0xC0000400, + CSNEG = CSNEG_w +}; + +// Data processing 1 source. +enum DataProcessing1SourceOp { + DataProcessing1SourceFixed = 0x5AC00000, + DataProcessing1SourceFMask = 0x5FE00000, + DataProcessing1SourceMask = 0xFFFFFC00, + RBIT = DataProcessing1SourceFixed | 0x00000000, + RBIT_w = RBIT, + RBIT_x = RBIT | SixtyFourBits, + REV16 = DataProcessing1SourceFixed | 0x00000400, + REV16_w = REV16, + REV16_x = REV16 | SixtyFourBits, + REV = DataProcessing1SourceFixed | 0x00000800, + REV_w = REV, + REV32_x = REV | SixtyFourBits, + REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00, + CLZ = DataProcessing1SourceFixed | 0x00001000, + CLZ_w = CLZ, + CLZ_x = CLZ | SixtyFourBits, + CLS = DataProcessing1SourceFixed | 0x00001400, + CLS_w = CLS, + CLS_x = CLS | SixtyFourBits +}; + +// Data processing 2 source. 
+enum DataProcessing2SourceOp { + DataProcessing2SourceFixed = 0x1AC00000, + DataProcessing2SourceFMask = 0x5FE00000, + DataProcessing2SourceMask = 0xFFE0FC00, + UDIV_w = DataProcessing2SourceFixed | 0x00000800, + UDIV_x = DataProcessing2SourceFixed | 0x80000800, + UDIV = UDIV_w, + SDIV_w = DataProcessing2SourceFixed | 0x00000C00, + SDIV_x = DataProcessing2SourceFixed | 0x80000C00, + SDIV = SDIV_w, + LSLV_w = DataProcessing2SourceFixed | 0x00002000, + LSLV_x = DataProcessing2SourceFixed | 0x80002000, + LSLV = LSLV_w, + LSRV_w = DataProcessing2SourceFixed | 0x00002400, + LSRV_x = DataProcessing2SourceFixed | 0x80002400, + LSRV = LSRV_w, + ASRV_w = DataProcessing2SourceFixed | 0x00002800, + ASRV_x = DataProcessing2SourceFixed | 0x80002800, + ASRV = ASRV_w, + RORV_w = DataProcessing2SourceFixed | 0x00002C00, + RORV_x = DataProcessing2SourceFixed | 0x80002C00, + RORV = RORV_w, + CRC32B = DataProcessing2SourceFixed | 0x00004000, + CRC32H = DataProcessing2SourceFixed | 0x00004400, + CRC32W = DataProcessing2SourceFixed | 0x00004800, + CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00, + CRC32CB = DataProcessing2SourceFixed | 0x00005000, + CRC32CH = DataProcessing2SourceFixed | 0x00005400, + CRC32CW = DataProcessing2SourceFixed | 0x00005800, + CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00 +}; + +// Data processing 3 source. +enum DataProcessing3SourceOp { + DataProcessing3SourceFixed = 0x1B000000, + DataProcessing3SourceFMask = 0x1F000000, + DataProcessing3SourceMask = 0xFFE08000, + MADD_w = DataProcessing3SourceFixed | 0x00000000, + MADD_x = DataProcessing3SourceFixed | 0x80000000, + MADD = MADD_w, + MSUB_w = DataProcessing3SourceFixed | 0x00008000, + MSUB_x = DataProcessing3SourceFixed | 0x80008000, + MSUB = MSUB_w, + SMADDL_x = DataProcessing3SourceFixed | 0x80200000, + SMSUBL_x = DataProcessing3SourceFixed | 0x80208000, + SMULH_x = DataProcessing3SourceFixed | 0x80400000, + UMADDL_x = DataProcessing3SourceFixed | 0x80A00000, + UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000, + UMULH_x = DataProcessing3SourceFixed | 0x80C00000 +}; + +// Floating point compare. +enum FPCompareOp { + FPCompareFixed = 0x1E202000, + FPCompareFMask = 0x5F203C00, + FPCompareMask = 0xFFE0FC1F, + FCMP_s = FPCompareFixed | 0x00000000, + FCMP_d = FPCompareFixed | FP64 | 0x00000000, + FCMP = FCMP_s, + FCMP_s_zero = FPCompareFixed | 0x00000008, + FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008, + FCMP_zero = FCMP_s_zero, + FCMPE_s = FPCompareFixed | 0x00000010, + FCMPE_d = FPCompareFixed | FP64 | 0x00000010, + FCMPE = FCMPE_s, + FCMPE_s_zero = FPCompareFixed | 0x00000018, + FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018, + FCMPE_zero = FCMPE_s_zero +}; + +// Floating point conditional compare. +enum FPConditionalCompareOp { + FPConditionalCompareFixed = 0x1E200400, + FPConditionalCompareFMask = 0x5F200C00, + FPConditionalCompareMask = 0xFFE00C10, + FCCMP_s = FPConditionalCompareFixed | 0x00000000, + FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000, + FCCMP = FCCMP_s, + FCCMPE_s = FPConditionalCompareFixed | 0x00000010, + FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010, + FCCMPE = FCCMPE_s +}; + +// Floating point conditional select. +enum FPConditionalSelectOp { + FPConditionalSelectFixed = 0x1E200C00, + FPConditionalSelectFMask = 0x5F200C00, + FPConditionalSelectMask = 0xFFE00C00, + FCSEL_s = FPConditionalSelectFixed | 0x00000000, + FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000, + FCSEL = FCSEL_s +}; + +// Floating point immediate. 
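// Note (editorial): FMOV (immediate) carries its operand as the 8-bit
// encoding produced by FP32ToImm8/FP64ToImm8 declared earlier, placed in the
// ImmFP field (bits 20:13).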
+enum FPImmediateOp { + FPImmediateFixed = 0x1E201000, + FPImmediateFMask = 0x5F201C00, + FPImmediateMask = 0xFFE01C00, + FMOV_s_imm = FPImmediateFixed | 0x00000000, + FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000 +}; + +// Floating point data processing 1 source. +enum FPDataProcessing1SourceOp { + FPDataProcessing1SourceFixed = 0x1E204000, + FPDataProcessing1SourceFMask = 0x5F207C00, + FPDataProcessing1SourceMask = 0xFFFFFC00, + FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, + FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, + FMOV = FMOV_s, + FABS_s = FPDataProcessing1SourceFixed | 0x00008000, + FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, + FABS = FABS_s, + FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, + FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, + FNEG = FNEG_s, + FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, + FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, + FSQRT = FSQRT_s, + FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, + FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000, + FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000, + FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000, + FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000, + FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000, + FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000, + FRINTN = FRINTN_s, + FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000, + FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000, + FRINTP = FRINTP_s, + FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000, + FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000, + FRINTM = FRINTM_s, + FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000, + FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000, + FRINTZ = FRINTZ_s, + FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000, + FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000, + FRINTA = FRINTA_s, + FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000, + FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000, + FRINTX = FRINTX_s, + FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000, + FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000, + FRINTI = FRINTI_s +}; + +// Floating point data processing 2 source. +enum FPDataProcessing2SourceOp { + FPDataProcessing2SourceFixed = 0x1E200800, + FPDataProcessing2SourceFMask = 0x5F200C00, + FPDataProcessing2SourceMask = 0xFFE0FC00, + FMUL = FPDataProcessing2SourceFixed | 0x00000000, + FMUL_s = FMUL, + FMUL_d = FMUL | FP64, + FDIV = FPDataProcessing2SourceFixed | 0x00001000, + FDIV_s = FDIV, + FDIV_d = FDIV | FP64, + FADD = FPDataProcessing2SourceFixed | 0x00002000, + FADD_s = FADD, + FADD_d = FADD | FP64, + FSUB = FPDataProcessing2SourceFixed | 0x00003000, + FSUB_s = FSUB, + FSUB_d = FSUB | FP64, + FMAX = FPDataProcessing2SourceFixed | 0x00004000, + FMAX_s = FMAX, + FMAX_d = FMAX | FP64, + FMIN = FPDataProcessing2SourceFixed | 0x00005000, + FMIN_s = FMIN, + FMIN_d = FMIN | FP64, + FMAXNM = FPDataProcessing2SourceFixed | 0x00006000, + FMAXNM_s = FMAXNM, + FMAXNM_d = FMAXNM | FP64, + FMINNM = FPDataProcessing2SourceFixed | 0x00007000, + FMINNM_s = FMINNM, + FMINNM_d = FMINNM | FP64, + FNMUL = FPDataProcessing2SourceFixed | 0x00008000, + FNMUL_s = FNMUL, + FNMUL_d = FNMUL | FP64 +}; + +// Floating point data processing 3 source. 
+enum FPDataProcessing3SourceOp { + FPDataProcessing3SourceFixed = 0x1F000000, + FPDataProcessing3SourceFMask = 0x5F000000, + FPDataProcessing3SourceMask = 0xFFE08000, + FMADD_s = FPDataProcessing3SourceFixed | 0x00000000, + FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000, + FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000, + FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000, + FMADD_d = FPDataProcessing3SourceFixed | 0x00400000, + FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000, + FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000, + FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000 +}; + +// Conversion between floating point and integer. +enum FPIntegerConvertOp { + FPIntegerConvertFixed = 0x1E200000, + FPIntegerConvertFMask = 0x5F20FC00, + FPIntegerConvertMask = 0xFFFFFC00, + FCVTNS = FPIntegerConvertFixed | 0x00000000, + FCVTNS_ws = FCVTNS, + FCVTNS_xs = FCVTNS | SixtyFourBits, + FCVTNS_wd = FCVTNS | FP64, + FCVTNS_xd = FCVTNS | SixtyFourBits | FP64, + FCVTNU = FPIntegerConvertFixed | 0x00010000, + FCVTNU_ws = FCVTNU, + FCVTNU_xs = FCVTNU | SixtyFourBits, + FCVTNU_wd = FCVTNU | FP64, + FCVTNU_xd = FCVTNU | SixtyFourBits | FP64, + FCVTPS = FPIntegerConvertFixed | 0x00080000, + FCVTPS_ws = FCVTPS, + FCVTPS_xs = FCVTPS | SixtyFourBits, + FCVTPS_wd = FCVTPS | FP64, + FCVTPS_xd = FCVTPS | SixtyFourBits | FP64, + FCVTPU = FPIntegerConvertFixed | 0x00090000, + FCVTPU_ws = FCVTPU, + FCVTPU_xs = FCVTPU | SixtyFourBits, + FCVTPU_wd = FCVTPU | FP64, + FCVTPU_xd = FCVTPU | SixtyFourBits | FP64, + FCVTMS = FPIntegerConvertFixed | 0x00100000, + FCVTMS_ws = FCVTMS, + FCVTMS_xs = FCVTMS | SixtyFourBits, + FCVTMS_wd = FCVTMS | FP64, + FCVTMS_xd = FCVTMS | SixtyFourBits | FP64, + FCVTMU = FPIntegerConvertFixed | 0x00110000, + FCVTMU_ws = FCVTMU, + FCVTMU_xs = FCVTMU | SixtyFourBits, + FCVTMU_wd = FCVTMU | FP64, + FCVTMU_xd = FCVTMU | SixtyFourBits | FP64, + FCVTZS = FPIntegerConvertFixed | 0x00180000, + FCVTZS_ws = FCVTZS, + FCVTZS_xs = FCVTZS | SixtyFourBits, + FCVTZS_wd = FCVTZS | FP64, + FCVTZS_xd = FCVTZS | SixtyFourBits | FP64, + FCVTZU = FPIntegerConvertFixed | 0x00190000, + FCVTZU_ws = FCVTZU, + FCVTZU_xs = FCVTZU | SixtyFourBits, + FCVTZU_wd = FCVTZU | FP64, + FCVTZU_xd = FCVTZU | SixtyFourBits | FP64, + SCVTF = FPIntegerConvertFixed | 0x00020000, + SCVTF_sw = SCVTF, + SCVTF_sx = SCVTF | SixtyFourBits, + SCVTF_dw = SCVTF | FP64, + SCVTF_dx = SCVTF | SixtyFourBits | FP64, + UCVTF = FPIntegerConvertFixed | 0x00030000, + UCVTF_sw = UCVTF, + UCVTF_sx = UCVTF | SixtyFourBits, + UCVTF_dw = UCVTF | FP64, + UCVTF_dx = UCVTF | SixtyFourBits | FP64, + FCVTAS = FPIntegerConvertFixed | 0x00040000, + FCVTAS_ws = FCVTAS, + FCVTAS_xs = FCVTAS | SixtyFourBits, + FCVTAS_wd = FCVTAS | FP64, + FCVTAS_xd = FCVTAS | SixtyFourBits | FP64, + FCVTAU = FPIntegerConvertFixed | 0x00050000, + FCVTAU_ws = FCVTAU, + FCVTAU_xs = FCVTAU | SixtyFourBits, + FCVTAU_wd = FCVTAU | FP64, + FCVTAU_xd = FCVTAU | SixtyFourBits | FP64, + FMOV_ws = FPIntegerConvertFixed | 0x00060000, + FMOV_sw = FPIntegerConvertFixed | 0x00070000, + FMOV_xd = FMOV_ws | SixtyFourBits | FP64, + FMOV_dx = FMOV_sw | SixtyFourBits | FP64, + FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000, + FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000 +}; + +// Conversion between fixed point and floating point. 
+enum FPFixedPointConvertOp { + FPFixedPointConvertFixed = 0x1E000000, + FPFixedPointConvertFMask = 0x5F200000, + FPFixedPointConvertMask = 0xFFFF0000, + FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000, + FCVTZS_ws_fixed = FCVTZS_fixed, + FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits, + FCVTZS_wd_fixed = FCVTZS_fixed | FP64, + FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64, + FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000, + FCVTZU_ws_fixed = FCVTZU_fixed, + FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits, + FCVTZU_wd_fixed = FCVTZU_fixed | FP64, + FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64, + SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000, + SCVTF_sw_fixed = SCVTF_fixed, + SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits, + SCVTF_dw_fixed = SCVTF_fixed | FP64, + SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64, + UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000, + UCVTF_sw_fixed = UCVTF_fixed, + UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits, + UCVTF_dw_fixed = UCVTF_fixed | FP64, + UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64 +}; + +// Crypto - two register SHA. +enum Crypto2RegSHAOp { + Crypto2RegSHAFixed = 0x5E280800, + Crypto2RegSHAFMask = 0xFF3E0C00 +}; + +// Crypto - three register SHA. +enum Crypto3RegSHAOp { + Crypto3RegSHAFixed = 0x5E000000, + Crypto3RegSHAFMask = 0xFF208C00 +}; + +// Crypto - AES. +enum CryptoAESOp { + CryptoAESFixed = 0x4E280800, + CryptoAESFMask = 0xFF3E0C00 +}; + +// NEON instructions with two register operands. +enum NEON2RegMiscOp { + NEON2RegMiscFixed = 0x0E200800, + NEON2RegMiscFMask = 0x9F3E0C00, + NEON2RegMiscMask = 0xBF3FFC00, + NEON2RegMiscUBit = 0x20000000, + NEON_REV64 = NEON2RegMiscFixed | 0x00000000, + NEON_REV32 = NEON2RegMiscFixed | 0x20000000, + NEON_REV16 = NEON2RegMiscFixed | 0x00001000, + NEON_SADDLP = NEON2RegMiscFixed | 0x00002000, + NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit, + NEON_SUQADD = NEON2RegMiscFixed | 0x00003000, + NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit, + NEON_CLS = NEON2RegMiscFixed | 0x00004000, + NEON_CLZ = NEON2RegMiscFixed | 0x20004000, + NEON_CNT = NEON2RegMiscFixed | 0x00005000, + NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000, + NEON_SADALP = NEON2RegMiscFixed | 0x00006000, + NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit, + NEON_SQABS = NEON2RegMiscFixed | 0x00007000, + NEON_SQNEG = NEON2RegMiscFixed | 0x20007000, + NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000, + NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000, + NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000, + NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000, + NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000, + NEON_ABS = NEON2RegMiscFixed | 0x0000B000, + NEON_NEG = NEON2RegMiscFixed | 0x2000B000, + NEON_XTN = NEON2RegMiscFixed | 0x00012000, + NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000, + NEON_SHLL = NEON2RegMiscFixed | 0x20013000, + NEON_SQXTN = NEON2RegMiscFixed | 0x00014000, + NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit, + + NEON2RegMiscOpcode = 0x0001F000, + NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode, + NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode, + NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode, + NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode, + + // These instructions use only one bit of the size field. The other bit is + // used to distinguish between instructions. 
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000, + NEON_FABS = NEON2RegMiscFixed | 0x0080F000, + NEON_FNEG = NEON2RegMiscFixed | 0x2080F000, + NEON_FCVTN = NEON2RegMiscFixed | 0x00016000, + NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000, + NEON_FCVTL = NEON2RegMiscFixed | 0x00017000, + NEON_FRINTN = NEON2RegMiscFixed | 0x00018000, + NEON_FRINTA = NEON2RegMiscFixed | 0x20018000, + NEON_FRINTP = NEON2RegMiscFixed | 0x00818000, + NEON_FRINTM = NEON2RegMiscFixed | 0x00019000, + NEON_FRINTX = NEON2RegMiscFixed | 0x20019000, + NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000, + NEON_FRINTI = NEON2RegMiscFixed | 0x20819000, + NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000, + NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit, + NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000, + NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit, + NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000, + NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit, + NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000, + NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit, + NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000, + NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit, + NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000, + NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000, + NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit, + NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000, + NEON_URECPE = NEON2RegMiscFixed | 0x0081C000, + NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000, + NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000, + NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000, + NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000, + NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000, + NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000, + NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000, + + NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode, + NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode +}; + +// NEON instructions with three same-type operands. 
+enum NEON3SameOp { + NEON3SameFixed = 0x0E200400, + NEON3SameFMask = 0x9F200400, + NEON3SameMask = 0xBF20FC00, + NEON3SameUBit = 0x20000000, + NEON_ADD = NEON3SameFixed | 0x00008000, + NEON_ADDP = NEON3SameFixed | 0x0000B800, + NEON_SHADD = NEON3SameFixed | 0x00000000, + NEON_SHSUB = NEON3SameFixed | 0x00002000, + NEON_SRHADD = NEON3SameFixed | 0x00001000, + NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800, + NEON_CMGE = NEON3SameFixed | 0x00003800, + NEON_CMGT = NEON3SameFixed | 0x00003000, + NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT, + NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE, + NEON_CMTST = NEON3SameFixed | 0x00008800, + NEON_MLA = NEON3SameFixed | 0x00009000, + NEON_MLS = NEON3SameFixed | 0x20009000, + NEON_MUL = NEON3SameFixed | 0x00009800, + NEON_PMUL = NEON3SameFixed | 0x20009800, + NEON_SRSHL = NEON3SameFixed | 0x00005000, + NEON_SQSHL = NEON3SameFixed | 0x00004800, + NEON_SQRSHL = NEON3SameFixed | 0x00005800, + NEON_SSHL = NEON3SameFixed | 0x00004000, + NEON_SMAX = NEON3SameFixed | 0x00006000, + NEON_SMAXP = NEON3SameFixed | 0x0000A000, + NEON_SMIN = NEON3SameFixed | 0x00006800, + NEON_SMINP = NEON3SameFixed | 0x0000A800, + NEON_SABD = NEON3SameFixed | 0x00007000, + NEON_SABA = NEON3SameFixed | 0x00007800, + NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD, + NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA, + NEON_SQADD = NEON3SameFixed | 0x00000800, + NEON_SQSUB = NEON3SameFixed | 0x00002800, + NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000, + NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD, + NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB, + NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD, + NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX, + NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP, + NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN, + NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP, + NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL, + NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD, + NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL, + NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL, + NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB, + NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL, + NEON_SQDMULH = NEON3SameFixed | 0x0000B000, + NEON_SQRDMULH = NEON3SameFixed | 0x2000B000, + + // NEON floating point instructions with three same-type operands. 
+ NEON3SameFPFixed = NEON3SameFixed | 0x0000C000, + NEON3SameFPFMask = NEON3SameFMask | 0x0000C000, + NEON3SameFPMask = NEON3SameMask | 0x00800000, + NEON_FADD = NEON3SameFixed | 0x0000D000, + NEON_FSUB = NEON3SameFixed | 0x0080D000, + NEON_FMUL = NEON3SameFixed | 0x2000D800, + NEON_FDIV = NEON3SameFixed | 0x2000F800, + NEON_FMAX = NEON3SameFixed | 0x0000F000, + NEON_FMAXNM = NEON3SameFixed | 0x0000C000, + NEON_FMAXP = NEON3SameFixed | 0x2000F000, + NEON_FMAXNMP = NEON3SameFixed | 0x2000C000, + NEON_FMIN = NEON3SameFixed | 0x0080F000, + NEON_FMINNM = NEON3SameFixed | 0x0080C000, + NEON_FMINP = NEON3SameFixed | 0x2080F000, + NEON_FMINNMP = NEON3SameFixed | 0x2080C000, + NEON_FMLA = NEON3SameFixed | 0x0000C800, + NEON_FMLS = NEON3SameFixed | 0x0080C800, + NEON_FMULX = NEON3SameFixed | 0x0000D800, + NEON_FRECPS = NEON3SameFixed | 0x0000F800, + NEON_FRSQRTS = NEON3SameFixed | 0x0080F800, + NEON_FABD = NEON3SameFixed | 0x2080D000, + NEON_FADDP = NEON3SameFixed | 0x2000D000, + NEON_FCMEQ = NEON3SameFixed | 0x0000E000, + NEON_FCMGE = NEON3SameFixed | 0x2000E000, + NEON_FCMGT = NEON3SameFixed | 0x2080E000, + NEON_FACGE = NEON3SameFixed | 0x2000E800, + NEON_FACGT = NEON3SameFixed | 0x2080E800, + + // NEON logical instructions with three same-type operands. + NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800, + NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800, + NEON3SameLogicalMask = 0xBFE0FC00, + NEON3SameLogicalFormatMask = NEON_Q, + NEON_AND = NEON3SameLogicalFixed | 0x00000000, + NEON_ORR = NEON3SameLogicalFixed | 0x00A00000, + NEON_ORN = NEON3SameLogicalFixed | 0x00C00000, + NEON_EOR = NEON3SameLogicalFixed | 0x20000000, + NEON_BIC = NEON3SameLogicalFixed | 0x00400000, + NEON_BIF = NEON3SameLogicalFixed | 0x20C00000, + NEON_BIT = NEON3SameLogicalFixed | 0x20800000, + NEON_BSL = NEON3SameLogicalFixed | 0x20400000 +}; + +// NEON instructions with three different-type operands. 
+enum NEON3DifferentOp { + NEON3DifferentFixed = 0x0E200000, + NEON3DifferentFMask = 0x9F200C00, + NEON3DifferentMask = 0xFF20FC00, + NEON_ADDHN = NEON3DifferentFixed | 0x00004000, + NEON_ADDHN2 = NEON_ADDHN | NEON_Q, + NEON_PMULL = NEON3DifferentFixed | 0x0000E000, + NEON_PMULL2 = NEON_PMULL | NEON_Q, + NEON_RADDHN = NEON3DifferentFixed | 0x20004000, + NEON_RADDHN2 = NEON_RADDHN | NEON_Q, + NEON_RSUBHN = NEON3DifferentFixed | 0x20006000, + NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q, + NEON_SABAL = NEON3DifferentFixed | 0x00005000, + NEON_SABAL2 = NEON_SABAL | NEON_Q, + NEON_SABDL = NEON3DifferentFixed | 0x00007000, + NEON_SABDL2 = NEON_SABDL | NEON_Q, + NEON_SADDL = NEON3DifferentFixed | 0x00000000, + NEON_SADDL2 = NEON_SADDL | NEON_Q, + NEON_SADDW = NEON3DifferentFixed | 0x00001000, + NEON_SADDW2 = NEON_SADDW | NEON_Q, + NEON_SMLAL = NEON3DifferentFixed | 0x00008000, + NEON_SMLAL2 = NEON_SMLAL | NEON_Q, + NEON_SMLSL = NEON3DifferentFixed | 0x0000A000, + NEON_SMLSL2 = NEON_SMLSL | NEON_Q, + NEON_SMULL = NEON3DifferentFixed | 0x0000C000, + NEON_SMULL2 = NEON_SMULL | NEON_Q, + NEON_SSUBL = NEON3DifferentFixed | 0x00002000, + NEON_SSUBL2 = NEON_SSUBL | NEON_Q, + NEON_SSUBW = NEON3DifferentFixed | 0x00003000, + NEON_SSUBW2 = NEON_SSUBW | NEON_Q, + NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000, + NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q, + NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000, + NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q, + NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000, + NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q, + NEON_SUBHN = NEON3DifferentFixed | 0x00006000, + NEON_SUBHN2 = NEON_SUBHN | NEON_Q, + NEON_UABAL = NEON_SABAL | NEON3SameUBit, + NEON_UABAL2 = NEON_UABAL | NEON_Q, + NEON_UABDL = NEON_SABDL | NEON3SameUBit, + NEON_UABDL2 = NEON_UABDL | NEON_Q, + NEON_UADDL = NEON_SADDL | NEON3SameUBit, + NEON_UADDL2 = NEON_UADDL | NEON_Q, + NEON_UADDW = NEON_SADDW | NEON3SameUBit, + NEON_UADDW2 = NEON_UADDW | NEON_Q, + NEON_UMLAL = NEON_SMLAL | NEON3SameUBit, + NEON_UMLAL2 = NEON_UMLAL | NEON_Q, + NEON_UMLSL = NEON_SMLSL | NEON3SameUBit, + NEON_UMLSL2 = NEON_UMLSL | NEON_Q, + NEON_UMULL = NEON_SMULL | NEON3SameUBit, + NEON_UMULL2 = NEON_UMULL | NEON_Q, + NEON_USUBL = NEON_SSUBL | NEON3SameUBit, + NEON_USUBL2 = NEON_USUBL | NEON_Q, + NEON_USUBW = NEON_SSUBW | NEON3SameUBit, + NEON_USUBW2 = NEON_USUBW | NEON_Q +}; + +// NEON instructions operating across vectors. +enum NEONAcrossLanesOp { + NEONAcrossLanesFixed = 0x0E300800, + NEONAcrossLanesFMask = 0x9F3E0C00, + NEONAcrossLanesMask = 0xBF3FFC00, + NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000, + NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000, + NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000, + NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000, + NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000, + NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000, + NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000, + + // NEON floating point across instructions. + NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x0000C000, + NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x0000C000, + NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x00800000, + + NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000, + NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000, + NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000, + NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000 +}; + +// NEON instructions with indexed element operand. 
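Two conventions run through the integer NEON groups above: the unsigned form of an operation is the signed encoding with the U bit set, and the second-half ("2") form of a widening or narrowing operation is the base encoding with the Q bit set. A few compile-time spot checks, assuming this header is included and the code sits in namespace vixl; the by-element group announced just above follows them:

  static_assert(NEON_UMAX == (NEON_SMAX | NEON3SameUBit),
                "unsigned NEON3Same forms set the U bit on the signed encoding");
  static_assert(NEON_SUB == (NEON_ADD | NEON3SameUBit),
                "vector SUB reuses the ADD opcode with the U bit set");
  static_assert(NEON_SMLAL2 == (NEON_SMLAL | NEON_Q),
                "the '2' forms set the Q bit to select the upper source halves");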
+enum NEONByIndexedElementOp { + NEONByIndexedElementFixed = 0x0F000000, + NEONByIndexedElementFMask = 0x9F000400, + NEONByIndexedElementMask = 0xBF00F400, + NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000, + NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000, + NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000, + NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000, + NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000, + NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000, + NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000, + NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000, + NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000, + NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000, + NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000, + NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000, + NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000, + NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000, + + // Floating point instructions. + NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000, + NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000, + NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000, + NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000, + NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000 +}; + +// NEON register copy. +enum NEONCopyOp { + NEONCopyFixed = 0x0E000400, + NEONCopyFMask = 0x9FE08400, + NEONCopyMask = 0x3FE08400, + NEONCopyInsElementMask = NEONCopyMask | 0x40000000, + NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800, + NEONCopyDupElementMask = NEONCopyMask | 0x20007800, + NEONCopyDupGeneralMask = NEONCopyDupElementMask, + NEONCopyUmovMask = NEONCopyMask | 0x20007800, + NEONCopySmovMask = NEONCopyMask | 0x20007800, + NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000, + NEON_INS_GENERAL = NEONCopyFixed | 0x40001800, + NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000, + NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800, + NEON_SMOV = NEONCopyFixed | 0x00002800, + NEON_UMOV = NEONCopyFixed | 0x00003800 +}; + +// NEON extract. +enum NEONExtractOp { + NEONExtractFixed = 0x2E000000, + NEONExtractFMask = 0xBF208400, + NEONExtractMask = 0xBFE08400, + NEON_EXT = NEONExtractFixed | 0x00000000 +}; + +enum NEONLoadStoreMultiOp { + NEONLoadStoreMultiL = 0x00400000, + NEONLoadStoreMulti1_1v = 0x00007000, + NEONLoadStoreMulti1_2v = 0x0000A000, + NEONLoadStoreMulti1_3v = 0x00006000, + NEONLoadStoreMulti1_4v = 0x00002000, + NEONLoadStoreMulti2 = 0x00008000, + NEONLoadStoreMulti3 = 0x00004000, + NEONLoadStoreMulti4 = 0x00000000 +}; + +// NEON load/store multiple structures. 
+enum NEONLoadStoreMultiStructOp { + NEONLoadStoreMultiStructFixed = 0x0C000000, + NEONLoadStoreMultiStructFMask = 0xBFBF0000, + NEONLoadStoreMultiStructMask = 0xBFFFF000, + NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed, + NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed | + NEONLoadStoreMultiL, + NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v, + NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v, + NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v, + NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v, + NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2, + NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3, + NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4, + NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v, + NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v, + NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v, + NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v, + NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2, + NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3, + NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4 +}; + +// NEON load/store multiple structures with post-index addressing. +enum NEONLoadStoreMultiStructPostIndexOp { + NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000, + NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000, + NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000, + NEONLoadStoreMultiStructPostIndex = 0x00800000, + NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex, + NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex, + NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex, + NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex, + NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex +}; + +enum NEONLoadStoreSingleOp { + NEONLoadStoreSingle1 = 0x00000000, + NEONLoadStoreSingle2 = 0x00200000, + NEONLoadStoreSingle3 = 0x00002000, + NEONLoadStoreSingle4 = 0x00202000, + NEONLoadStoreSingleL = 0x00400000, + NEONLoadStoreSingle_b = 0x00000000, + NEONLoadStoreSingle_h = 0x00004000, + NEONLoadStoreSingle_s = 0x00008000, + NEONLoadStoreSingle_d = 0x00008400, + NEONLoadStoreSingleAllLanes = 0x0000C000, + NEONLoadStoreSingleLenMask = 0x00202000 +}; + +// NEON load/store single structure. 
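In the load/store multiple-structures group above, the NEONLoadStoreMultiL bit distinguishes LD1/LD2/LD3/LD4 from the corresponding stores, and the LD1/ST1 register-list lengths are spread non-monotonically across the opcode field in bits 15:12. A small decode sketch, assuming this header is included and the code sits in namespace vixl (the helper name is illustrative, not part of the patch); the single-structure group announced just above follows it:

  // Number of registers named by an LD1/ST1 (multiple structures) opcode, or 0
  // if the opcode field is one of the LD2/LD3/LD4 forms instead. Assumes the
  // instruction already matched NEONLoadStoreMultiStructFMask.
  static int LD1ST1RegisterCount(uint32_t instr) {
    switch (instr & 0x0000F000) {
      case NEONLoadStoreMulti1_1v: return 1;
      case NEONLoadStoreMulti1_2v: return 2;
      case NEONLoadStoreMulti1_3v: return 3;
      case NEONLoadStoreMulti1_4v: return 4;
      default:                     return 0;
    }
  }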
+enum NEONLoadStoreSingleStructOp { + NEONLoadStoreSingleStructFixed = 0x0D000000, + NEONLoadStoreSingleStructFMask = 0xBF9F0000, + NEONLoadStoreSingleStructMask = 0xBFFFE000, + NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed | + NEONLoadStoreSingleL, + NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 | + NEONLoadStoreSingleStructFixed, + NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b, + NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h, + NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s, + NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d, + NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes, + NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b, + NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h, + NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s, + NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d, + + NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b, + NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h, + NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s, + NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d, + NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes, + NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b, + NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h, + NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s, + NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d, + + NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b, + NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h, + NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s, + NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d, + NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes, + NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b, + NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h, + NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s, + NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d, + + NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b, + NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h, + NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s, + NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d, + NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes, + NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b, + NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h, + NEON_ST4_s = 
NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s, + NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d +}; + +// NEON load/store single structure with post-index addressing. +enum NEONLoadStoreSingleStructPostIndexOp { + NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000, + NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000, + NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000, + NEONLoadStoreSingleStructPostIndex = 0x00800000, + NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex +}; + +// NEON modified immediate. +enum NEONModifiedImmediateOp { + NEONModifiedImmediateFixed = 0x0F000400, + NEONModifiedImmediateFMask = 0x9FF80400, + NEONModifiedImmediateOpBit = 0x20000000, + NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000, + NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000, + NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000, + NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000 +}; + +// NEON shift immediate. 
+enum NEONShiftImmediateOp { + NEONShiftImmediateFixed = 0x0F000400, + NEONShiftImmediateFMask = 0x9F800400, + NEONShiftImmediateMask = 0xBF80FC00, + NEONShiftImmediateUBit = 0x20000000, + NEON_SHL = NEONShiftImmediateFixed | 0x00005000, + NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000, + NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000, + NEON_SLI = NEONShiftImmediateFixed | 0x20005000, + NEON_SRI = NEONShiftImmediateFixed | 0x20004000, + NEON_SHRN = NEONShiftImmediateFixed | 0x00008000, + NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800, + NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000, + NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800, + NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000, + NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800, + NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000, + NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800, + NEON_SSHR = NEONShiftImmediateFixed | 0x00000000, + NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000, + NEON_USHR = NEONShiftImmediateFixed | 0x20000000, + NEON_URSHR = NEONShiftImmediateFixed | 0x20002000, + NEON_SSRA = NEONShiftImmediateFixed | 0x00001000, + NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000, + NEON_USRA = NEONShiftImmediateFixed | 0x20001000, + NEON_URSRA = NEONShiftImmediateFixed | 0x20003000, + NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000, + NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000, + NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000, + NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800, + NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800, + NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000, + NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000 +}; + +// NEON table. +enum NEONTableOp { + NEONTableFixed = 0x0E000000, + NEONTableFMask = 0xBF208C00, + NEONTableExt = 0x00001000, + NEONTableMask = 0xBF20FC00, + NEON_TBL_1v = NEONTableFixed | 0x00000000, + NEON_TBL_2v = NEONTableFixed | 0x00002000, + NEON_TBL_3v = NEONTableFixed | 0x00004000, + NEON_TBL_4v = NEONTableFixed | 0x00006000, + NEON_TBX_1v = NEON_TBL_1v | NEONTableExt, + NEON_TBX_2v = NEON_TBL_2v | NEONTableExt, + NEON_TBX_3v = NEON_TBL_3v | NEONTableExt, + NEON_TBX_4v = NEON_TBL_4v | NEONTableExt +}; + +// NEON perm. +enum NEONPermOp { + NEONPermFixed = 0x0E000800, + NEONPermFMask = 0xBF208C00, + NEONPermMask = 0x3F20FC00, + NEON_UZP1 = NEONPermFixed | 0x00001000, + NEON_TRN1 = NEONPermFixed | 0x00002000, + NEON_ZIP1 = NEONPermFixed | 0x00003000, + NEON_UZP2 = NEONPermFixed | 0x00005000, + NEON_TRN2 = NEONPermFixed | 0x00006000, + NEON_ZIP2 = NEONPermFixed | 0x00007000 +}; + +// NEON scalar instructions with two register operands. 
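For the table-lookup group above, TBX is TBL with the NEONTableExt bit set, and the length of the table register list is encoded in bits 14:13 (the 0x2000 steps between NEON_TBL_1v and NEON_TBL_4v). A small sketch, assuming this header is included and the code sits in namespace vixl; the scalar two-register group announced just above follows it:

  static bool IsNEONTable(uint32_t instr) {
    return (instr & NEONTableFMask) == NEONTableFixed;
  }

  // Number of table registers (1 to 4) consumed by a TBL/TBX instruction.
  static int NEONTableListLength(uint32_t instr) {
    return static_cast<int>((instr & 0x00006000) >> 13) + 1;
  }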
+enum NEONScalar2RegMiscOp { + NEONScalar2RegMiscFixed = 0x5E200800, + NEONScalar2RegMiscFMask = 0xDF3E0C00, + NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask, + NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero, + NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero, + NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero, + NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero, + NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero, + NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS, + NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS, + NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG, + NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG, + NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN, + NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN, + NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN, + NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD, + NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD, + + NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode, + NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode, + + NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000, + NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE, + NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE, + NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF, + NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF, + NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero, + NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero, + NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero, + NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero, + NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero, + NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000, + NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS, + NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU, + NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS, + NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU, + NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS, + NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU, + NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS, + NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU, + NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS, + NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU, + NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN +}; + +// NEON scalar instructions with three same-type operands. 
+enum NEONScalar3SameOp { + NEONScalar3SameFixed = 0x5E200400, + NEONScalar3SameFMask = 0xDF200400, + NEONScalar3SameMask = 0xFF20FC00, + NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD, + NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ, + NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE, + NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT, + NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI, + NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS, + NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST, + NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB, + NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD, + NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD, + NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB, + NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB, + NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL, + NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL, + NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL, + NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL, + NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL, + NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL, + NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL, + NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL, + NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH, + NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH, + + // NEON floating point scalar instructions with three same-type operands. + NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000, + NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000, + NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000, + NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE, + NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT, + NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ, + NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE, + NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT, + NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX, + NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS, + NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS, + NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD +}; + +// NEON scalar instructions with three different-type operands. +enum NEONScalar3DiffOp { + NEONScalar3DiffFixed = 0x5E200000, + NEONScalar3DiffFMask = 0xDF200C00, + NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask, + NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL, + NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL, + NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL +}; + +// NEON scalar instructions with indexed element operand. +enum NEONScalarByIndexedElementOp { + NEONScalarByIndexedElementFixed = 0x5F000000, + NEONScalarByIndexedElementFMask = 0xDF000400, + NEONScalarByIndexedElementMask = 0xFF00F400, + NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement, + NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement, + NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement, + NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement, + NEON_SQRDMULH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement, + + // Floating point instructions. 
+ NEONScalarByIndexedElementFPFixed + = NEONScalarByIndexedElementFixed | 0x00800000, + NEONScalarByIndexedElementFPMask + = NEONScalarByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement, + NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement, + NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement, + NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement +}; + +// NEON scalar register copy. +enum NEONScalarCopyOp { + NEONScalarCopyFixed = 0x5E000400, + NEONScalarCopyFMask = 0xDFE08400, + NEONScalarCopyMask = 0xFFE0FC00, + NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT +}; + +// NEON scalar pairwise instructions. +enum NEONScalarPairwiseOp { + NEONScalarPairwiseFixed = 0x5E300800, + NEONScalarPairwiseFMask = 0xDF3E0C00, + NEONScalarPairwiseMask = 0xFFB1F800, + NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000, + NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000, + NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000, + NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000, + NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000, + NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000 +}; + +// NEON scalar shift immediate. +enum NEONScalarShiftImmediateOp { + NEONScalarShiftImmediateFixed = 0x5F000400, + NEONScalarShiftImmediateFMask = 0xDF800400, + NEONScalarShiftImmediateMask = 0xFF80FC00, + NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL, + NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI, + NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI, + NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR, + NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR, + NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR, + NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR, + NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA, + NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA, + NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA, + NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA, + NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN, + NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN, + NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN, + NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN, + NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN, + NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN, + NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU, + NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm, + NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm, + NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm, + NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm, + NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm, + NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm +}; + +// Unimplemented and unallocated instructions. These are defined to make fixed +// bit assertion easier. +enum UnimplementedOp { + UnimplementedFixed = 0x00000000, + UnimplementedFMask = 0x00000000 +}; + +enum UnallocatedOp { + UnallocatedFixed = 0x00000000, + UnallocatedFMask = 0x00000000 +}; + +} // namespace vixl + +#endif // VIXL_A64_CONSTANTS_A64_H_ diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-vixl.cpp new file mode 100644 index 000000000..804f0cad1 --- /dev/null +++ b/js/src/jit/arm64/vixl/Cpu-vixl.cpp @@ -0,0 +1,170 @@ +// Copyright 2015, ARM Limited +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Cpu-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +namespace vixl { + +// Initialise to smallest possible cache size. +unsigned CPU::dcache_line_size_ = 1; +unsigned CPU::icache_line_size_ = 1; + + +// Currently computes I and D cache line size. +void CPU::SetUp() { + uint32_t cache_type_register = GetCacheType(); + + // The cache type register holds information about the caches, including I + // D caches line size. + static const int kDCacheLineSizeShift = 16; + static const int kICacheLineSizeShift = 0; + static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift; + static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift; + + // The cache type register holds the size of the I and D caches in words as + // a power of two. + uint32_t dcache_line_size_power_of_two = + (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift; + uint32_t icache_line_size_power_of_two = + (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift; + + dcache_line_size_ = 4 << dcache_line_size_power_of_two; + icache_line_size_ = 4 << icache_line_size_power_of_two; +} + + +uint32_t CPU::GetCacheType() { +#ifdef __aarch64__ + uint64_t cache_type_register; + // Copy the content of the cache type register to a core register. + __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT + : [ctr] "=r" (cache_type_register)); + VIXL_ASSERT(is_uint32(cache_type_register)); + return cache_type_register; +#else + // This will lead to a cache with 1 byte long lines, which is fine since + // neither EnsureIAndDCacheCoherency nor the simulator will need this + // information. + return 0; +#endif +} + + +void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) { +#ifdef __aarch64__ + // Implement the cache synchronisation for all targets where AArch64 is the + // host, even if we're building the simulator for an AAarch64 host. This + // allows for cases where the user wants to simulate code as well as run it + // natively. 
+ + if (length == 0) { + return; + } + + // The code below assumes user space cache operations are allowed. + + // Work out the line sizes for each cache, and use them to determine the + // start addresses. + uintptr_t start = reinterpret_cast<uintptr_t>(address); + uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_); + uintptr_t isize = static_cast<uintptr_t>(icache_line_size_); + uintptr_t dline = start & ~(dsize - 1); + uintptr_t iline = start & ~(isize - 1); + + // Cache line sizes are always a power of 2. + VIXL_ASSERT(IsPowerOf2(dsize)); + VIXL_ASSERT(IsPowerOf2(isize)); + uintptr_t end = start + length; + + do { + __asm__ __volatile__ ( + // Clean each line of the D cache containing the target data. + // + // dc : Data Cache maintenance + // c : Clean + // va : by (Virtual) Address + // u : to the point of Unification + // The point of unification for a processor is the point by which the + // instruction and data caches are guaranteed to see the same copy of a + // memory location. See ARM DDI 0406B page B2-12 for more information. + " dc cvau, %[dline]\n" + : + : [dline] "r" (dline) + // This code does not write to memory, but the "memory" dependency + // prevents GCC from reordering the code. + : "memory"); + dline += dsize; + } while (dline < end); + + __asm__ __volatile__ ( + // Make sure that the data cache operations (above) complete before the + // instruction cache operations (below). + // + // dsb : Data Synchronisation Barrier + // ish : Inner SHareable domain + // + // The point of unification for an Inner Shareable shareability domain is + // the point by which the instruction and data caches of all the processors + // in that Inner Shareable shareability domain are guaranteed to see the + // same copy of a memory location. See ARM DDI 0406B page B2-12 for more + // information. + " dsb ish\n" + : : : "memory"); + + do { + __asm__ __volatile__ ( + // Invalidate each line of the I cache containing the target data. + // + // ic : Instruction Cache maintenance + // i : Invalidate + // va : by Address + // u : to the point of Unification + " ic ivau, %[iline]\n" + : + : [iline] "r" (iline) + : "memory"); + iline += isize; + } while (iline < end); + + __asm__ __volatile__ ( + // Make sure that the instruction cache operations (above) take effect + // before the isb (below). + " dsb ish\n" + + // Ensure that any instructions already in the pipeline are discarded and + // reloaded from the new data. + // isb : Instruction Synchronisation Barrier + " isb\n" + : : : "memory"); +#else + // If the host isn't AArch64, we must be using the simulator, so this function + // doesn't have to do anything. + USE(address, length); +#endif +} + +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.h b/js/src/jit/arm64/vixl/Cpu-vixl.h new file mode 100644 index 000000000..57ac65f61 --- /dev/null +++ b/js/src/jit/arm64/vixl/Cpu-vixl.h @@ -0,0 +1,83 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CPU_A64_H +#define VIXL_CPU_A64_H + +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" + +namespace vixl { + +class CPU { + public: + // Initialise CPU support. + static void SetUp(); + + // Ensures the data at a given address and with a given size is the same for + // the I and D caches. I and D caches are not automatically coherent on ARM + // so this operation is required before any dynamically generated code can + // safely run. + static void EnsureIAndDCacheCoherency(void *address, size_t length); + + // Handle tagged pointers. + template <typename T> + static T SetPointerTag(T pointer, uint64_t tag) { + VIXL_ASSERT(is_uintn(kAddressTagWidth, tag)); + + // Use C-style casts to get static_cast behaviour for integral types (T), + // and reinterpret_cast behaviour for other types. + + uint64_t raw = (uint64_t)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw)); + + raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset); + return (T)raw; + } + + template <typename T> + static uint64_t GetPointerTag(T pointer) { + // Use C-style casts to get static_cast behaviour for integral types (T), + // and reinterpret_cast behaviour for other types. + + uint64_t raw = (uint64_t)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw)); + + return (raw & kAddressTagMask) >> kAddressTagOffset; + } + + private: + // Return the content of the cache type register. + static uint32_t GetCacheType(); + + // I and D cache line size in bytes. + static unsigned icache_line_size_; + static unsigned dcache_line_size_; +}; + +} // namespace vixl + +#endif // VIXL_CPU_A64_H diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.cpp b/js/src/jit/arm64/vixl/Debugger-vixl.cpp new file mode 100644 index 000000000..85097ed5a --- /dev/null +++ b/js/src/jit/arm64/vixl/Debugger-vixl.cpp @@ -0,0 +1,1535 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL ARM LIMITED BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "js-config.h" + +#ifdef JS_SIMULATOR_ARM64 + +#include "jit/arm64/vixl/Debugger-vixl.h" + +#include "mozilla/Vector.h" + +#include "jsalloc.h" + +namespace vixl { + +// List of commands supported by the debugger. +#define DEBUG_COMMAND_LIST(C) \ +C(HelpCommand) \ +C(ContinueCommand) \ +C(StepCommand) \ +C(DisasmCommand) \ +C(PrintCommand) \ +C(ExamineCommand) + +// Debugger command lines are broken up in token of different type to make +// processing easier later on. +class Token { + public: + virtual ~Token() {} + + // Token type. + virtual bool IsRegister() const { return false; } + virtual bool IsFPRegister() const { return false; } + virtual bool IsIdentifier() const { return false; } + virtual bool IsAddress() const { return false; } + virtual bool IsInteger() const { return false; } + virtual bool IsFormat() const { return false; } + virtual bool IsUnknown() const { return false; } + // Token properties. + virtual bool CanAddressMemory() const { return false; } + virtual uint8_t* ToAddress(Debugger* debugger) const = 0; + virtual void Print(FILE* out = stdout) const = 0; + + static Token* Tokenize(const char* arg); +}; + +typedef mozilla::Vector<Token*, 0, js::SystemAllocPolicy> TokenVector; + +// Tokens often hold one value. +template<typename T> class ValueToken : public Token { + public: + explicit ValueToken(T value) : value_(value) {} + ValueToken() {} + + T value() const { return value_; } + + virtual uint8_t* ToAddress(Debugger* debugger) const { + USE(debugger); + VIXL_ABORT(); + } + + protected: + T value_; +}; + +// Integer registers (X or W) and their aliases. +// Format: wn or xn with 0 <= n < 32 or a name in the aliases list. +class RegisterToken : public ValueToken<const Register> { + public: + explicit RegisterToken(const Register reg) + : ValueToken<const Register>(reg) {} + + virtual bool IsRegister() const { return true; } + virtual bool CanAddressMemory() const { return value().Is64Bits(); } + virtual uint8_t* ToAddress(Debugger* debugger) const; + virtual void Print(FILE* out = stdout) const ; + const char* Name() const; + + static Token* Tokenize(const char* arg); + static RegisterToken* Cast(Token* tok) { + VIXL_ASSERT(tok->IsRegister()); + return reinterpret_cast<RegisterToken*>(tok); + } + + private: + static const int kMaxAliasNumber = 4; + static const char* kXAliases[kNumberOfRegisters][kMaxAliasNumber]; + static const char* kWAliases[kNumberOfRegisters][kMaxAliasNumber]; +}; + +// Floating point registers (D or S). +// Format: sn or dn with 0 <= n < 32. 
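The debugger parses each whitespace-separated argument of a command line into one of these Token subclasses; the floating-point register token described by the comment just above, plus the identifier, address, integer and format tokens, follow right after this sketch. A minimal example of consuming one argument through the interface declared above, assuming it sits in this translation unit (namespace vixl, <cstdio> available) and that Tokenize() dispatches as the class comments describe (DescribeArgument is illustrative, not part of the patch):

  void DescribeArgument(const char* arg) {
    Token* tok = Token::Tokenize(arg);
    if (tok->IsRegister()) {
      RegisterToken* reg = RegisterToken::Cast(tok);
      // Only 64-bit (X) registers may be dereferenced as memory addresses.
      printf("register %s, addressable: %s\n",
             reg->Name(), reg->CanAddressMemory() ? "yes" : "no");
    } else if (tok->IsUnknown()) {
      printf("unrecognised argument: %s\n", arg);
    }
    js_delete(tok);
  }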
+class FPRegisterToken : public ValueToken<const FPRegister> { + public: + explicit FPRegisterToken(const FPRegister fpreg) + : ValueToken<const FPRegister>(fpreg) {} + + virtual bool IsFPRegister() const { return true; } + virtual void Print(FILE* out = stdout) const ; + + static Token* Tokenize(const char* arg); + static FPRegisterToken* Cast(Token* tok) { + VIXL_ASSERT(tok->IsFPRegister()); + return reinterpret_cast<FPRegisterToken*>(tok); + } +}; + + +// Non-register identifiers. +// Format: Alphanumeric string starting with a letter. +class IdentifierToken : public ValueToken<char*> { + public: + explicit IdentifierToken(const char* name) { + size_t size = strlen(name) + 1; + value_ = (char*)js_malloc(size); + strncpy(value_, name, size); + } + virtual ~IdentifierToken() { js_free(value_); } + + virtual bool IsIdentifier() const { return true; } + virtual bool CanAddressMemory() const { return strcmp(value(), "pc") == 0; } + virtual uint8_t* ToAddress(Debugger* debugger) const; + virtual void Print(FILE* out = stdout) const; + + static Token* Tokenize(const char* arg); + static IdentifierToken* Cast(Token* tok) { + VIXL_ASSERT(tok->IsIdentifier()); + return reinterpret_cast<IdentifierToken*>(tok); + } +}; + +// 64-bit address literal. +// Format: 0x... with up to 16 hexadecimal digits. +class AddressToken : public ValueToken<uint8_t*> { + public: + explicit AddressToken(uint8_t* address) : ValueToken<uint8_t*>(address) {} + + virtual bool IsAddress() const { return true; } + virtual bool CanAddressMemory() const { return true; } + virtual uint8_t* ToAddress(Debugger* debugger) const; + virtual void Print(FILE* out = stdout) const ; + + static Token* Tokenize(const char* arg); + static AddressToken* Cast(Token* tok) { + VIXL_ASSERT(tok->IsAddress()); + return reinterpret_cast<AddressToken*>(tok); + } +}; + + +// 64-bit decimal integer literal. +// Format: n. +class IntegerToken : public ValueToken<int64_t> { + public: + explicit IntegerToken(int64_t value) : ValueToken<int64_t>(value) {} + + virtual bool IsInteger() const { return true; } + virtual void Print(FILE* out = stdout) const; + + static Token* Tokenize(const char* arg); + static IntegerToken* Cast(Token* tok) { + VIXL_ASSERT(tok->IsInteger()); + return reinterpret_cast<IntegerToken*>(tok); + } +}; + +// Literal describing how to print a chunk of data (up to 64 bits). +// Format: .ln +// where l (letter) is one of +// * x: hexadecimal +// * s: signed integer +// * u: unsigned integer +// * f: floating point +// * i: instruction +// and n (size) is one of 8, 16, 32 and 64. n should be omitted for +// instructions. 
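A format literal such as ".x32" ultimately materialises as an instantiation of the templated Format helper declared just below. A hypothetical round trip, assuming it runs inside this translation unit; the exact format strings chosen by FormatToken::Tokenize() are an implementation detail not shown in this hunk, and PrintWordInHex is illustrative only:

  void PrintWordInHex(uint32_t word) {
    FormatToken* fmt = js_new<Format<uint32_t>>("%08x", 'x');  // a ".x32"-style view
    fmt->PrintData(&word);   // prints e.g. "1e622820" for word == 0x1E622820
    js_delete(fmt);
  }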
+class FormatToken : public Token { + public: + FormatToken() {} + + virtual bool IsFormat() const { return true; } + virtual int SizeOf() const = 0; + virtual char type_code() const = 0; + virtual void PrintData(void* data, FILE* out = stdout) const = 0; + virtual void Print(FILE* out = stdout) const = 0; + + virtual uint8_t* ToAddress(Debugger* debugger) const { + USE(debugger); + VIXL_ABORT(); + } + + static Token* Tokenize(const char* arg); + static FormatToken* Cast(Token* tok) { + VIXL_ASSERT(tok->IsFormat()); + return reinterpret_cast<FormatToken*>(tok); + } +}; + + +template<typename T> class Format : public FormatToken { + public: + Format(const char* fmt, char type_code) : fmt_(fmt), type_code_(type_code) {} + + virtual int SizeOf() const { return sizeof(T); } + virtual char type_code() const { return type_code_; } + virtual void PrintData(void* data, FILE* out = stdout) const { + T value; + memcpy(&value, data, sizeof(value)); + fprintf(out, fmt_, value); + } + virtual void Print(FILE* out = stdout) const; + + private: + const char* fmt_; + char type_code_; +}; + +// Tokens which don't fit any of the above. +class UnknownToken : public Token { + public: + explicit UnknownToken(const char* arg) { + size_t size = strlen(arg) + 1; + unknown_ = (char*)js_malloc(size); + strncpy(unknown_, arg, size); + } + virtual ~UnknownToken() { js_free(unknown_); } + virtual uint8_t* ToAddress(Debugger* debugger) const { + USE(debugger); + VIXL_ABORT(); + } + + virtual bool IsUnknown() const { return true; } + virtual void Print(FILE* out = stdout) const; + + private: + char* unknown_; +}; + + +// All debugger commands must subclass DebugCommand and implement Run, Print +// and Build. Commands must also define kHelp and kAliases. +class DebugCommand { + public: + explicit DebugCommand(Token* name) : name_(IdentifierToken::Cast(name)) {} + DebugCommand() : name_(NULL) {} + virtual ~DebugCommand() { js_delete(name_); } + + const char* name() { return name_->value(); } + // Run the command on the given debugger. The command returns true if + // execution should move to the next instruction. 
+ virtual bool Run(Debugger * debugger) = 0; + virtual void Print(FILE* out = stdout); + + static bool Match(const char* name, const char** aliases); + static DebugCommand* Parse(char* line); + static void PrintHelp(const char** aliases, + const char* args, + const char* help); + + private: + IdentifierToken* name_; +}; + +// For all commands below see their respective kHelp and kAliases in +// debugger-a64.cc +class HelpCommand : public DebugCommand { + public: + explicit HelpCommand(Token* name) : DebugCommand(name) {} + + virtual bool Run(Debugger* debugger); + + static DebugCommand* Build(TokenVector&& args); + + static const char* kHelp; + static const char* kAliases[]; + static const char* kArguments; +}; + + +class ContinueCommand : public DebugCommand { + public: + explicit ContinueCommand(Token* name) : DebugCommand(name) {} + + virtual bool Run(Debugger* debugger); + + static DebugCommand* Build(TokenVector&& args); + + static const char* kHelp; + static const char* kAliases[]; + static const char* kArguments; +}; + + +class StepCommand : public DebugCommand { + public: + StepCommand(Token* name, IntegerToken* count) + : DebugCommand(name), count_(count) {} + virtual ~StepCommand() { js_delete(count_); } + + int64_t count() { return count_->value(); } + virtual bool Run(Debugger* debugger); + virtual void Print(FILE* out = stdout); + + static DebugCommand* Build(TokenVector&& args); + + static const char* kHelp; + static const char* kAliases[]; + static const char* kArguments; + + private: + IntegerToken* count_; +}; + +class DisasmCommand : public DebugCommand { + public: + static DebugCommand* Build(TokenVector&& args); + + static const char* kHelp; + static const char* kAliases[]; + static const char* kArguments; +}; + + +class PrintCommand : public DebugCommand { + public: + PrintCommand(Token* name, Token* target, FormatToken* format) + : DebugCommand(name), target_(target), format_(format) {} + virtual ~PrintCommand() { + js_delete(target_); + js_delete(format_); + } + + Token* target() { return target_; } + FormatToken* format() { return format_; } + virtual bool Run(Debugger* debugger); + virtual void Print(FILE* out = stdout); + + static DebugCommand* Build(TokenVector&& args); + + static const char* kHelp; + static const char* kAliases[]; + static const char* kArguments; + + private: + Token* target_; + FormatToken* format_; +}; + +class ExamineCommand : public DebugCommand { + public: + ExamineCommand(Token* name, + Token* target, + FormatToken* format, + IntegerToken* count) + : DebugCommand(name), target_(target), format_(format), count_(count) {} + virtual ~ExamineCommand() { + js_delete(target_); + js_delete(format_); + js_delete(count_); + } + + Token* target() { return target_; } + FormatToken* format() { return format_; } + IntegerToken* count() { return count_; } + virtual bool Run(Debugger* debugger); + virtual void Print(FILE* out = stdout); + + static DebugCommand* Build(TokenVector&& args); + + static const char* kHelp; + static const char* kAliases[]; + static const char* kArguments; + + private: + Token* target_; + FormatToken* format_; + IntegerToken* count_; +}; + +// Commands which name does not match any of the known commnand. +class UnknownCommand : public DebugCommand { + public: + explicit UnknownCommand(TokenVector&& args) : args_(Move(args)) {} + virtual ~UnknownCommand(); + + virtual bool Run(Debugger* debugger); + + private: + TokenVector args_; +}; + +// Commands which name match a known command but the syntax is invalid. 
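Taken together these classes implement a small gdb-style shell inside the simulator; the kAliases, kArguments and kHelp strings a little further down spell out the accepted spellings, and the InvalidCommand class announced just above continues after this example. A plausible interactive exchange, going only by those strings (illustrative input, not captured output):

  stepi 4          execute the next four instructions
  print x0.x64     show x0 as a 64-bit hexadecimal value
  x sp.x64 16      dump sixteen 64-bit words starting at the stack pointer
  disasm 10        disassemble ten instructions at pc
  continue         resume execution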
+class InvalidCommand : public DebugCommand { + public: + InvalidCommand(TokenVector&& args, int index, const char* cause) + : args_(Move(args)), index_(index), cause_(cause) {} + virtual ~InvalidCommand(); + + virtual bool Run(Debugger* debugger); + + private: + TokenVector args_; + int index_; + const char* cause_; +}; + +const char* HelpCommand::kAliases[] = { "help", NULL }; +const char* HelpCommand::kArguments = NULL; +const char* HelpCommand::kHelp = " Print this help."; + +const char* ContinueCommand::kAliases[] = { "continue", "c", NULL }; +const char* ContinueCommand::kArguments = NULL; +const char* ContinueCommand::kHelp = " Resume execution."; + +const char* StepCommand::kAliases[] = { "stepi", "si", NULL }; +const char* StepCommand::kArguments = "[n = 1]"; +const char* StepCommand::kHelp = " Execute n next instruction(s)."; + +const char* DisasmCommand::kAliases[] = { "disasm", "di", NULL }; +const char* DisasmCommand::kArguments = "[n = 10]"; +const char* DisasmCommand::kHelp = + " Disassemble n instruction(s) at pc.\n" + " This command is equivalent to x pc.i [n = 10]." +; + +const char* PrintCommand::kAliases[] = { "print", "p", NULL }; +const char* PrintCommand::kArguments = "<entity>[.format]"; +const char* PrintCommand::kHelp = + " Print the given entity according to the given format.\n" + " The format parameter only affects individual registers; it is ignored\n" + " for other entities.\n" + " <entity> can be one of the following:\n" + " * A register name (such as x0, s1, ...).\n" + " * 'regs', to print all integer (W and X) registers.\n" + " * 'fpregs' to print all floating-point (S and D) registers.\n" + " * 'sysregs' to print all system registers (including NZCV).\n" + " * 'pc' to print the current program counter.\n" +; + +const char* ExamineCommand::kAliases[] = { "m", "mem", "x", NULL }; +const char* ExamineCommand::kArguments = "<addr>[.format] [n = 10]"; +const char* ExamineCommand::kHelp = + " Examine memory. Print n items of memory at address <addr> according to\n" + " the given [.format].\n" + " Addr can be an immediate address, a register name or pc.\n" + " Format is made of a type letter: 'x' (hexadecimal), 's' (signed), 'u'\n" + " (unsigned), 'f' (floating point), i (instruction) and a size in bits\n" + " when appropriate (8, 16, 32, 64)\n" + " E.g 'x sp.x64' will print 10 64-bit words from the stack in\n" + " hexadecimal format." 
+; + +const char* RegisterToken::kXAliases[kNumberOfRegisters][kMaxAliasNumber] = { + { "x0", NULL }, + { "x1", NULL }, + { "x2", NULL }, + { "x3", NULL }, + { "x4", NULL }, + { "x5", NULL }, + { "x6", NULL }, + { "x7", NULL }, + { "x8", NULL }, + { "x9", NULL }, + { "x10", NULL }, + { "x11", NULL }, + { "x12", NULL }, + { "x13", NULL }, + { "x14", NULL }, + { "x15", NULL }, + { "ip0", "x16", NULL }, + { "ip1", "x17", NULL }, + { "x18", "pr", NULL }, + { "x19", NULL }, + { "x20", NULL }, + { "x21", NULL }, + { "x22", NULL }, + { "x23", NULL }, + { "x24", NULL }, + { "x25", NULL }, + { "x26", NULL }, + { "x27", NULL }, + { "x28", NULL }, + { "fp", "x29", NULL }, + { "lr", "x30", NULL }, + { "sp", NULL} +}; + +const char* RegisterToken::kWAliases[kNumberOfRegisters][kMaxAliasNumber] = { + { "w0", NULL }, + { "w1", NULL }, + { "w2", NULL }, + { "w3", NULL }, + { "w4", NULL }, + { "w5", NULL }, + { "w6", NULL }, + { "w7", NULL }, + { "w8", NULL }, + { "w9", NULL }, + { "w10", NULL }, + { "w11", NULL }, + { "w12", NULL }, + { "w13", NULL }, + { "w14", NULL }, + { "w15", NULL }, + { "w16", NULL }, + { "w17", NULL }, + { "w18", NULL }, + { "w19", NULL }, + { "w20", NULL }, + { "w21", NULL }, + { "w22", NULL }, + { "w23", NULL }, + { "w24", NULL }, + { "w25", NULL }, + { "w26", NULL }, + { "w27", NULL }, + { "w28", NULL }, + { "w29", NULL }, + { "w30", NULL }, + { "wsp", NULL } +}; + + +Debugger::Debugger(Decoder* decoder, FILE* stream) + : Simulator(decoder, stream), + debug_parameters_(DBG_INACTIVE), + pending_request_(false), + steps_(0), + last_command_(NULL) { + disasm_ = js_new<PrintDisassembler>(stdout); + printer_ = js_new<Decoder>(); + printer_->AppendVisitor(disasm_); +} + + +Debugger::~Debugger() { + js_delete(disasm_); + js_delete(printer_); +} + + +void Debugger::Run() { + pc_modified_ = false; + while (pc_ != kEndOfSimAddress) { + if (pending_request()) RunDebuggerShell(); + ExecuteInstruction(); + LogAllWrittenRegisters(); + } +} + + +void Debugger::PrintInstructions(const void* address, int64_t count) { + if (count == 0) { + return; + } + + const Instruction* from = Instruction::CastConst(address); + if (count < 0) { + count = -count; + from -= (count - 1) * kInstructionSize; + } + const Instruction* to = from + count * kInstructionSize; + + for (const Instruction* current = from; + current < to; + current = current->NextInstruction()) { + printer_->Decode(current); + } +} + + +void Debugger::PrintMemory(const uint8_t* address, + const FormatToken* format, + int64_t count) { + if (count == 0) { + return; + } + + const uint8_t* from = address; + int size = format->SizeOf(); + if (count < 0) { + count = -count; + from -= (count - 1) * size; + } + const uint8_t* to = from + count * size; + + for (const uint8_t* current = from; current < to; current += size) { + if (((current - from) % 8) == 0) { + printf("\n%p: ", current); + } + + uint64_t data = Memory::Read<uint64_t>(current); + format->PrintData(&data); + printf(" "); + } + printf("\n\n"); +} + + +void Debugger::PrintRegister(const Register& target_reg, + const char* name, + const FormatToken* format) { + const uint64_t reg_size = target_reg.size(); + const uint64_t format_size = format->SizeOf() * 8; + const uint64_t count = reg_size / format_size; + const uint64_t mask = 0xffffffffffffffff >> (64 - format_size); + const uint64_t reg_value = reg<uint64_t>(target_reg.code(), + Reg31IsStackPointer); + VIXL_ASSERT(count > 0); + + printf("%s = ", name); + for (uint64_t i = 1; i <= count; i++) { + uint64_t data = reg_value >> 
(reg_size - (i * format_size)); + data &= mask; + format->PrintData(&data); + printf(" "); + } + printf("\n"); +} + + +// TODO(all): fix this for vector registers. +void Debugger::PrintFPRegister(const FPRegister& target_fpreg, + const FormatToken* format) { + const unsigned fpreg_size = target_fpreg.size(); + const uint64_t format_size = format->SizeOf() * 8; + const uint64_t count = fpreg_size / format_size; + const uint64_t mask = 0xffffffffffffffff >> (64 - format_size); + const uint64_t fpreg_value = vreg<uint64_t>(fpreg_size, target_fpreg.code()); + VIXL_ASSERT(count > 0); + + if (target_fpreg.Is32Bits()) { + printf("s%u = ", target_fpreg.code()); + } else { + printf("d%u = ", target_fpreg.code()); + } + for (uint64_t i = 1; i <= count; i++) { + uint64_t data = fpreg_value >> (fpreg_size - (i * format_size)); + data &= mask; + format->PrintData(&data); + printf(" "); + } + printf("\n"); +} + + +void Debugger::VisitException(const Instruction* instr) { + switch (instr->Mask(ExceptionMask)) { + case BRK: + DoBreakpoint(instr); + return; + case HLT: + VIXL_FALLTHROUGH(); + default: Simulator::VisitException(instr); + } +} + + +// Read a command. A command will be at most kMaxDebugShellLine char long and +// ends with '\n\0'. +// TODO: Should this be a utility function? +char* Debugger::ReadCommandLine(const char* prompt, char* buffer, int length) { + int fgets_calls = 0; + char* end = NULL; + + printf("%s", prompt); + fflush(stdout); + + do { + if (fgets(buffer, length, stdin) == NULL) { + printf(" ** Error while reading command. **\n"); + return NULL; + } + + fgets_calls++; + end = strchr(buffer, '\n'); + } while (end == NULL); + + if (fgets_calls != 1) { + printf(" ** Command too long. **\n"); + return NULL; + } + + // Remove the newline from the end of the command. + VIXL_ASSERT(end[1] == '\0'); + VIXL_ASSERT((end - buffer) < (length - 1)); + end[0] = '\0'; + + return buffer; +} + + +void Debugger::RunDebuggerShell() { + if (IsDebuggerRunning()) { + if (steps_ > 0) { + // Finish stepping first. + --steps_; + return; + } + + printf("Next: "); + PrintInstructions(pc()); + bool done = false; + while (!done) { + char buffer[kMaxDebugShellLine]; + char* line = ReadCommandLine("vixl> ", buffer, kMaxDebugShellLine); + + if (line == NULL) continue; // An error occurred. + + DebugCommand* command = DebugCommand::Parse(line); + if (command != NULL) { + last_command_ = command; + } + + if (last_command_ != NULL) { + done = last_command_->Run(this); + } else { + printf("No previous command to run!\n"); + } + } + + if ((debug_parameters_ & DBG_BREAK) != 0) { + // The break request has now been handled, move to next instruction. + debug_parameters_ &= ~DBG_BREAK; + increment_pc(); + } + } +} + + +void Debugger::DoBreakpoint(const Instruction* instr) { + VIXL_ASSERT(instr->Mask(ExceptionMask) == BRK); + + printf("Hit breakpoint at pc=%p.\n", reinterpret_cast<const void*>(instr)); + set_debug_parameters(debug_parameters() | DBG_BREAK | DBG_ACTIVE); + // Make the shell point to the brk instruction. + set_pc(instr); +} + + +static bool StringToUInt64(uint64_t* value, const char* line, int base = 10) { + char* endptr = NULL; + errno = 0; // Reset errors. + uint64_t parsed = strtoul(line, &endptr, base); + + if (errno == ERANGE) { + // Overflow. + return false; + } + + if (endptr == line) { + // No digits were parsed. + return false; + } + + if (*endptr != '\0') { + // Non-digit characters present at the end. 
+ return false; + } + + *value = parsed; + return true; +} + + +static bool StringToInt64(int64_t* value, const char* line, int base = 10) { + char* endptr = NULL; + errno = 0; // Reset errors. + int64_t parsed = strtol(line, &endptr, base); + + if (errno == ERANGE) { + // Overflow, undeflow. + return false; + } + + if (endptr == line) { + // No digits were parsed. + return false; + } + + if (*endptr != '\0') { + // Non-digit characters present at the end. + return false; + } + + *value = parsed; + return true; +} + + +Token* Token::Tokenize(const char* arg) { + if ((arg == NULL) || (*arg == '\0')) { + return NULL; + } + + // The order is important. For example Identifier::Tokenize would consider + // any register to be a valid identifier. + + Token* token = RegisterToken::Tokenize(arg); + if (token != NULL) { + return token; + } + + token = FPRegisterToken::Tokenize(arg); + if (token != NULL) { + return token; + } + + token = IdentifierToken::Tokenize(arg); + if (token != NULL) { + return token; + } + + token = AddressToken::Tokenize(arg); + if (token != NULL) { + return token; + } + + token = IntegerToken::Tokenize(arg); + if (token != NULL) { + return token; + } + + return js_new<UnknownToken>(arg); +} + + +uint8_t* RegisterToken::ToAddress(Debugger* debugger) const { + VIXL_ASSERT(CanAddressMemory()); + uint64_t reg_value = debugger->xreg(value().code(), Reg31IsStackPointer); + uint8_t* address = NULL; + memcpy(&address, ®_value, sizeof(address)); + return address; +} + + +void RegisterToken::Print(FILE* out) const { + VIXL_ASSERT(value().IsValid()); + fprintf(out, "[Register %s]", Name()); +} + + +const char* RegisterToken::Name() const { + if (value().Is32Bits()) { + return kWAliases[value().code()][0]; + } else { + return kXAliases[value().code()][0]; + } +} + + +Token* RegisterToken::Tokenize(const char* arg) { + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + // Is it a X register or alias? + for (const char** current = kXAliases[i]; *current != NULL; current++) { + if (strcmp(arg, *current) == 0) { + return js_new<RegisterToken>(Register::XRegFromCode(i)); + } + } + + // Is it a W register or alias? + for (const char** current = kWAliases[i]; *current != NULL; current++) { + if (strcmp(arg, *current) == 0) { + return js_new<RegisterToken>(Register::WRegFromCode(i)); + } + } + } + + return NULL; +} + + +void FPRegisterToken::Print(FILE* out) const { + VIXL_ASSERT(value().IsValid()); + char prefix = value().Is32Bits() ? 
's' : 'd'; + fprintf(out, "[FPRegister %c%" PRIu32 "]", prefix, value().code()); +} + + +Token* FPRegisterToken::Tokenize(const char* arg) { + if (strlen(arg) < 2) { + return NULL; + } + + switch (*arg) { + case 's': + case 'd': + const char* cursor = arg + 1; + uint64_t code = 0; + if (!StringToUInt64(&code, cursor)) { + return NULL; + } + + if (code > kNumberOfFPRegisters) { + return NULL; + } + + VRegister fpreg = NoVReg; + switch (*arg) { + case 's': + fpreg = VRegister::SRegFromCode(static_cast<unsigned>(code)); + break; + case 'd': + fpreg = VRegister::DRegFromCode(static_cast<unsigned>(code)); + break; + default: VIXL_UNREACHABLE(); + } + + return js_new<FPRegisterToken>(fpreg); + } + + return NULL; +} + + +uint8_t* IdentifierToken::ToAddress(Debugger* debugger) const { + VIXL_ASSERT(CanAddressMemory()); + const Instruction* pc_value = debugger->pc(); + uint8_t* address = NULL; + memcpy(&address, &pc_value, sizeof(address)); + return address; +} + +void IdentifierToken::Print(FILE* out) const { + fprintf(out, "[Identifier %s]", value()); +} + + +Token* IdentifierToken::Tokenize(const char* arg) { + if (!isalpha(arg[0])) { + return NULL; + } + + const char* cursor = arg + 1; + while ((*cursor != '\0') && isalnum(*cursor)) { + ++cursor; + } + + if (*cursor == '\0') { + return js_new<IdentifierToken>(arg); + } + + return NULL; +} + + +uint8_t* AddressToken::ToAddress(Debugger* debugger) const { + USE(debugger); + return value(); +} + + +void AddressToken::Print(FILE* out) const { + fprintf(out, "[Address %p]", value()); +} + + +Token* AddressToken::Tokenize(const char* arg) { + if ((strlen(arg) < 3) || (arg[0] != '0') || (arg[1] != 'x')) { + return NULL; + } + + uint64_t ptr = 0; + if (!StringToUInt64(&ptr, arg, 16)) { + return NULL; + } + + uint8_t* address = reinterpret_cast<uint8_t*>(ptr); + return js_new<AddressToken>(address); +} + + +void IntegerToken::Print(FILE* out) const { + fprintf(out, "[Integer %" PRId64 "]", value()); +} + + +Token* IntegerToken::Tokenize(const char* arg) { + int64_t value = 0; + if (!StringToInt64(&value, arg)) { + return NULL; + } + + return js_new<IntegerToken>(value); +} + + +Token* FormatToken::Tokenize(const char* arg) { + size_t length = strlen(arg); + switch (arg[0]) { + case 'x': + case 's': + case 'u': + case 'f': + if (length == 1) return NULL; + break; + case 'i': + if (length == 1) return js_new<Format<uint32_t>>("%08" PRIx32, 'i'); + VIXL_FALLTHROUGH(); + default: return NULL; + } + + char* endptr = NULL; + errno = 0; // Reset errors. + uint64_t count = strtoul(arg + 1, &endptr, 10); + + if (errno != 0) { + // Overflow, etc. + return NULL; + } + + if (endptr == arg) { + // No digits were parsed. + return NULL; + } + + if (*endptr != '\0') { + // There are unexpected (non-digit) characters after the number. 
+ return NULL; + } + + switch (arg[0]) { + case 'x': + switch (count) { + case 8: return js_new<Format<uint8_t>>("%02" PRIx8, 'x'); + case 16: return js_new<Format<uint16_t>>("%04" PRIx16, 'x'); + case 32: return js_new<Format<uint32_t>>("%08" PRIx32, 'x'); + case 64: return js_new<Format<uint64_t>>("%016" PRIx64, 'x'); + default: return NULL; + } + case 's': + switch (count) { + case 8: return js_new<Format<int8_t>>("%4" PRId8, 's'); + case 16: return js_new<Format<int16_t>>("%6" PRId16, 's'); + case 32: return js_new<Format<int32_t>>("%11" PRId32, 's'); + case 64: return js_new<Format<int64_t>>("%20" PRId64, 's'); + default: return NULL; + } + case 'u': + switch (count) { + case 8: return js_new<Format<uint8_t>>("%3" PRIu8, 'u'); + case 16: return js_new<Format<uint16_t>>("%5" PRIu16, 'u'); + case 32: return js_new<Format<uint32_t>>("%10" PRIu32, 'u'); + case 64: return js_new<Format<uint64_t>>("%20" PRIu64, 'u'); + default: return NULL; + } + case 'f': + switch (count) { + case 32: return js_new<Format<float>>("%13g", 'f'); + case 64: return js_new<Format<double>>("%13g", 'f'); + default: return NULL; + } + default: + VIXL_UNREACHABLE(); + return NULL; + } +} + + +template<typename T> +void Format<T>::Print(FILE* out) const { + unsigned size = sizeof(T) * 8; + fprintf(out, "[Format %c%u - %s]", type_code_, size, fmt_); +} + + +void UnknownToken::Print(FILE* out) const { + fprintf(out, "[Unknown %s]", unknown_); +} + + +void DebugCommand::Print(FILE* out) { + fprintf(out, "%s", name()); +} + + +bool DebugCommand::Match(const char* name, const char** aliases) { + for (const char** current = aliases; *current != NULL; current++) { + if (strcmp(name, *current) == 0) { + return true; + } + } + + return false; +} + + +DebugCommand* DebugCommand::Parse(char* line) { + TokenVector args; + + for (char* chunk = strtok(line, " \t"); + chunk != NULL; + chunk = strtok(NULL, " \t")) { + char* dot = strchr(chunk, '.'); + if (dot != NULL) { + // 'Token.format'. + Token* format = FormatToken::Tokenize(dot + 1); + if (format != NULL) { + *dot = '\0'; + args.append(Token::Tokenize(chunk)); + args.append(format); + } else { + // Error while parsing the format, push the UnknownToken so an error + // can be accurately reported. 
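+        // (The '.' separator is left in the chunk, so Token::Tokenize will
+        // typically produce an UnknownToken for the whole argument.)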
+ args.append(Token::Tokenize(chunk)); + } + } else { + args.append(Token::Tokenize(chunk)); + } + } + + if (args.empty()) { + return NULL; + } + + if (!args[0]->IsIdentifier()) { + return js_new<InvalidCommand>(Move(args), 0, "command name is not valid"); + } + + const char* name = IdentifierToken::Cast(args[0])->value(); + #define RETURN_IF_MATCH(Command) \ + if (Match(name, Command::kAliases)) { \ + return Command::Build(Move(args)); \ + } + DEBUG_COMMAND_LIST(RETURN_IF_MATCH); + #undef RETURN_IF_MATCH + + return js_new<UnknownCommand>(Move(args)); +} + + +void DebugCommand::PrintHelp(const char** aliases, + const char* args, + const char* help) { + VIXL_ASSERT(aliases[0] != NULL); + VIXL_ASSERT(help != NULL); + + printf("\n----\n\n"); + for (const char** current = aliases; *current != NULL; current++) { + if (args != NULL) { + printf("%s %s\n", *current, args); + } else { + printf("%s\n", *current); + } + } + printf("\n%s\n", help); +} + + +bool HelpCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + USE(debugger); + + #define PRINT_HELP(Command) \ + DebugCommand::PrintHelp(Command::kAliases, \ + Command::kArguments, \ + Command::kHelp); + DEBUG_COMMAND_LIST(PRINT_HELP); + #undef PRINT_HELP + printf("\n----\n\n"); + + return false; +} + + +DebugCommand* HelpCommand::Build(TokenVector&& args) { + if (args.length() != 1) { + return js_new<InvalidCommand>(Move(args), -1, "too many arguments"); + } + + return js_new<HelpCommand>(args[0]); +} + + +bool ContinueCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + + debugger->set_debug_parameters(debugger->debug_parameters() & ~DBG_ACTIVE); + return true; +} + + +DebugCommand* ContinueCommand::Build(TokenVector&& args) { + if (args.length() != 1) { + return js_new<InvalidCommand>(Move(args), -1, "too many arguments"); + } + + return js_new<ContinueCommand>(args[0]); +} + + +bool StepCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + + int64_t steps = count(); + if (steps < 0) { + printf(" ** invalid value for steps: %" PRId64 " (<0) **\n", steps); + } else if (steps > 1) { + debugger->set_steps(steps - 1); + } + + return true; +} + + +void StepCommand::Print(FILE* out) { + fprintf(out, "%s %" PRId64 "", name(), count()); +} + + +DebugCommand* StepCommand::Build(TokenVector&& args) { + IntegerToken* count = NULL; + switch (args.length()) { + case 1: { // step [1] + count = js_new<IntegerToken>(1); + break; + } + case 2: { // step n + Token* first = args[1]; + if (!first->IsInteger()) { + return js_new<InvalidCommand>(Move(args), 1, "expects int"); + } + count = IntegerToken::Cast(first); + break; + } + default: + return js_new<InvalidCommand>(Move(args), -1, "too many arguments"); + } + + return js_new<StepCommand>(args[0], count); +} + + +DebugCommand* DisasmCommand::Build(TokenVector&& args) { + IntegerToken* count = NULL; + switch (args.length()) { + case 1: { // disasm [10] + count = js_new<IntegerToken>(10); + break; + } + case 2: { // disasm n + Token* first = args[1]; + if (!first->IsInteger()) { + return js_new<InvalidCommand>(Move(args), 1, "expects int"); + } + + count = IntegerToken::Cast(first); + break; + } + default: + return js_new<InvalidCommand>(Move(args), -1, "too many arguments"); + } + + Token* target = js_new<IdentifierToken>("pc"); + FormatToken* format = js_new<Format<uint32_t>>("%08" PRIx32, 'i'); + return js_new<ExamineCommand>(args[0], target, format, count); +} + + +void PrintCommand::Print(FILE* out) { + fprintf(out, "%s ", 
name()); + target()->Print(out); + if (format() != NULL) format()->Print(out); +} + + +bool PrintCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + + Token* tok = target(); + if (tok->IsIdentifier()) { + char* identifier = IdentifierToken::Cast(tok)->value(); + if (strcmp(identifier, "regs") == 0) { + debugger->PrintRegisters(); + } else if (strcmp(identifier, "fpregs") == 0) { + debugger->PrintVRegisters(); + } else if (strcmp(identifier, "sysregs") == 0) { + debugger->PrintSystemRegisters(); + } else if (strcmp(identifier, "pc") == 0) { + printf("pc = %16p\n", reinterpret_cast<const void*>(debugger->pc())); + } else { + printf(" ** Unknown identifier to print: %s **\n", identifier); + } + + return false; + } + + FormatToken* format_tok = format(); + VIXL_ASSERT(format_tok != NULL); + if (format_tok->type_code() == 'i') { + // TODO(all): Add support for instruction disassembly. + printf(" ** unsupported format: instructions **\n"); + return false; + } + + if (tok->IsRegister()) { + RegisterToken* reg_tok = RegisterToken::Cast(tok); + Register reg = reg_tok->value(); + debugger->PrintRegister(reg, reg_tok->Name(), format_tok); + return false; + } + + if (tok->IsFPRegister()) { + FPRegister fpreg = FPRegisterToken::Cast(tok)->value(); + debugger->PrintFPRegister(fpreg, format_tok); + return false; + } + + VIXL_UNREACHABLE(); + return false; +} + + +DebugCommand* PrintCommand::Build(TokenVector&& args) { + if (args.length() < 2) { + return js_new<InvalidCommand>(Move(args), -1, "too few arguments"); + } + + Token* target = args[1]; + if (!target->IsRegister() && + !target->IsFPRegister() && + !target->IsIdentifier()) { + return js_new<InvalidCommand>(Move(args), 1, "expects reg or identifier"); + } + + FormatToken* format = NULL; + int target_size = 0; + if (target->IsRegister()) { + Register reg = RegisterToken::Cast(target)->value(); + target_size = reg.SizeInBytes(); + } else if (target->IsFPRegister()) { + FPRegister fpreg = FPRegisterToken::Cast(target)->value(); + target_size = fpreg.SizeInBytes(); + } + // If the target is an identifier there must be no format. This is checked + // in the switch statement below. 
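+  // With no explicit format, integer registers default to hexadecimal at
+  // their natural width and FP registers to a '%g' float format.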
+ + switch (args.length()) { + case 2: { + if (target->IsRegister()) { + switch (target_size) { + case 4: format = js_new<Format<uint32_t>>("%08" PRIx32, 'x'); break; + case 8: format = js_new<Format<uint64_t>>("%016" PRIx64, 'x'); break; + default: VIXL_UNREACHABLE(); + } + } else if (target->IsFPRegister()) { + switch (target_size) { + case 4: format = js_new<Format<float>>("%8g", 'f'); break; + case 8: format = js_new<Format<double>>("%8g", 'f'); break; + default: VIXL_UNREACHABLE(); + } + } + break; + } + case 3: { + if (target->IsIdentifier()) { + return js_new<InvalidCommand>(Move(args), 2, + "format is only allowed with registers"); + } + + Token* second = args[2]; + if (!second->IsFormat()) { + return js_new<InvalidCommand>(Move(args), 2, "expects format"); + } + format = FormatToken::Cast(second); + + if (format->SizeOf() > target_size) { + return js_new<InvalidCommand>(Move(args), 2, "format too wide"); + } + + break; + } + default: + return js_new<InvalidCommand>(Move(args), -1, "too many arguments"); + } + + return js_new<PrintCommand>(args[0], target, format); +} + + +bool ExamineCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + + uint8_t* address = target()->ToAddress(debugger); + int64_t amount = count()->value(); + if (format()->type_code() == 'i') { + debugger->PrintInstructions(address, amount); + } else { + debugger->PrintMemory(address, format(), amount); + } + + return false; +} + + +void ExamineCommand::Print(FILE* out) { + fprintf(out, "%s ", name()); + format()->Print(out); + target()->Print(out); +} + + +DebugCommand* ExamineCommand::Build(TokenVector&& args) { + if (args.length() < 2) { + return js_new<InvalidCommand>(Move(args), -1, "too few arguments"); + } + + Token* target = args[1]; + if (!target->CanAddressMemory()) { + return js_new<InvalidCommand>(Move(args), 1, "expects address"); + } + + FormatToken* format = NULL; + IntegerToken* count = NULL; + + switch (args.length()) { + case 2: { // mem addr[.x64] [10] + format = js_new<Format<uint64_t>>("%016" PRIx64, 'x'); + count = js_new<IntegerToken>(10); + break; + } + case 3: { // mem addr.format [10] + // mem addr[.x64] n + Token* second = args[2]; + if (second->IsFormat()) { + format = FormatToken::Cast(second); + count = js_new<IntegerToken>(10); + break; + } else if (second->IsInteger()) { + format = js_new<Format<uint64_t>>("%016" PRIx64, 'x'); + count = IntegerToken::Cast(second); + } else { + return js_new<InvalidCommand>(Move(args), 2, "expects format or integer"); + } + VIXL_UNREACHABLE(); + break; + } + case 4: { // mem addr.format n + Token* second = args[2]; + Token* third = args[3]; + if (!second->IsFormat() || !third->IsInteger()) { + return js_new<InvalidCommand>(Move(args), -1, "expects addr[.format] [n]"); + } + format = FormatToken::Cast(second); + count = IntegerToken::Cast(third); + break; + } + default: + return js_new<InvalidCommand>(Move(args), -1, "too many arguments"); + } + + return js_new<ExamineCommand>(args[0], target, format, count); +} + + +UnknownCommand::~UnknownCommand() { + const size_t size = args_.length(); + for (size_t i = 0; i < size; ++i) { + js_delete(args_[i]); + } +} + + +bool UnknownCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + USE(debugger); + + printf(" ** Unknown Command:"); + const size_t size = args_.length(); + for (size_t i = 0; i < size; ++i) { + printf(" "); + args_[i]->Print(stdout); + } + printf(" **\n"); + + return false; +} + + +InvalidCommand::~InvalidCommand() { + const size_t size 
= args_.length(); + for (size_t i = 0; i < size; ++i) { + js_delete(args_[i]); + } +} + + +bool InvalidCommand::Run(Debugger* debugger) { + VIXL_ASSERT(debugger->IsDebuggerRunning()); + USE(debugger); + + printf(" ** Invalid Command:"); + const size_t size = args_.length(); + for (size_t i = 0; i < size; ++i) { + printf(" "); + if (i == static_cast<size_t>(index_)) { + printf(">>"); + args_[i]->Print(stdout); + printf("<<"); + } else { + args_[i]->Print(stdout); + } + } + printf(" **\n"); + printf(" ** %s\n", cause_); + + return false; +} + +} // namespace vixl + +#endif // JS_SIMULATOR_ARM64 diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.h b/js/src/jit/arm64/vixl/Debugger-vixl.h new file mode 100644 index 000000000..be2b3d9cf --- /dev/null +++ b/js/src/jit/arm64/vixl/Debugger-vixl.h @@ -0,0 +1,117 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifdef JS_SIMULATOR_ARM64 + +#ifndef VIXL_A64_DEBUGGER_A64_H_ +#define VIXL_A64_DEBUGGER_A64_H_ + +#include <ctype.h> +#include <errno.h> +#include <limits.h> + +#include "jit/arm64/vixl/Constants-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Simulator-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +namespace vixl { + +// Flags that represent the debugger state. +enum DebugParameters { + DBG_INACTIVE = 0, + DBG_ACTIVE = 1 << 0, // The debugger is active. + DBG_BREAK = 1 << 1 // The debugger is at a breakpoint. +}; + +// Forward declarations. +class DebugCommand; +class Token; +class FormatToken; + +class Debugger : public Simulator { + public: + explicit Debugger(Decoder* decoder, FILE* stream = stdout); + ~Debugger(); + + virtual void Run(); + virtual void VisitException(const Instruction* instr); + + int debug_parameters() const { return debug_parameters_; } + void set_debug_parameters(int parameters) { + debug_parameters_ = parameters; + + update_pending_request(); + } + + // Numbers of instructions to execute before the debugger shell is given + // back control. 
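+  // RunDebuggerShell() decrements this count and skips the prompt while it
+  // is still positive.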
+ int64_t steps() const { return steps_; } + void set_steps(int64_t value) { + VIXL_ASSERT(value > 1); + steps_ = value; + } + + bool IsDebuggerRunning() const { + return (debug_parameters_ & DBG_ACTIVE) != 0; + } + + bool pending_request() const { return pending_request_; } + void update_pending_request() { + pending_request_ = IsDebuggerRunning(); + } + + void PrintInstructions(const void* address, int64_t count = 1); + void PrintMemory(const uint8_t* address, + const FormatToken* format, + int64_t count = 1); + void PrintRegister(const Register& target_reg, + const char* name, + const FormatToken* format); + void PrintFPRegister(const FPRegister& target_fpreg, + const FormatToken* format); + + private: + char* ReadCommandLine(const char* prompt, char* buffer, int length); + void RunDebuggerShell(); + void DoBreakpoint(const Instruction* instr); + + int debug_parameters_; + bool pending_request_; + int64_t steps_; + DebugCommand* last_command_; + PrintDisassembler* disasm_; + Decoder* printer_; + + // Length of the biggest command line accepted by the debugger shell. + static const int kMaxDebugShellLine = 256; +}; + +} // namespace vixl + +#endif // VIXL_A64_DEBUGGER_A64_H_ + +#endif // JS_SIMULATOR_ARM64 diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.cpp b/js/src/jit/arm64/vixl/Decoder-vixl.cpp new file mode 100644 index 000000000..5865689ae --- /dev/null +++ b/js/src/jit/arm64/vixl/Decoder-vixl.cpp @@ -0,0 +1,874 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Decoder-vixl.h" + +#include <algorithm> + +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +namespace vixl { + +void Decoder::DecodeInstruction(const Instruction *instr) { + if (instr->Bits(28, 27) == 0) { + VisitUnallocated(instr); + } else { + switch (instr->Bits(27, 24)) { + // 0: PC relative addressing. + case 0x0: DecodePCRelAddressing(instr); break; + + // 1: Add/sub immediate. + case 0x1: DecodeAddSubImmediate(instr); break; + + // A: Logical shifted register. 
+ // Add/sub with carry. + // Conditional compare register. + // Conditional compare immediate. + // Conditional select. + // Data processing 1 source. + // Data processing 2 source. + // B: Add/sub shifted register. + // Add/sub extended register. + // Data processing 3 source. + case 0xA: + case 0xB: DecodeDataProcessing(instr); break; + + // 2: Logical immediate. + // Move wide immediate. + case 0x2: DecodeLogical(instr); break; + + // 3: Bitfield. + // Extract. + case 0x3: DecodeBitfieldExtract(instr); break; + + // 4: Unconditional branch immediate. + // Exception generation. + // Compare and branch immediate. + // 5: Compare and branch immediate. + // Conditional branch. + // System. + // 6,7: Unconditional branch. + // Test and branch immediate. + case 0x4: + case 0x5: + case 0x6: + case 0x7: DecodeBranchSystemException(instr); break; + + // 8,9: Load/store register pair post-index. + // Load register literal. + // Load/store register unscaled immediate. + // Load/store register immediate post-index. + // Load/store register immediate pre-index. + // Load/store register offset. + // Load/store exclusive. + // C,D: Load/store register pair offset. + // Load/store register pair pre-index. + // Load/store register unsigned immediate. + // Advanced SIMD. + case 0x8: + case 0x9: + case 0xC: + case 0xD: DecodeLoadStore(instr); break; + + // E: FP fixed point conversion. + // FP integer conversion. + // FP data processing 1 source. + // FP compare. + // FP immediate. + // FP data processing 2 source. + // FP conditional compare. + // FP conditional select. + // Advanced SIMD. + // F: FP data processing 3 source. + // Advanced SIMD. + case 0xE: + case 0xF: DecodeFP(instr); break; + } + } +} + +void Decoder::AppendVisitor(DecoderVisitor* new_visitor) { + visitors_.append(new_visitor); +} + + +void Decoder::PrependVisitor(DecoderVisitor* new_visitor) { + visitors_.insert(visitors_.begin(), new_visitor); +} + + +void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor) { + for (auto it = visitors_.begin(); it != visitors_.end(); it++) { + if (*it == registered_visitor) { + visitors_.insert(it, new_visitor); + return; + } + } + // We reached the end of the list without finding registered_visitor. + visitors_.append(new_visitor); +} + + +void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor) { + for (auto it = visitors_.begin(); it != visitors_.end(); it++) { + if (*it == registered_visitor) { + it++; + visitors_.insert(it, new_visitor); + return; + } + } + // We reached the end of the list without finding registered_visitor. + visitors_.append(new_visitor); +} + + +void Decoder::RemoveVisitor(DecoderVisitor* visitor) { + visitors_.erase(std::remove(visitors_.begin(), visitors_.end(), visitor), + visitors_.end()); +} + + +void Decoder::DecodePCRelAddressing(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 24) == 0x0); + // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level + // decode. 
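+  // Only ADR and ADRP encodings fall into this class.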
+ VIXL_ASSERT(instr->Bit(28) == 0x1); + VisitPCRelAddressing(instr); +} + + +void Decoder::DecodeBranchSystemException(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0x4) || + (instr->Bits(27, 24) == 0x5) || + (instr->Bits(27, 24) == 0x6) || + (instr->Bits(27, 24) == 0x7) ); + + switch (instr->Bits(31, 29)) { + case 0: + case 4: { + VisitUnconditionalBranch(instr); + break; + } + case 1: + case 5: { + if (instr->Bit(25) == 0) { + VisitCompareBranch(instr); + } else { + VisitTestBranch(instr); + } + break; + } + case 2: { + if (instr->Bit(25) == 0) { + if ((instr->Bit(24) == 0x1) || + (instr->Mask(0x01000010) == 0x00000010)) { + VisitUnallocated(instr); + } else { + VisitConditionalBranch(instr); + } + } else { + VisitUnallocated(instr); + } + break; + } + case 6: { + if (instr->Bit(25) == 0) { + if (instr->Bit(24) == 0) { + if ((instr->Bits(4, 2) != 0) || + (instr->Mask(0x00E0001D) == 0x00200001) || + (instr->Mask(0x00E0001D) == 0x00400001) || + (instr->Mask(0x00E0001E) == 0x00200002) || + (instr->Mask(0x00E0001E) == 0x00400002) || + (instr->Mask(0x00E0001C) == 0x00600000) || + (instr->Mask(0x00E0001C) == 0x00800000) || + (instr->Mask(0x00E0001F) == 0x00A00000) || + (instr->Mask(0x00C0001C) == 0x00C00000)) { + VisitUnallocated(instr); + } else { + VisitException(instr); + } + } else { + if (instr->Bits(23, 22) == 0) { + const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0); + if ((instr->Bits(21, 19) == 0x4) || + (masked_003FF0E0 == 0x00033000) || + (masked_003FF0E0 == 0x003FF020) || + (masked_003FF0E0 == 0x003FF060) || + (masked_003FF0E0 == 0x003FF0E0) || + (instr->Mask(0x00388000) == 0x00008000) || + (instr->Mask(0x0038E000) == 0x00000000) || + (instr->Mask(0x0039E000) == 0x00002000) || + (instr->Mask(0x003AE000) == 0x00002000) || + (instr->Mask(0x003CE000) == 0x00042000) || + (instr->Mask(0x003FFFC0) == 0x000320C0) || + (instr->Mask(0x003FF100) == 0x00032100) || + (instr->Mask(0x003FF200) == 0x00032200) || + (instr->Mask(0x003FF400) == 0x00032400) || + (instr->Mask(0x003FF800) == 0x00032800) || + (instr->Mask(0x0038F000) == 0x00005000) || + (instr->Mask(0x0038E000) == 0x00006000)) { + VisitUnallocated(instr); + } else { + VisitSystem(instr); + } + } else { + VisitUnallocated(instr); + } + } + } else { + if ((instr->Bit(24) == 0x1) || + (instr->Bits(20, 16) != 0x1F) || + (instr->Bits(15, 10) != 0) || + (instr->Bits(4, 0) != 0) || + (instr->Bits(24, 21) == 0x3) || + (instr->Bits(24, 22) == 0x3)) { + VisitUnallocated(instr); + } else { + VisitUnconditionalBranchToRegister(instr); + } + } + break; + } + case 3: + case 7: { + VisitUnallocated(instr); + break; + } + } +} + + +void Decoder::DecodeLoadStore(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0x8) || + (instr->Bits(27, 24) == 0x9) || + (instr->Bits(27, 24) == 0xC) || + (instr->Bits(27, 24) == 0xD) ); + // TODO(all): rearrange the tree to integrate this branch. 
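+  // NEON load/store encodings (<b29:b28> = 0 with bit 26 set) are dispatched
+  // before walking the scalar load/store tree below.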
+ if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) { + DecodeNEONLoadStore(instr); + return; + } + + if (instr->Bit(24) == 0) { + if (instr->Bit(28) == 0) { + if (instr->Bit(29) == 0) { + if (instr->Bit(26) == 0) { + VisitLoadStoreExclusive(instr); + } else { + VIXL_UNREACHABLE(); + } + } else { + if ((instr->Bits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(23) == 0) { + if (instr->Mask(0xC4400000) == 0xC0400000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePairNonTemporal(instr); + } + } else { + VisitLoadStorePairPostIndex(instr); + } + } + } + } else { + if (instr->Bit(29) == 0) { + if (instr->Mask(0xC4000000) == 0xC4000000) { + VisitUnallocated(instr); + } else { + VisitLoadLiteral(instr); + } + } else { + if ((instr->Mask(0x84C00000) == 0x80C00000) || + (instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(21) == 0) { + switch (instr->Bits(11, 10)) { + case 0: { + VisitLoadStoreUnscaledOffset(instr); + break; + } + case 1: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePostIndex(instr); + } + break; + } + case 2: { + // TODO: VisitLoadStoreRegisterOffsetUnpriv. + VisitUnimplemented(instr); + break; + } + case 3: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePreIndex(instr); + } + break; + } + } + } else { + if (instr->Bits(11, 10) == 0x2) { + if (instr->Bit(14) == 0) { + VisitUnallocated(instr); + } else { + VisitLoadStoreRegisterOffset(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } + } + } else { + if (instr->Bit(28) == 0) { + if (instr->Bit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->Bits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(23) == 0) { + VisitLoadStorePairOffset(instr); + } else { + VisitLoadStorePairPreIndex(instr); + } + } + } + } else { + if (instr->Bit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->Mask(0x84C00000) == 0x80C00000) || + (instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + VisitLoadStoreUnsignedOffset(instr); + } + } + } + } +} + + +void Decoder::DecodeLogical(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 24) == 0x2); + + if (instr->Mask(0x80400000) == 0x00400000) { + VisitUnallocated(instr); + } else { + if (instr->Bit(23) == 0) { + VisitLogicalImmediate(instr); + } else { + if (instr->Bits(30, 29) == 0x1) { + VisitUnallocated(instr); + } else { + VisitMoveWideImmediate(instr); + } + } + } +} + + +void Decoder::DecodeBitfieldExtract(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 24) == 0x3); + + if ((instr->Mask(0x80400000) == 0x80000000) || + (instr->Mask(0x80400000) == 0x00400000) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else if (instr->Bit(23) == 0) { + if ((instr->Mask(0x80200000) == 0x00200000) || + (instr->Mask(0x60000000) == 0x60000000)) { + VisitUnallocated(instr); + } else { + VisitBitfield(instr); + } + } else { + if ((instr->Mask(0x60200000) == 0x00200000) || + (instr->Mask(0x60000000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitExtract(instr); + } + } +} + + +void Decoder::DecodeAddSubImmediate(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 
24) == 0x1); + if (instr->Bit(23) == 1) { + VisitUnallocated(instr); + } else { + VisitAddSubImmediate(instr); + } +} + + +void Decoder::DecodeDataProcessing(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0xA) || + (instr->Bits(27, 24) == 0xB)); + + if (instr->Bit(24) == 0) { + if (instr->Bit(28) == 0) { + if (instr->Mask(0x80008000) == 0x00008000) { + VisitUnallocated(instr); + } else { + VisitLogicalShifted(instr); + } + } else { + switch (instr->Bits(23, 21)) { + case 0: { + if (instr->Mask(0x0000FC00) != 0) { + VisitUnallocated(instr); + } else { + VisitAddSubWithCarry(instr); + } + break; + } + case 2: { + if ((instr->Bit(29) == 0) || + (instr->Mask(0x00000410) != 0)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(11) == 0) { + VisitConditionalCompareRegister(instr); + } else { + VisitConditionalCompareImmediate(instr); + } + } + break; + } + case 4: { + if (instr->Mask(0x20000800) != 0x00000000) { + VisitUnallocated(instr); + } else { + VisitConditionalSelect(instr); + } + break; + } + case 6: { + if (instr->Bit(29) == 0x1) { + VisitUnallocated(instr); + VIXL_FALLTHROUGH(); + } else { + if (instr->Bit(30) == 0) { + if ((instr->Bit(15) == 0x1) || + (instr->Bits(15, 11) == 0) || + (instr->Bits(15, 12) == 0x1) || + (instr->Bits(15, 12) == 0x3) || + (instr->Bits(15, 13) == 0x3) || + (instr->Mask(0x8000EC00) == 0x00004C00) || + (instr->Mask(0x8000E800) == 0x80004000) || + (instr->Mask(0x8000E400) == 0x80004000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing2Source(instr); + } + } else { + if ((instr->Bit(13) == 1) || + (instr->Bits(20, 16) != 0) || + (instr->Bits(15, 14) != 0) || + (instr->Mask(0xA01FFC00) == 0x00000C00) || + (instr->Mask(0x201FF800) == 0x00001800)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing1Source(instr); + } + } + break; + } + } + case 1: + case 3: + case 5: + case 7: VisitUnallocated(instr); break; + } + } + } else { + if (instr->Bit(28) == 0) { + if (instr->Bit(21) == 0) { + if ((instr->Bits(23, 22) == 0x3) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else { + VisitAddSubShifted(instr); + } + } else { + if ((instr->Mask(0x00C00000) != 0x00000000) || + (instr->Mask(0x00001400) == 0x00001400) || + (instr->Mask(0x00001800) == 0x00001800)) { + VisitUnallocated(instr); + } else { + VisitAddSubExtended(instr); + } + } + } else { + if ((instr->Bit(30) == 0x1) || + (instr->Bits(30, 29) == 0x1) || + (instr->Mask(0xE0600000) == 0x00200000) || + (instr->Mask(0xE0608000) == 0x00400000) || + (instr->Mask(0x60608000) == 0x00408000) || + (instr->Mask(0x60E00000) == 0x00E00000) || + (instr->Mask(0x60E00000) == 0x00800000) || + (instr->Mask(0x60E00000) == 0x00600000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing3Source(instr); + } + } + } +} + + +void Decoder::DecodeFP(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0xE) || + (instr->Bits(27, 24) == 0xF)); + if (instr->Bit(28) == 0) { + DecodeNEONVectorDataProcessing(instr); + } else { + if (instr->Bits(31, 30) == 0x3) { + VisitUnallocated(instr); + } else if (instr->Bits(31, 30) == 0x1) { + DecodeNEONScalarDataProcessing(instr); + } else { + if (instr->Bit(29) == 0) { + if (instr->Bit(24) == 0) { + if (instr->Bit(21) == 0) { + if ((instr->Bit(23) == 1) || + (instr->Bit(18) == 1) || + (instr->Mask(0x80008000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x00160000) == 0x00000000) || + (instr->Mask(0x00160000) == 0x00120000)) { 
+ VisitUnallocated(instr); + } else { + VisitFPFixedPointConvert(instr); + } + } else { + if (instr->Bits(15, 10) == 32) { + VisitUnallocated(instr); + } else if (instr->Bits(15, 10) == 0) { + if ((instr->Bits(23, 22) == 0x3) || + (instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x000E0000) == 0x000C0000) || + (instr->Mask(0x00160000) == 0x00120000) || + (instr->Mask(0x00160000) == 0x00140000) || + (instr->Mask(0x20C40000) == 0x00800000) || + (instr->Mask(0x20C60000) == 0x00840000) || + (instr->Mask(0xA0C60000) == 0x80060000) || + (instr->Mask(0xA0C60000) == 0x00860000) || + (instr->Mask(0xA0C60000) == 0x00460000) || + (instr->Mask(0xA0CE0000) == 0x80860000) || + (instr->Mask(0xA0CE0000) == 0x804E0000) || + (instr->Mask(0xA0CE0000) == 0x000E0000) || + (instr->Mask(0xA0D60000) == 0x00160000) || + (instr->Mask(0xA0D60000) == 0x80560000) || + (instr->Mask(0xA0D60000) == 0x80960000)) { + VisitUnallocated(instr); + } else { + VisitFPIntegerConvert(instr); + } + } else if (instr->Bits(14, 10) == 16) { + const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000); + if ((instr->Mask(0x80180000) != 0) || + (masked_A0DF8000 == 0x00020000) || + (masked_A0DF8000 == 0x00030000) || + (masked_A0DF8000 == 0x00068000) || + (masked_A0DF8000 == 0x00428000) || + (masked_A0DF8000 == 0x00430000) || + (masked_A0DF8000 == 0x00468000) || + (instr->Mask(0xA0D80000) == 0x00800000) || + (instr->Mask(0xA0DE0000) == 0x00C00000) || + (instr->Mask(0xA0DF0000) == 0x00C30000) || + (instr->Mask(0xA0DC0000) == 0x00C40000)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing1Source(instr); + } + } else if (instr->Bits(13, 10) == 8) { + if ((instr->Bits(15, 14) != 0) || + (instr->Bits(2, 0) != 0) || + (instr->Mask(0x80800000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitFPCompare(instr); + } + } else if (instr->Bits(12, 10) == 4) { + if ((instr->Bits(9, 5) != 0) || + (instr->Mask(0x80800000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitFPImmediate(instr); + } + } else { + if (instr->Mask(0x80800000) != 0x00000000) { + VisitUnallocated(instr); + } else { + switch (instr->Bits(11, 10)) { + case 1: { + VisitFPConditionalCompare(instr); + break; + } + case 2: { + if ((instr->Bits(15, 14) == 0x3) || + (instr->Mask(0x00009000) == 0x00009000) || + (instr->Mask(0x0000A000) == 0x0000A000)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing2Source(instr); + } + break; + } + case 3: { + VisitFPConditionalSelect(instr); + break; + } + default: VIXL_UNREACHABLE(); + } + } + } + } + } else { + // Bit 30 == 1 has been handled earlier. 
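+        // Only the FP data processing 3 source encodings remain to be
+        // checked here.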
+ VIXL_ASSERT(instr->Bit(30) == 0); + if (instr->Mask(0xA0800000) != 0) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing3Source(instr); + } + } + } else { + VisitUnallocated(instr); + } + } + } +} + + +void Decoder::DecodeNEONLoadStore(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(29, 25) == 0x6); + if (instr->Bit(31) == 0) { + if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) { + VisitUnallocated(instr); + return; + } + + if (instr->Bit(23) == 0) { + if (instr->Bits(20, 16) == 0) { + if (instr->Bit(24) == 0) { + VisitNEONLoadStoreMultiStruct(instr); + } else { + VisitNEONLoadStoreSingleStruct(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->Bit(24) == 0) { + VisitNEONLoadStoreMultiStructPostIndex(instr); + } else { + VisitNEONLoadStoreSingleStructPostIndex(instr); + } + } + } else { + VisitUnallocated(instr); + } +} + + +void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(28, 25) == 0x7); + if (instr->Bit(31) == 0) { + if (instr->Bit(24) == 0) { + if (instr->Bit(21) == 0) { + if (instr->Bit(15) == 0) { + if (instr->Bit(10) == 0) { + if (instr->Bit(29) == 0) { + if (instr->Bit(11) == 0) { + VisitNEONTable(instr); + } else { + VisitNEONPerm(instr); + } + } else { + VisitNEONExtract(instr); + } + } else { + if (instr->Bits(23, 22) == 0) { + VisitNEONCopy(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->Bit(10) == 0) { + if (instr->Bit(11) == 0) { + VisitNEON3Different(instr); + } else { + if (instr->Bits(18, 17) == 0) { + if (instr->Bit(20) == 0) { + if (instr->Bit(19) == 0) { + VisitNEON2RegMisc(instr); + } else { + if (instr->Bits(30, 29) == 0x2) { + VisitCryptoAES(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + if (instr->Bit(19) == 0) { + VisitNEONAcrossLanes(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitNEON3Same(instr); + } + } + } else { + if (instr->Bit(10) == 0) { + VisitNEONByIndexedElement(instr); + } else { + if (instr->Bit(23) == 0) { + if (instr->Bits(22, 19) == 0) { + VisitNEONModifiedImmediate(instr); + } else { + VisitNEONShiftImmediate(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } else { + VisitUnallocated(instr); + } +} + + +void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(28, 25) == 0xF); + if (instr->Bit(24) == 0) { + if (instr->Bit(21) == 0) { + if (instr->Bit(15) == 0) { + if (instr->Bit(10) == 0) { + if (instr->Bit(29) == 0) { + if (instr->Bit(11) == 0) { + VisitCrypto3RegSHA(instr); + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->Bits(23, 22) == 0) { + VisitNEONScalarCopy(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->Bit(10) == 0) { + if (instr->Bit(11) == 0) { + VisitNEONScalar3Diff(instr); + } else { + if (instr->Bits(18, 17) == 0) { + if (instr->Bit(20) == 0) { + if (instr->Bit(19) == 0) { + VisitNEONScalar2RegMisc(instr); + } else { + if (instr->Bit(29) == 0) { + VisitCrypto2RegSHA(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + if (instr->Bit(19) == 0) { + VisitNEONScalarPairwise(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitNEONScalar3Same(instr); + } + } + } else { + if (instr->Bit(10) == 0) { + 
VisitNEONScalarByIndexedElement(instr); + } else { + if (instr->Bit(23) == 0) { + VisitNEONScalarShiftImmediate(instr); + } else { + VisitUnallocated(instr); + } + } + } +} + + +#define DEFINE_VISITOR_CALLERS(A) \ + void Decoder::Visit##A(const Instruction *instr) { \ + VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \ + for (auto visitor : visitors_) { \ + visitor->Visit##A(instr); \ + } \ + } +VISITOR_LIST(DEFINE_VISITOR_CALLERS) +#undef DEFINE_VISITOR_CALLERS +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.h b/js/src/jit/arm64/vixl/Decoder-vixl.h new file mode 100644 index 000000000..95dd589e8 --- /dev/null +++ b/js/src/jit/arm64/vixl/Decoder-vixl.h @@ -0,0 +1,274 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_DECODER_A64_H_ +#define VIXL_A64_DECODER_A64_H_ + +#include "mozilla/Vector.h" + +#include "jsalloc.h" + +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" + + +// List macro containing all visitors needed by the decoder class. 
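+// Each name V in these lists expands into a Visit##V method on DecoderVisitor
+// and Decoder (see the DECLARE and DEFINE_VISITOR_CALLERS macros).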
+ +#define VISITOR_LIST_THAT_RETURN(V) \ + V(PCRelAddressing) \ + V(AddSubImmediate) \ + V(LogicalImmediate) \ + V(MoveWideImmediate) \ + V(Bitfield) \ + V(Extract) \ + V(UnconditionalBranch) \ + V(UnconditionalBranchToRegister) \ + V(CompareBranch) \ + V(TestBranch) \ + V(ConditionalBranch) \ + V(System) \ + V(Exception) \ + V(LoadStorePairPostIndex) \ + V(LoadStorePairOffset) \ + V(LoadStorePairPreIndex) \ + V(LoadStorePairNonTemporal) \ + V(LoadLiteral) \ + V(LoadStoreUnscaledOffset) \ + V(LoadStorePostIndex) \ + V(LoadStorePreIndex) \ + V(LoadStoreRegisterOffset) \ + V(LoadStoreUnsignedOffset) \ + V(LoadStoreExclusive) \ + V(LogicalShifted) \ + V(AddSubShifted) \ + V(AddSubExtended) \ + V(AddSubWithCarry) \ + V(ConditionalCompareRegister) \ + V(ConditionalCompareImmediate) \ + V(ConditionalSelect) \ + V(DataProcessing1Source) \ + V(DataProcessing2Source) \ + V(DataProcessing3Source) \ + V(FPCompare) \ + V(FPConditionalCompare) \ + V(FPConditionalSelect) \ + V(FPImmediate) \ + V(FPDataProcessing1Source) \ + V(FPDataProcessing2Source) \ + V(FPDataProcessing3Source) \ + V(FPIntegerConvert) \ + V(FPFixedPointConvert) \ + V(Crypto2RegSHA) \ + V(Crypto3RegSHA) \ + V(CryptoAES) \ + V(NEON2RegMisc) \ + V(NEON3Different) \ + V(NEON3Same) \ + V(NEONAcrossLanes) \ + V(NEONByIndexedElement) \ + V(NEONCopy) \ + V(NEONExtract) \ + V(NEONLoadStoreMultiStruct) \ + V(NEONLoadStoreMultiStructPostIndex) \ + V(NEONLoadStoreSingleStruct) \ + V(NEONLoadStoreSingleStructPostIndex) \ + V(NEONModifiedImmediate) \ + V(NEONScalar2RegMisc) \ + V(NEONScalar3Diff) \ + V(NEONScalar3Same) \ + V(NEONScalarByIndexedElement) \ + V(NEONScalarCopy) \ + V(NEONScalarPairwise) \ + V(NEONScalarShiftImmediate) \ + V(NEONShiftImmediate) \ + V(NEONTable) \ + V(NEONPerm) \ + +#define VISITOR_LIST_THAT_DONT_RETURN(V) \ + V(Unallocated) \ + V(Unimplemented) \ + +#define VISITOR_LIST(V) \ + VISITOR_LIST_THAT_RETURN(V) \ + VISITOR_LIST_THAT_DONT_RETURN(V) \ + +namespace vixl { + +// The Visitor interface. Disassembler and simulator (and other tools) +// must provide implementations for all of these functions. +class DecoderVisitor { + public: + enum VisitorConstness { + kConstVisitor, + kNonConstVisitor + }; + explicit DecoderVisitor(VisitorConstness constness = kConstVisitor) + : constness_(constness) {} + + virtual ~DecoderVisitor() {} + + #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0; + VISITOR_LIST(DECLARE) + #undef DECLARE + + bool IsConstVisitor() const { return constness_ == kConstVisitor; } + Instruction* MutableInstruction(const Instruction* instr) { + VIXL_ASSERT(!IsConstVisitor()); + return const_cast<Instruction*>(instr); + } + + private: + const VisitorConstness constness_; +}; + + +class Decoder { + public: + Decoder() {} + + // Top-level wrappers around the actual decoding function. + void Decode(const Instruction* instr) { + for (auto visitor : visitors_) { + VIXL_ASSERT(visitor->IsConstVisitor()); + } + DecodeInstruction(instr); + } + void Decode(Instruction* instr) { + DecodeInstruction(const_cast<const Instruction*>(instr)); + } + + // Register a new visitor class with the decoder. + // Decode() will call the corresponding visitor method from all registered + // visitor classes when decoding reaches the leaf node of the instruction + // decode tree. + // Visitors are called in order. + // A visitor can be registered multiple times. 
+ // + // d.AppendVisitor(V1); + // d.AppendVisitor(V2); + // d.PrependVisitor(V2); + // d.AppendVisitor(V3); + // + // d.Decode(i); + // + // will call in order visitor methods in V2, V1, V2, V3. + void AppendVisitor(DecoderVisitor* visitor); + void PrependVisitor(DecoderVisitor* visitor); + // These helpers register `new_visitor` before or after the first instance of + // `registered_visiter` in the list. + // So if + // V1, V2, V1, V2 + // are registered in this order in the decoder, calls to + // d.InsertVisitorAfter(V3, V1); + // d.InsertVisitorBefore(V4, V2); + // will yield the order + // V1, V3, V4, V2, V1, V2 + // + // For more complex modifications of the order of registered visitors, one can + // directly access and modify the list of visitors via the `visitors()' + // accessor. + void InsertVisitorBefore(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor); + void InsertVisitorAfter(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor); + + // Remove all instances of a previously registered visitor class from the list + // of visitors stored by the decoder. + void RemoveVisitor(DecoderVisitor* visitor); + + #define DECLARE(A) void Visit##A(const Instruction* instr); + VISITOR_LIST(DECLARE) + #undef DECLARE + + + private: + // Decodes an instruction and calls the visitor functions registered with the + // Decoder class. + void DecodeInstruction(const Instruction* instr); + + // Decode the PC relative addressing instruction, and call the corresponding + // visitors. + // On entry, instruction bits 27:24 = 0x0. + void DecodePCRelAddressing(const Instruction* instr); + + // Decode the add/subtract immediate instruction, and call the correspoding + // visitors. + // On entry, instruction bits 27:24 = 0x1. + void DecodeAddSubImmediate(const Instruction* instr); + + // Decode the branch, system command, and exception generation parts of + // the instruction tree, and call the corresponding visitors. + // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}. + void DecodeBranchSystemException(const Instruction* instr); + + // Decode the load and store parts of the instruction tree, and call + // the corresponding visitors. + // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}. + void DecodeLoadStore(const Instruction* instr); + + // Decode the logical immediate and move wide immediate parts of the + // instruction tree, and call the corresponding visitors. + // On entry, instruction bits 27:24 = 0x2. + void DecodeLogical(const Instruction* instr); + + // Decode the bitfield and extraction parts of the instruction tree, + // and call the corresponding visitors. + // On entry, instruction bits 27:24 = 0x3. + void DecodeBitfieldExtract(const Instruction* instr); + + // Decode the data processing parts of the instruction tree, and call the + // corresponding visitors. + // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}. + void DecodeDataProcessing(const Instruction* instr); + + // Decode the floating point parts of the instruction tree, and call the + // corresponding visitors. + // On entry, instruction bits 27:24 = {0xE, 0xF}. + void DecodeFP(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) load/store part of the instruction tree, + // and call the corresponding visitors. + // On entry, instruction bits 29:25 = 0x6. + void DecodeNEONLoadStore(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) vector data processing part of the + // instruction tree, and call the corresponding visitors. 
+ // On entry, instruction bits 28:25 = 0x7. + void DecodeNEONVectorDataProcessing(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) scalar data processing part of the + // instruction tree, and call the corresponding visitors. + // On entry, instruction bits 28:25 = 0xF. + void DecodeNEONScalarDataProcessing(const Instruction* instr); + + private: + // Visitors are registered in a list. + mozilla::Vector<DecoderVisitor*, 8, js::SystemAllocPolicy> visitors_; +}; + +} // namespace vixl + +#endif // VIXL_A64_DECODER_A64_H_ diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.cpp b/js/src/jit/arm64/vixl/Disasm-vixl.cpp new file mode 100644 index 000000000..365ef597b --- /dev/null +++ b/js/src/jit/arm64/vixl/Disasm-vixl.cpp @@ -0,0 +1,3488 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Disasm-vixl.h" + +#include <cstdlib> + +namespace vixl { + +Disassembler::Disassembler() { + buffer_size_ = 256; + buffer_ = reinterpret_cast<char*>(malloc(buffer_size_)); + buffer_pos_ = 0; + own_buffer_ = true; + code_address_offset_ = 0; +} + + +Disassembler::Disassembler(char* text_buffer, int buffer_size) { + buffer_size_ = buffer_size; + buffer_ = text_buffer; + buffer_pos_ = 0; + own_buffer_ = false; + code_address_offset_ = 0; +} + + +Disassembler::~Disassembler() { + if (own_buffer_) { + free(buffer_); + } +} + + +char* Disassembler::GetOutput() { + return buffer_; +} + + +void Disassembler::VisitAddSubImmediate(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) && + (instr->ImmAddSub() == 0) ? 
true : false; + const char *mnemonic = ""; + const char *form = "'Rds, 'Rns, 'IAddSub"; + const char *form_cmp = "'Rns, 'IAddSub"; + const char *form_mov = "'Rds, 'Rns"; + + switch (instr->Mask(AddSubImmediateMask)) { + case ADD_w_imm: + case ADD_x_imm: { + mnemonic = "add"; + if (stack_op) { + mnemonic = "mov"; + form = form_mov; + } + break; + } + case ADDS_w_imm: + case ADDS_x_imm: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_imm: + case SUB_x_imm: mnemonic = "sub"; break; + case SUBS_w_imm: + case SUBS_x_imm: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubShifted(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'NDP"; + const char *form_cmp = "'Rn, 'Rm'NDP"; + const char *form_neg = "'Rd, 'Rm'NDP"; + + switch (instr->Mask(AddSubShiftedMask)) { + case ADD_w_shift: + case ADD_x_shift: mnemonic = "add"; break; + case ADDS_w_shift: + case ADDS_x_shift: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_shift: + case SUB_x_shift: { + mnemonic = "sub"; + if (rn_is_zr) { + mnemonic = "neg"; + form = form_neg; + } + break; + } + case SUBS_w_shift: + case SUBS_x_shift: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } else if (rn_is_zr) { + mnemonic = "negs"; + form = form_neg; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubExtended(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + const char *mnemonic = ""; + Extend mode = static_cast<Extend>(instr->ExtendMode()); + const char *form = ((mode == UXTX) || (mode == SXTX)) ? + "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext"; + const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ? 
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext"; + + switch (instr->Mask(AddSubExtendedMask)) { + case ADD_w_ext: + case ADD_x_ext: mnemonic = "add"; break; + case ADDS_w_ext: + case ADDS_x_ext: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_ext: + case SUB_x_ext: mnemonic = "sub"; break; + case SUBS_w_ext: + case SUBS_x_ext: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubWithCarry(const Instruction* instr) { + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_neg = "'Rd, 'Rm"; + + switch (instr->Mask(AddSubWithCarryMask)) { + case ADC_w: + case ADC_x: mnemonic = "adc"; break; + case ADCS_w: + case ADCS_x: mnemonic = "adcs"; break; + case SBC_w: + case SBC_x: { + mnemonic = "sbc"; + if (rn_is_zr) { + mnemonic = "ngc"; + form = form_neg; + } + break; + } + case SBCS_w: + case SBCS_x: { + mnemonic = "sbcs"; + if (rn_is_zr) { + mnemonic = "ngcs"; + form = form_neg; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLogicalImmediate(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rds, 'Rn, 'ITri"; + + if (instr->ImmLogical() == 0) { + // The immediate encoded in the instruction is not in the expected format. + Format(instr, "unallocated", "(LogicalImmediate)"); + return; + } + + switch (instr->Mask(LogicalImmediateMask)) { + case AND_w_imm: + case AND_x_imm: mnemonic = "and"; break; + case ORR_w_imm: + case ORR_x_imm: { + mnemonic = "orr"; + unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize + : kWRegSize; + if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) { + mnemonic = "mov"; + form = "'Rds, 'ITri"; + } + break; + } + case EOR_w_imm: + case EOR_x_imm: mnemonic = "eor"; break; + case ANDS_w_imm: + case ANDS_x_imm: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'ITri"; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) { + VIXL_ASSERT((reg_size == kXRegSize) || + ((reg_size == kWRegSize) && (value <= 0xffffffff))); + + // Test for movz: 16 bits set at positions 0, 16, 32 or 48. + if (((value & UINT64_C(0xffffffffffff0000)) == 0) || + ((value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((value & UINT64_C(0x0000ffffffffffff)) == 0)) { + return true; + } + + // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48). 
+ if ((reg_size == kXRegSize) && + (((~value & UINT64_C(0xffffffffffff0000)) == 0) || + ((~value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((~value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) { + return true; + } + if ((reg_size == kWRegSize) && + (((value & 0xffff0000) == 0xffff0000) || + ((value & 0x0000ffff) == 0x0000ffff))) { + return true; + } + return false; +} + + +void Disassembler::VisitLogicalShifted(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'NLo"; + + switch (instr->Mask(LogicalShiftedMask)) { + case AND_w: + case AND_x: mnemonic = "and"; break; + case BIC_w: + case BIC_x: mnemonic = "bic"; break; + case EOR_w: + case EOR_x: mnemonic = "eor"; break; + case EON_w: + case EON_x: mnemonic = "eon"; break; + case BICS_w: + case BICS_x: mnemonic = "bics"; break; + case ANDS_w: + case ANDS_x: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'Rm'NLo"; + } + break; + } + case ORR_w: + case ORR_x: { + mnemonic = "orr"; + if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) { + mnemonic = "mov"; + form = "'Rd, 'Rm"; + } + break; + } + case ORN_w: + case ORN_x: { + mnemonic = "orn"; + if (rn_is_zr) { + mnemonic = "mvn"; + form = "'Rd, 'Rm'NLo"; + } + break; + } + default: VIXL_UNREACHABLE(); + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'Rm, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareRegisterMask)) { + case CCMN_w: + case CCMN_x: mnemonic = "ccmn"; break; + case CCMP_w: + case CCMP_x: mnemonic = "ccmp"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'IP, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareImmediateMask)) { + case CCMN_w_imm: + case CCMN_x_imm: mnemonic = "ccmn"; break; + case CCMP_w_imm: + case CCMP_x_imm: mnemonic = "ccmp"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalSelect(const Instruction* instr) { + bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr)); + bool rn_is_rm = (instr->Rn() == instr->Rm()); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'Cond"; + const char *form_test = "'Rd, 'CInv"; + const char *form_update = "'Rd, 'Rn, 'CInv"; + + Condition cond = static_cast<Condition>(instr->Condition()); + bool invertible_cond = (cond != al) && (cond != nv); + + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: mnemonic = "csel"; break; + case CSINC_w: + case CSINC_x: { + mnemonic = "csinc"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "cset"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinc"; + form = form_update; + } + break; + } + case CSINV_w: + case CSINV_x: { + mnemonic = "csinv"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "csetm"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinv"; + form = form_update; + } + break; + } + case CSNEG_w: + case CSNEG_x: { + mnemonic = "csneg"; + if (rn_is_rm && invertible_cond) { + mnemonic = "cneg"; + form = form_update; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, 
mnemonic, form); +} + + +void Disassembler::VisitBitfield(const Instruction* instr) { + unsigned s = instr->ImmS(); + unsigned r = instr->ImmR(); + unsigned rd_size_minus_1 = + ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1; + const char *mnemonic = ""; + const char *form = ""; + const char *form_shift_right = "'Rd, 'Rn, 'IBr"; + const char *form_extend = "'Rd, 'Wn"; + const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1"; + const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1"; + const char *form_lsl = "'Rd, 'Rn, 'IBZ-r"; + + switch (instr->Mask(BitfieldMask)) { + case SBFM_w: + case SBFM_x: { + mnemonic = "sbfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "sxtb"; + } else if (s == 15) { + mnemonic = "sxth"; + } else if ((s == 31) && (instr->SixtyFourBits() == 1)) { + mnemonic = "sxtw"; + } else { + form = form_bfx; + } + } else if (s == rd_size_minus_1) { + mnemonic = "asr"; + form = form_shift_right; + } else if (s < r) { + mnemonic = "sbfiz"; + form = form_bfiz; + } + break; + } + case UBFM_w: + case UBFM_x: { + mnemonic = "ubfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "uxtb"; + } else if (s == 15) { + mnemonic = "uxth"; + } else { + form = form_bfx; + } + } + if (s == rd_size_minus_1) { + mnemonic = "lsr"; + form = form_shift_right; + } else if (r == s + 1) { + mnemonic = "lsl"; + form = form_lsl; + } else if (s < r) { + mnemonic = "ubfiz"; + form = form_bfiz; + } + break; + } + case BFM_w: + case BFM_x: { + mnemonic = "bfxil"; + form = form_bfx; + if (s < r) { + mnemonic = "bfi"; + form = form_bfiz; + } + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitExtract(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'IExtract"; + + switch (instr->Mask(ExtractMask)) { + case EXTR_w: + case EXTR_x: { + if (instr->Rn() == instr->Rm()) { + mnemonic = "ror"; + form = "'Rd, 'Rn, 'IExtract"; + } else { + mnemonic = "extr"; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitPCRelAddressing(const Instruction* instr) { + switch (instr->Mask(PCRelAddressingMask)) { + case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break; + case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break; + default: Format(instr, "unimplemented", "(PCRelAddressing)"); + } +} + + +void Disassembler::VisitConditionalBranch(const Instruction* instr) { + switch (instr->Mask(ConditionalBranchMask)) { + case B_cond: Format(instr, "b.'CBrn", "'TImmCond"); break; + default: VIXL_UNREACHABLE(); + } +} + + +void Disassembler::VisitUnconditionalBranchToRegister( + const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Xn"; + + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BR: mnemonic = "br"; break; + case BLR: mnemonic = "blr"; break; + case RET: { + mnemonic = "ret"; + if (instr->Rn() == kLinkRegCode) { + form = NULL; + } + break; + } + default: form = "(UnconditionalBranchToRegister)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitUnconditionalBranch(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'TImmUncn"; + + switch (instr->Mask(UnconditionalBranchMask)) { + case B: mnemonic = "b"; break; + case BL: mnemonic = "bl"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing1Source(const Instruction* instr) { + const char 
*mnemonic = ""; + const char *form = "'Rd, 'Rn"; + + switch (instr->Mask(DataProcessing1SourceMask)) { + #define FORMAT(A, B) \ + case A##_w: \ + case A##_x: mnemonic = B; break; + FORMAT(RBIT, "rbit"); + FORMAT(REV16, "rev16"); + FORMAT(REV, "rev"); + FORMAT(CLZ, "clz"); + FORMAT(CLS, "cls"); + #undef FORMAT + case REV32_x: mnemonic = "rev32"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing2Source(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_wwx = "'Wd, 'Wn, 'Xm"; + + switch (instr->Mask(DataProcessing2SourceMask)) { + #define FORMAT(A, B) \ + case A##_w: \ + case A##_x: mnemonic = B; break; + FORMAT(UDIV, "udiv"); + FORMAT(SDIV, "sdiv"); + FORMAT(LSLV, "lsl"); + FORMAT(LSRV, "lsr"); + FORMAT(ASRV, "asr"); + FORMAT(RORV, "ror"); + #undef FORMAT + case CRC32B: mnemonic = "crc32b"; break; + case CRC32H: mnemonic = "crc32h"; break; + case CRC32W: mnemonic = "crc32w"; break; + case CRC32X: mnemonic = "crc32x"; form = form_wwx; break; + case CRC32CB: mnemonic = "crc32cb"; break; + case CRC32CH: mnemonic = "crc32ch"; break; + case CRC32CW: mnemonic = "crc32cw"; break; + case CRC32CX: mnemonic = "crc32cx"; form = form_wwx; break; + default: form = "(DataProcessing2Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing3Source(const Instruction* instr) { + bool ra_is_zr = RaIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Xd, 'Wn, 'Wm, 'Xa"; + const char *form_rrr = "'Rd, 'Rn, 'Rm"; + const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra"; + const char *form_xww = "'Xd, 'Wn, 'Wm"; + const char *form_xxx = "'Xd, 'Xn, 'Xm"; + + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: { + mnemonic = "madd"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mul"; + form = form_rrr; + } + break; + } + case MSUB_w: + case MSUB_x: { + mnemonic = "msub"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mneg"; + form = form_rrr; + } + break; + } + case SMADDL_x: { + mnemonic = "smaddl"; + if (ra_is_zr) { + mnemonic = "smull"; + form = form_xww; + } + break; + } + case SMSUBL_x: { + mnemonic = "smsubl"; + if (ra_is_zr) { + mnemonic = "smnegl"; + form = form_xww; + } + break; + } + case UMADDL_x: { + mnemonic = "umaddl"; + if (ra_is_zr) { + mnemonic = "umull"; + form = form_xww; + } + break; + } + case UMSUBL_x: { + mnemonic = "umsubl"; + if (ra_is_zr) { + mnemonic = "umnegl"; + form = form_xww; + } + break; + } + case SMULH_x: { + mnemonic = "smulh"; + form = form_xxx; + break; + } + case UMULH_x: { + mnemonic = "umulh"; + form = form_xxx; + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCompareBranch(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rt, 'TImmCmpa"; + + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: + case CBZ_x: mnemonic = "cbz"; break; + case CBNZ_w: + case CBNZ_x: mnemonic = "cbnz"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitTestBranch(const Instruction* instr) { + const char *mnemonic = ""; + // If the top bit of the immediate is clear, the tested register is + // disassembled as Wt, otherwise Xt. As the top bit of the immediate is + // encoded in bit 31 of the instruction, we can reuse the Rt form, which + // uses bit 31 (normally "sf") to choose the register size. 
+ const char *form = "'Rt, 'IS, 'TImmTest"; + + switch (instr->Mask(TestBranchMask)) { + case TBZ: mnemonic = "tbz"; break; + case TBNZ: mnemonic = "tbnz"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitMoveWideImmediate(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'IMoveImm"; + + // Print the shift separately for movk, to make it clear which half word will + // be overwritten. Movn and movz print the computed immediate, which includes + // shift calculation. + switch (instr->Mask(MoveWideImmediateMask)) { + case MOVN_w: + case MOVN_x: + if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) { + if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) { + mnemonic = "movn"; + } else { + mnemonic = "mov"; + form = "'Rd, 'IMoveNeg"; + } + } else { + mnemonic = "movn"; + } + break; + case MOVZ_w: + case MOVZ_x: + if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) + mnemonic = "mov"; + else + mnemonic = "movz"; + break; + case MOVK_w: + case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_LIST(V) \ + V(STRB_w, "strb", "'Wt") \ + V(STRH_w, "strh", "'Wt") \ + V(STR_w, "str", "'Wt") \ + V(STR_x, "str", "'Xt") \ + V(LDRB_w, "ldrb", "'Wt") \ + V(LDRH_w, "ldrh", "'Wt") \ + V(LDR_w, "ldr", "'Wt") \ + V(LDR_x, "ldr", "'Xt") \ + V(LDRSB_x, "ldrsb", "'Xt") \ + V(LDRSH_x, "ldrsh", "'Xt") \ + V(LDRSW_x, "ldrsw", "'Xt") \ + V(LDRSB_w, "ldrsb", "'Wt") \ + V(LDRSH_w, "ldrsh", "'Wt") \ + V(STR_b, "str", "'Bt") \ + V(STR_h, "str", "'Ht") \ + V(STR_s, "str", "'St") \ + V(STR_d, "str", "'Dt") \ + V(LDR_b, "ldr", "'Bt") \ + V(LDR_h, "ldr", "'Ht") \ + V(LDR_s, "ldr", "'St") \ + V(LDR_d, "ldr", "'Dt") \ + V(STR_q, "str", "'Qt") \ + V(LDR_q, "ldr", "'Qt") + +void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePreIndex)"; + + switch (instr->Mask(LoadStorePreIndexMask)) { + #define LS_PREINDEX(A, B, C) \ + case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break; + LOAD_STORE_LIST(LS_PREINDEX) + #undef LS_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePostIndex)"; + + switch (instr->Mask(LoadStorePostIndexMask)) { + #define LS_POSTINDEX(A, B, C) \ + case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break; + LOAD_STORE_LIST(LS_POSTINDEX) + #undef LS_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreUnsignedOffset)"; + + switch (instr->Mask(LoadStoreUnsignedOffsetMask)) { + #define LS_UNSIGNEDOFFSET(A, B, C) \ + case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break; + LOAD_STORE_LIST(LS_UNSIGNEDOFFSET) + #undef LS_UNSIGNEDOFFSET + case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreRegisterOffset)"; + + switch (instr->Mask(LoadStoreRegisterOffsetMask)) { + #define LS_REGISTEROFFSET(A, B, C) \ + case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break; + 
LOAD_STORE_LIST(LS_REGISTEROFFSET) + #undef LS_REGISTEROFFSET + case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Wt, ['Xns'ILS]"; + const char *form_x = "'Xt, ['Xns'ILS]"; + const char *form_b = "'Bt, ['Xns'ILS]"; + const char *form_h = "'Ht, ['Xns'ILS]"; + const char *form_s = "'St, ['Xns'ILS]"; + const char *form_d = "'Dt, ['Xns'ILS]"; + const char *form_q = "'Qt, ['Xns'ILS]"; + const char *form_prefetch = "'PrefOp, ['Xns'ILS]"; + + switch (instr->Mask(LoadStoreUnscaledOffsetMask)) { + case STURB_w: mnemonic = "sturb"; break; + case STURH_w: mnemonic = "sturh"; break; + case STUR_w: mnemonic = "stur"; break; + case STUR_x: mnemonic = "stur"; form = form_x; break; + case STUR_b: mnemonic = "stur"; form = form_b; break; + case STUR_h: mnemonic = "stur"; form = form_h; break; + case STUR_s: mnemonic = "stur"; form = form_s; break; + case STUR_d: mnemonic = "stur"; form = form_d; break; + case STUR_q: mnemonic = "stur"; form = form_q; break; + case LDURB_w: mnemonic = "ldurb"; break; + case LDURH_w: mnemonic = "ldurh"; break; + case LDUR_w: mnemonic = "ldur"; break; + case LDUR_x: mnemonic = "ldur"; form = form_x; break; + case LDUR_b: mnemonic = "ldur"; form = form_b; break; + case LDUR_h: mnemonic = "ldur"; form = form_h; break; + case LDUR_s: mnemonic = "ldur"; form = form_s; break; + case LDUR_d: mnemonic = "ldur"; form = form_d; break; + case LDUR_q: mnemonic = "ldur"; form = form_q; break; + case LDURSB_x: form = form_x; VIXL_FALLTHROUGH(); + case LDURSB_w: mnemonic = "ldursb"; break; + case LDURSH_x: form = form_x; VIXL_FALLTHROUGH(); + case LDURSH_w: mnemonic = "ldursh"; break; + case LDURSW_x: mnemonic = "ldursw"; form = form_x; break; + case PRFUM: mnemonic = "prfum"; form = form_prefetch; break; + default: form = "(LoadStoreUnscaledOffset)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadLiteral(const Instruction* instr) { + const char *mnemonic = "ldr"; + const char *form = "(LoadLiteral)"; + + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break; + case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break; + case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break; + case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break; + case LDR_q_lit: form = "'Qt, 'ILLiteral 'LValue"; break; + case LDRSW_x_lit: { + mnemonic = "ldrsw"; + form = "'Xt, 'ILLiteral 'LValue"; + break; + } + case PRFM_lit: { + mnemonic = "prfm"; + form = "'PrefOp, 'ILLiteral 'LValue"; + break; + } + default: mnemonic = "unimplemented"; + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_PAIR_LIST(V) \ + V(STP_w, "stp", "'Wt, 'Wt2", "2") \ + V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \ + V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \ + V(STP_x, "stp", "'Xt, 'Xt2", "3") \ + V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \ + V(STP_s, "stp", "'St, 'St2", "2") \ + V(LDP_s, "ldp", "'St, 'St2", "2") \ + V(STP_d, "stp", "'Dt, 'Dt2", "3") \ + V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \ + V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \ + V(STP_q, "stp", "'Qt, 'Qt2", "4") + +void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPostIndex)"; + + switch (instr->Mask(LoadStorePairPostIndexMask)) { + #define LSP_POSTINDEX(A, B, C, D) \ + case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; 
break; + LOAD_STORE_PAIR_LIST(LSP_POSTINDEX) + #undef LSP_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPreIndex)"; + + switch (instr->Mask(LoadStorePairPreIndexMask)) { + #define LSP_PREINDEX(A, B, C, D) \ + case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break; + LOAD_STORE_PAIR_LIST(LSP_PREINDEX) + #undef LSP_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairOffset)"; + + switch (instr->Mask(LoadStorePairOffsetMask)) { + #define LSP_OFFSET(A, B, C, D) \ + case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break; + LOAD_STORE_PAIR_LIST(LSP_OFFSET) + #undef LSP_OFFSET + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStorePairNonTemporalMask)) { + case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break; + case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break; + case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break; + case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break; + case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP2]"; break; + case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP2]"; break; + case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break; + case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break; + case STNP_q: mnemonic = "stnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break; + case LDNP_q: mnemonic = "ldnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break; + default: form = "(LoadStorePairNonTemporal)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStoreExclusiveMask)) { + case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break; + case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break; + case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break; + case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break; + case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break; + case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break; + case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break; + case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break; + case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break; + case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break; + case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break; + case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break; + case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break; + case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break; + case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break; + case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break; + case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break; + case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break; + case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break; + case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break; + case STLXP_w: 
mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break; + case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break; + case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break; + case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break; + case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break; + case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break; + case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break; + case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break; + case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break; + case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break; + case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break; + case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break; + default: form = "(LoadStoreExclusive)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPCompare(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fn, 'Fm"; + const char *form_zero = "'Fn, #0.0"; + + switch (instr->Mask(FPCompareMask)) { + case FCMP_s_zero: + case FCMP_d_zero: form = form_zero; VIXL_FALLTHROUGH(); + case FCMP_s: + case FCMP_d: mnemonic = "fcmp"; break; + case FCMPE_s_zero: + case FCMPE_d_zero: form = form_zero; VIXL_FALLTHROUGH(); + case FCMPE_s: + case FCMPE_d: mnemonic = "fcmpe"; break; + default: form = "(FPCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalCompare(const Instruction* instr) { + const char *mnemonic = "unmplemented"; + const char *form = "'Fn, 'Fm, 'INzcv, 'Cond"; + + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMP_s: + case FCCMP_d: mnemonic = "fccmp"; break; + case FCCMPE_s: + case FCCMPE_d: mnemonic = "fccmpe"; break; + default: form = "(FPConditionalCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalSelect(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Cond"; + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_s: + case FCSEL_d: mnemonic = "fcsel"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fd, 'Fn"; + + switch (instr->Mask(FPDataProcessing1SourceMask)) { + #define FORMAT(A, B) \ + case A##_s: \ + case A##_d: mnemonic = B; break; + FORMAT(FMOV, "fmov"); + FORMAT(FABS, "fabs"); + FORMAT(FNEG, "fneg"); + FORMAT(FSQRT, "fsqrt"); + FORMAT(FRINTN, "frintn"); + FORMAT(FRINTP, "frintp"); + FORMAT(FRINTM, "frintm"); + FORMAT(FRINTZ, "frintz"); + FORMAT(FRINTA, "frinta"); + FORMAT(FRINTX, "frintx"); + FORMAT(FRINTI, "frinti"); + #undef FORMAT + case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break; + case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break; + case FCVT_hs: mnemonic = "fcvt"; form = "'Hd, 'Sn"; break; + case FCVT_sh: mnemonic = "fcvt"; form = "'Sd, 'Hn"; break; + case FCVT_dh: mnemonic = "fcvt"; form = "'Dd, 'Hn"; break; + case FCVT_hd: mnemonic = "fcvt"; form = "'Hd, 'Dn"; break; + default: form = "(FPDataProcessing1Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm"; + + switch (instr->Mask(FPDataProcessing2SourceMask)) { + #define FORMAT(A, B) \ + case A##_s: \ + case A##_d: mnemonic = B; break; + FORMAT(FMUL, "fmul"); 
+ FORMAT(FDIV, "fdiv"); + FORMAT(FADD, "fadd"); + FORMAT(FSUB, "fsub"); + FORMAT(FMAX, "fmax"); + FORMAT(FMIN, "fmin"); + FORMAT(FMAXNM, "fmaxnm"); + FORMAT(FMINNM, "fminnm"); + FORMAT(FNMUL, "fnmul"); + #undef FORMAT + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Fa"; + + switch (instr->Mask(FPDataProcessing3SourceMask)) { + #define FORMAT(A, B) \ + case A##_s: \ + case A##_d: mnemonic = B; break; + FORMAT(FMADD, "fmadd"); + FORMAT(FMSUB, "fmsub"); + FORMAT(FNMADD, "fnmadd"); + FORMAT(FNMSUB, "fnmsub"); + #undef FORMAT + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPImmediate(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "(FPImmediate)"; + + switch (instr->Mask(FPImmediateMask)) { + case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break; + case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPIntegerConvert(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(FPIntegerConvert)"; + const char *form_rf = "'Rd, 'Fn"; + const char *form_fr = "'Fd, 'Rn"; + + switch (instr->Mask(FPIntegerConvertMask)) { + case FMOV_ws: + case FMOV_xd: mnemonic = "fmov"; form = form_rf; break; + case FMOV_sw: + case FMOV_dx: mnemonic = "fmov"; form = form_fr; break; + case FMOV_d1_x: mnemonic = "fmov"; form = "'Vd.D[1], 'Rn"; break; + case FMOV_x_d1: mnemonic = "fmov"; form = "'Rd, 'Vn.D[1]"; break; + case FCVTAS_ws: + case FCVTAS_xs: + case FCVTAS_wd: + case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break; + case FCVTAU_ws: + case FCVTAU_xs: + case FCVTAU_wd: + case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break; + case FCVTMS_ws: + case FCVTMS_xs: + case FCVTMS_wd: + case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break; + case FCVTMU_ws: + case FCVTMU_xs: + case FCVTMU_wd: + case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break; + case FCVTNS_ws: + case FCVTNS_xs: + case FCVTNS_wd: + case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break; + case FCVTNU_ws: + case FCVTNU_xs: + case FCVTNU_wd: + case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break; + case FCVTZU_xd: + case FCVTZU_ws: + case FCVTZU_wd: + case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break; + case FCVTZS_xd: + case FCVTZS_wd: + case FCVTZS_xs: + case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break; + case FCVTPU_xd: + case FCVTPU_ws: + case FCVTPU_wd: + case FCVTPU_xs: mnemonic = "fcvtpu"; form = form_rf; break; + case FCVTPS_xd: + case FCVTPS_wd: + case FCVTPS_xs: + case FCVTPS_ws: mnemonic = "fcvtps"; form = form_rf; break; + case SCVTF_sw: + case SCVTF_sx: + case SCVTF_dw: + case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break; + case UCVTF_sw: + case UCVTF_sx: + case UCVTF_dw: + case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Fn, 'IFPFBits"; + const char *form_fr = "'Fd, 'Rn, 'IFPFBits"; + + switch (instr->Mask(FPFixedPointConvertMask)) { + case FCVTZS_ws_fixed: + case FCVTZS_xs_fixed: + case FCVTZS_wd_fixed: + case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break; + case FCVTZU_ws_fixed: + case 
FCVTZU_xs_fixed: + case FCVTZU_wd_fixed: + case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break; + case SCVTF_sw_fixed: + case SCVTF_sx_fixed: + case SCVTF_dw_fixed: + case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break; + case UCVTF_sw_fixed: + case UCVTF_sx_fixed: + case UCVTF_dw_fixed: + case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitSystem(const Instruction* instr) { + // Some system instructions hijack their Op and Cp fields to represent a + // range of immediates instead of indicating a different instruction. This + // makes the decoding tricky. + const char *mnemonic = "unimplemented"; + const char *form = "(System)"; + + if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) { + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + mnemonic = "clrex"; + form = (instr->CRm() == 0xf) ? NULL : "'IX"; + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + mnemonic = "mrs"; + switch (instr->ImmSystemRegister()) { + case NZCV: form = "'Xt, nzcv"; break; + case FPCR: form = "'Xt, fpcr"; break; + default: form = "'Xt, (unknown)"; break; + } + break; + } + case MSR: { + mnemonic = "msr"; + switch (instr->ImmSystemRegister()) { + case NZCV: form = "nzcv, 'Xt"; break; + case FPCR: form = "fpcr, 'Xt"; break; + default: form = "(unknown), 'Xt"; break; + } + break; + } + } + } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + switch (instr->ImmHint()) { + case NOP: { + mnemonic = "nop"; + form = NULL; + break; + } + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + switch (instr->Mask(MemBarrierMask)) { + case DMB: { + mnemonic = "dmb"; + form = "'M"; + break; + } + case DSB: { + mnemonic = "dsb"; + form = "'M"; + break; + } + case ISB: { + mnemonic = "isb"; + form = NULL; + break; + } + } + } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) { + switch (instr->SysOp()) { + case IVAU: + mnemonic = "ic"; + form = "ivau, 'Xt"; + break; + case CVAC: + mnemonic = "dc"; + form = "cvac, 'Xt"; + break; + case CVAU: + mnemonic = "dc"; + form = "cvau, 'Xt"; + break; + case CIVAC: + mnemonic = "dc"; + form = "civac, 'Xt"; + break; + case ZVA: + mnemonic = "dc"; + form = "zva, 'Xt"; + break; + default: + mnemonic = "sys"; + if (instr->Rt() == 31) { + form = "'G1, 'Kn, 'Km, 'G2"; + } else { + form = "'G1, 'Kn, 'Km, 'G2, 'Xt"; + } + break; + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitException(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'IDebug"; + + switch (instr->Mask(ExceptionMask)) { + case HLT: mnemonic = "hlt"; break; + case BRK: mnemonic = "brk"; break; + case SVC: mnemonic = "svc"; break; + case HVC: mnemonic = "hvc"; break; + case SMC: mnemonic = "smc"; break; + case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break; + case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break; + case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break; + default: form = "(Exception)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCrypto2RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitCrypto3RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitCryptoAES(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void 
Disassembler::VisitNEON2RegMisc(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s"; + const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0"; + const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0"; + NEONFormatDecoder nfd(instr); + + static const NEONFormatMap map_lp_ta = { + {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D} + }; + + static const NEONFormatMap map_cvt_ta = { + {22}, {NF_4S, NF_2D} + }; + + static const NEONFormatMap map_cvt_tb = { + {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S} + }; + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: mnemonic = "rev64"; break; + case NEON_REV32: mnemonic = "rev32"; break; + case NEON_REV16: mnemonic = "rev16"; break; + case NEON_SADDLP: + mnemonic = "saddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADDLP: + mnemonic = "uaddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SUQADD: mnemonic = "suqadd"; break; + case NEON_USQADD: mnemonic = "usqadd"; break; + case NEON_CLS: mnemonic = "cls"; break; + case NEON_CLZ: mnemonic = "clz"; break; + case NEON_CNT: mnemonic = "cnt"; break; + case NEON_SADALP: + mnemonic = "sadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADALP: + mnemonic = "uadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SQABS: mnemonic = "sqabs"; break; + case NEON_SQNEG: mnemonic = "sqneg"; break; + case NEON_CMGT_zero: mnemonic = "cmgt"; form = form_cmp_zero; break; + case NEON_CMGE_zero: mnemonic = "cmge"; form = form_cmp_zero; break; + case NEON_CMEQ_zero: mnemonic = "cmeq"; form = form_cmp_zero; break; + case NEON_CMLE_zero: mnemonic = "cmle"; form = form_cmp_zero; break; + case NEON_CMLT_zero: mnemonic = "cmlt"; form = form_cmp_zero; break; + case NEON_ABS: mnemonic = "abs"; break; + case NEON_NEG: mnemonic = "neg"; break; + case NEON_RBIT_NOT: + switch (instr->FPType()) { + case 0: mnemonic = "mvn"; break; + case 1: mnemonic = "rbit"; break; + default: form = "(NEON2RegMisc)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + break; + } + } else { + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPFormatMap()); + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: mnemonic = "fabs"; break; + case NEON_FNEG: mnemonic = "fneg"; break; + case NEON_FCVTN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTXN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTL: + mnemonic = instr->Mask(NEON_Q) ? 
"fcvtl2" : "fcvtl"; + nfd.SetFormatMap(0, &map_cvt_ta); + nfd.SetFormatMap(1, &map_cvt_tb); + break; + case NEON_FRINTN: mnemonic = "frintn"; break; + case NEON_FRINTA: mnemonic = "frinta"; break; + case NEON_FRINTP: mnemonic = "frintp"; break; + case NEON_FRINTM: mnemonic = "frintm"; break; + case NEON_FRINTX: mnemonic = "frintx"; break; + case NEON_FRINTZ: mnemonic = "frintz"; break; + case NEON_FRINTI: mnemonic = "frinti"; break; + case NEON_FCVTNS: mnemonic = "fcvtns"; break; + case NEON_FCVTNU: mnemonic = "fcvtnu"; break; + case NEON_FCVTPS: mnemonic = "fcvtps"; break; + case NEON_FCVTPU: mnemonic = "fcvtpu"; break; + case NEON_FCVTMS: mnemonic = "fcvtms"; break; + case NEON_FCVTMU: mnemonic = "fcvtmu"; break; + case NEON_FCVTZS: mnemonic = "fcvtzs"; break; + case NEON_FCVTZU: mnemonic = "fcvtzu"; break; + case NEON_FCVTAS: mnemonic = "fcvtas"; break; + case NEON_FCVTAU: mnemonic = "fcvtau"; break; + case NEON_FSQRT: mnemonic = "fsqrt"; break; + case NEON_SCVTF: mnemonic = "scvtf"; break; + case NEON_UCVTF: mnemonic = "ucvtf"; break; + case NEON_URSQRTE: mnemonic = "ursqrte"; break; + case NEON_URECPE: mnemonic = "urecpe"; break; + case NEON_FRSQRTE: mnemonic = "frsqrte"; break; + case NEON_FRECPE: mnemonic = "frecpe"; break; + case NEON_FCMGT_zero: mnemonic = "fcmgt"; form = form_fcmp_zero; break; + case NEON_FCMGE_zero: mnemonic = "fcmge"; form = form_fcmp_zero; break; + case NEON_FCMEQ_zero: mnemonic = "fcmeq"; form = form_fcmp_zero; break; + case NEON_FCMLE_zero: mnemonic = "fcmle"; form = form_fcmp_zero; break; + case NEON_FCMLT_zero: mnemonic = "fcmlt"; form = form_fcmp_zero; break; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: mnemonic = "xtn"; break; + case NEON_SQXTN: mnemonic = "sqxtn"; break; + case NEON_UQXTN: mnemonic = "uqxtn"; break; + case NEON_SQXTUN: mnemonic = "sqxtun"; break; + case NEON_SHLL: + mnemonic = "shll"; + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(1, nfd.IntegerFormatMap()); + switch (instr->NEONSize()) { + case 0: form = "'Vd.%s, 'Vn.%s, #8"; break; + case 1: form = "'Vd.%s, 'Vn.%s, #16"; break; + case 2: form = "'Vd.%s, 'Vn.%s, #32"; break; + default: form = "(NEON2RegMisc)"; + } + } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + return; + } else { + form = "(NEON2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEON3Same(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: mnemonic = "and"; break; + case NEON_ORR: + mnemonic = "orr"; + if (instr->Rm() == instr->Rn()) { + mnemonic = "mov"; + form = "'Vd.%s, 'Vn.%s"; + } + break; + case NEON_ORN: mnemonic = "orn"; break; + case NEON_EOR: mnemonic = "eor"; break; + case NEON_BIC: mnemonic = "bic"; break; + case NEON_BIF: mnemonic = "bif"; break; + case NEON_BIT: mnemonic = "bit"; break; + case NEON_BSL: mnemonic = "bsl"; break; + default: form = "(NEON3Same)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + } else { + static const char *mnemonics[] = { + "shadd", "uhadd", "shadd", "uhadd", + "sqadd", "uqadd", "sqadd", "uqadd", + 
"srhadd", "urhadd", "srhadd", "urhadd", + NULL, NULL, NULL, NULL, // Handled by logical cases above. + "shsub", "uhsub", "shsub", "uhsub", + "sqsub", "uqsub", "sqsub", "uqsub", + "cmgt", "cmhi", "cmgt", "cmhi", + "cmge", "cmhs", "cmge", "cmhs", + "sshl", "ushl", "sshl", "ushl", + "sqshl", "uqshl", "sqshl", "uqshl", + "srshl", "urshl", "srshl", "urshl", + "sqrshl", "uqrshl", "sqrshl", "uqrshl", + "smax", "umax", "smax", "umax", + "smin", "umin", "smin", "umin", + "sabd", "uabd", "sabd", "uabd", + "saba", "uaba", "saba", "uaba", + "add", "sub", "add", "sub", + "cmtst", "cmeq", "cmtst", "cmeq", + "mla", "mls", "mla", "mls", + "mul", "pmul", "mul", "pmul", + "smaxp", "umaxp", "smaxp", "umaxp", + "sminp", "uminp", "sminp", "uminp", + "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh", + "addp", "unallocated", "addp", "unallocated", + "fmaxnm", "fmaxnmp", "fminnm", "fminnmp", + "fmla", "unallocated", "fmls", "unallocated", + "fadd", "faddp", "fsub", "fabd", + "fmulx", "fmul", "unallocated", "unallocated", + "fcmeq", "fcmge", "unallocated", "fcmgt", + "unallocated", "facge", "unallocated", "facgt", + "fmax", "fmaxp", "fmin", "fminp", + "frecps", "fdiv", "frsqrts", "unallocated"}; + + // Operation is determined by the opcode bits (15-11), the top bit of + // size (23) and the U bit (29). + unsigned index = (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) | + instr->Bit(29); + VIXL_ASSERT(index < (sizeof(mnemonics) / sizeof(mnemonics[0]))); + mnemonic = mnemonics[index]; + // Assert that index is not one of the previously handled logical + // instructions. + VIXL_ASSERT(mnemonic != NULL); + + if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPFormatMap()); + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEON3Different(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + + NEONFormatDecoder nfd(instr); + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + + // Ignore the Q bit. Appending a "2" suffix is handled later. 
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) { + case NEON_PMULL: mnemonic = "pmull"; break; + case NEON_SABAL: mnemonic = "sabal"; break; + case NEON_SABDL: mnemonic = "sabdl"; break; + case NEON_SADDL: mnemonic = "saddl"; break; + case NEON_SMLAL: mnemonic = "smlal"; break; + case NEON_SMLSL: mnemonic = "smlsl"; break; + case NEON_SMULL: mnemonic = "smull"; break; + case NEON_SSUBL: mnemonic = "ssubl"; break; + case NEON_SQDMLAL: mnemonic = "sqdmlal"; break; + case NEON_SQDMLSL: mnemonic = "sqdmlsl"; break; + case NEON_SQDMULL: mnemonic = "sqdmull"; break; + case NEON_UABAL: mnemonic = "uabal"; break; + case NEON_UABDL: mnemonic = "uabdl"; break; + case NEON_UADDL: mnemonic = "uaddl"; break; + case NEON_UMLAL: mnemonic = "umlal"; break; + case NEON_UMLSL: mnemonic = "umlsl"; break; + case NEON_UMULL: mnemonic = "umull"; break; + case NEON_USUBL: mnemonic = "usubl"; break; + case NEON_SADDW: + mnemonic = "saddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_SSUBW: + mnemonic = "ssubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_UADDW: + mnemonic = "uaddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_USUBW: + mnemonic = "usubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_ADDHN: + mnemonic = "addhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RADDHN: + mnemonic = "raddhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RSUBHN: + mnemonic = "rsubhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_SUBHN: + mnemonic = "subhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + default: form = "(NEON3Different)"; + } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONAcrossLanes(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, 'Vn.%s"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(), + NEONFormatDecoder::IntegerFormatMap()); + + if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + nfd.SetFormatMap(1, nfd.FPFormatMap()); + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: mnemonic = "fmaxv"; break; + case NEON_FMINV: mnemonic = "fminv"; break; + case NEON_FMAXNMV: mnemonic = "fmaxnmv"; break; + case NEON_FMINNMV: mnemonic = "fminnmv"; break; + default: form = "(NEONAcrossLanes)"; break; + } + } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) { + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: mnemonic = "addv"; break; + case NEON_SMAXV: mnemonic = "smaxv"; break; + case NEON_SMINV: mnemonic = "sminv"; break; + case NEON_UMAXV: mnemonic = "umaxv"; break; + case NEON_UMINV: mnemonic = "uminv"; break; + case NEON_SADDLV: + mnemonic = "saddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + case NEON_UADDLV: + mnemonic = "uaddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + default: form = "(NEONAcrossLanes)"; break; + } + } + Format(instr, mnemonic, nfd.Substitute(form, + NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat)); +} + + +void Disassembler::VisitNEONByIndexedElement(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + 
bool l_instr = false; + bool fp_instr = false; + + const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]"; + + static const NEONFormatMap map_ta = { + {23, 22}, {NF_UNDEF, NF_4S, NF_2D} + }; + NEONFormatDecoder nfd(instr, &map_ta, + NEONFormatDecoder::IntegerFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_SMULL_byelement: mnemonic = "smull"; l_instr = true; break; + case NEON_UMULL_byelement: mnemonic = "umull"; l_instr = true; break; + case NEON_SMLAL_byelement: mnemonic = "smlal"; l_instr = true; break; + case NEON_UMLAL_byelement: mnemonic = "umlal"; l_instr = true; break; + case NEON_SMLSL_byelement: mnemonic = "smlsl"; l_instr = true; break; + case NEON_UMLSL_byelement: mnemonic = "umlsl"; l_instr = true; break; + case NEON_SQDMULL_byelement: mnemonic = "sqdmull"; l_instr = true; break; + case NEON_SQDMLAL_byelement: mnemonic = "sqdmlal"; l_instr = true; break; + case NEON_SQDMLSL_byelement: mnemonic = "sqdmlsl"; l_instr = true; break; + case NEON_MUL_byelement: mnemonic = "mul"; break; + case NEON_MLA_byelement: mnemonic = "mla"; break; + case NEON_MLS_byelement: mnemonic = "mls"; break; + case NEON_SQDMULH_byelement: mnemonic = "sqdmulh"; break; + case NEON_SQRDMULH_byelement: mnemonic = "sqrdmulh"; break; + default: + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_byelement: mnemonic = "fmul"; fp_instr = true; break; + case NEON_FMLA_byelement: mnemonic = "fmla"; fp_instr = true; break; + case NEON_FMLS_byelement: mnemonic = "fmls"; fp_instr = true; break; + case NEON_FMULX_byelement: mnemonic = "fmulx"; fp_instr = true; break; + } + } + + if (l_instr) { + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + } else if (fp_instr) { + nfd.SetFormatMap(0, nfd.FPFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); + } else { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); + } +} + + +void Disassembler::VisitNEONCopy(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONCopy)"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(), + NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]"; + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Vd.%s['IVInsIndex1], 'Xn"; + } else { + form = "'Vd.%s['IVInsIndex1], 'Wn"; + } + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) { + mnemonic = "mov"; + } else { + mnemonic = "umov"; + } + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Xd, 'Vn.%s['IVInsIndex1]"; + } else { + form = "'Wd, 'Vn.%s['IVInsIndex1]"; + } + } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) { + mnemonic = "smov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Rdq, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + mnemonic = "dup"; + form = "'Vd.%s, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + mnemonic = "dup"; + if (nfd.GetVectorFormat() == kFormat2D) { + form = 
"'Vd.%s, 'Xn"; + } else { + form = "'Vd.%s, 'Wn"; + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONExtract(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONExtract)"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + mnemonic = "ext"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONLoadStoreMultiStruct)"; + const char *form_1v = "{'Vt.%1$s}, ['Xns]"; + const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]"; + const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]"; + const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD1_1v: mnemonic = "ld1"; form = form_1v; break; + case NEON_LD1_2v: mnemonic = "ld1"; form = form_2v; break; + case NEON_LD1_3v: mnemonic = "ld1"; form = form_3v; break; + case NEON_LD1_4v: mnemonic = "ld1"; form = form_4v; break; + case NEON_LD2: mnemonic = "ld2"; form = form_2v; break; + case NEON_LD3: mnemonic = "ld3"; form = form_3v; break; + case NEON_LD4: mnemonic = "ld4"; form = form_4v; break; + case NEON_ST1_1v: mnemonic = "st1"; form = form_1v; break; + case NEON_ST1_2v: mnemonic = "st1"; form = form_2v; break; + case NEON_ST1_3v: mnemonic = "st1"; form = form_3v; break; + case NEON_ST1_4v: mnemonic = "st1"; form = form_4v; break; + case NEON_ST2: mnemonic = "st2"; form = form_2v; break; + case NEON_ST3: mnemonic = "st3"; form = form_3v; break; + case NEON_ST4: mnemonic = "st4"; form = form_4v; break; + default: break; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONLoadStoreMultiStructPostIndex)"; + const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1"; + const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2"; + const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3"; + const char *form_4v = + "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_1v_post: mnemonic = "ld1"; form = form_1v; break; + case NEON_LD1_2v_post: mnemonic = "ld1"; form = form_2v; break; + case NEON_LD1_3v_post: mnemonic = "ld1"; form = form_3v; break; + case NEON_LD1_4v_post: mnemonic = "ld1"; form = form_4v; break; + case NEON_LD2_post: mnemonic = "ld2"; form = form_2v; break; + case NEON_LD3_post: mnemonic = "ld3"; form = form_3v; break; + case NEON_LD4_post: mnemonic = "ld4"; form = form_4v; break; + case NEON_ST1_1v_post: mnemonic = "st1"; form = form_1v; break; + case NEON_ST1_2v_post: mnemonic = "st1"; form = form_2v; break; + case NEON_ST1_3v_post: mnemonic = "st1"; form = form_3v; break; + case NEON_ST1_4v_post: mnemonic = "st1"; form = form_4v; break; + case NEON_ST2_post: mnemonic = "st2"; form = form_2v; break; + case NEON_ST3_post: mnemonic = "st3"; form = form_3v; break; + case NEON_ST4_post: mnemonic = "st4"; form = form_4v; break; + default: break; + } + + Format(instr, mnemonic, 
nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONLoadStoreSingleStruct)"; + + const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]"; + const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]"; + const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]"; + const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_b: mnemonic = "ld1"; form = form_1b; break; + case NEON_LD1_h: mnemonic = "ld1"; form = form_1h; break; + case NEON_LD1_s: + mnemonic = "ld1"; + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_ST1_b: mnemonic = "st1"; form = form_1b; break; + case NEON_ST1_h: mnemonic = "st1"; form = form_1h; break; + case NEON_ST1_s: + mnemonic = "st1"; + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns]"; + break; + case NEON_LD2_b: + case NEON_ST2_b: + mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD2_h: + case NEON_ST2_h: + mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD2_s: + case NEON_ST2_s: + VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d); + VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d); + mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]"; + else + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]"; + break; + case NEON_LD2R: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns]"; + break; + case NEON_LD3_b: + case NEON_ST3_b: + mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD3_h: + case NEON_ST3_h: + mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD3_s: + case NEON_ST3_s: + mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]"; + break; + case NEON_LD3R: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]"; + break; + case NEON_LD4_b: + case NEON_ST4_b: + mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD4_h: + case NEON_ST4_h: + mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD4_s: + case NEON_ST4_s: + VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d); + VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d); + mnemonic = (instr->LdStXLoad() == 1) ? 
"ld4" : "st4"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]"; + break; + case NEON_LD4R: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + break; + default: break; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONLoadStoreSingleStructPostIndex)"; + + const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1"; + const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2"; + const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4"; + const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b_post: mnemonic = "ld1"; form = form_1b; break; + case NEON_LD1_h_post: mnemonic = "ld1"; form = form_1h; break; + case NEON_LD1_s_post: + mnemonic = "ld1"; + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_ST1_b_post: mnemonic = "st1"; form = form_1b; break; + case NEON_ST1_h_post: mnemonic = "st1"; form = form_1h; break; + case NEON_ST1_s_post: + mnemonic = "st1"; + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R_post: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns], 'Xmz1"; + break; + case NEON_LD2_b_post: + case NEON_ST2_b_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2"; + break; + case NEON_ST2_h_post: + case NEON_LD2_h_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4"; + break; + case NEON_LD2_s_post: + case NEON_ST2_s_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8"; + else + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16"; + break; + case NEON_LD2R_post: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2"; + break; + case NEON_LD3_b_post: + case NEON_ST3_b_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3"; + break; + case NEON_LD3_h_post: + case NEON_ST3_h_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6"; + break; + case NEON_LD3_s_post: + case NEON_ST3_s_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmr3"; + break; + case NEON_LD3R_post: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3"; + break; + case NEON_LD4_b_post: + case NEON_ST4_b_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4"; + break; + case NEON_LD4_h_post: + case NEON_ST4_h_post: + mnemonic = (instr->LdStXLoad()) == 1 ? 
"ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8"; + break; + case NEON_LD4_s_post: + case NEON_ST4_s_post: + mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32"; + break; + case NEON_LD4R_post: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4"; + break; + default: break; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONModifiedImmediate(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1"; + + int cmode = instr->NEONCmode(); + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int q = instr->NEONQ(); + int op = instr->NEONModImmOp(); + + static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} }; + static const NEONFormatMap map_h = { {30}, {NF_4H, NF_8H} }; + static const NEONFormatMap map_s = { {30}, {NF_2S, NF_4S} }; + NEONFormatDecoder nfd(instr, &map_b); + + if (cmode_3 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_s); + } else { // cmode<3> == '1'. + if (cmode_2 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_h); + } else { // cmode<2> == '1'. + if (cmode_1 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2"; + nfd.SetFormatMap(0, &map_s); + } else { // cmode<1> == '1'. + if (cmode_0 == 0) { + mnemonic = "movi"; + if (op == 0) { + form = "'Vt.%s, 'IVMIImm8"; + } else { + form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm"; + } + } else { // cmode<0> == '1' + mnemonic = "fmov"; + if (op == 0) { + form = "'Vt.%s, 'IVMIImmFPSingle"; + nfd.SetFormatMap(0, &map_s); + } else { + if (q == 1) { + form = "'Vt.2d, 'IVMIImmFPDouble"; + } + } + } + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONScalar2RegMisc(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn"; + const char *form_0 = "%sd, %sn, #0"; + const char *form_fp0 = "%sd, %sn, #0.0"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
+ switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMGT_zero_scalar: mnemonic = "cmgt"; form = form_0; break; + case NEON_CMGE_zero_scalar: mnemonic = "cmge"; form = form_0; break; + case NEON_CMLE_zero_scalar: mnemonic = "cmle"; form = form_0; break; + case NEON_CMLT_zero_scalar: mnemonic = "cmlt"; form = form_0; break; + case NEON_CMEQ_zero_scalar: mnemonic = "cmeq"; form = form_0; break; + case NEON_NEG_scalar: mnemonic = "neg"; break; + case NEON_SQNEG_scalar: mnemonic = "sqneg"; break; + case NEON_ABS_scalar: mnemonic = "abs"; break; + case NEON_SQABS_scalar: mnemonic = "sqabs"; break; + case NEON_SUQADD_scalar: mnemonic = "suqadd"; break; + case NEON_USQADD_scalar: mnemonic = "usqadd"; break; + default: form = "(NEONScalar2RegMisc)"; + } + } else { + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRSQRTE_scalar: mnemonic = "frsqrte"; break; + case NEON_FRECPE_scalar: mnemonic = "frecpe"; break; + case NEON_SCVTF_scalar: mnemonic = "scvtf"; break; + case NEON_UCVTF_scalar: mnemonic = "ucvtf"; break; + case NEON_FCMGT_zero_scalar: mnemonic = "fcmgt"; form = form_fp0; break; + case NEON_FCMGE_zero_scalar: mnemonic = "fcmge"; form = form_fp0; break; + case NEON_FCMLE_zero_scalar: mnemonic = "fcmle"; form = form_fp0; break; + case NEON_FCMLT_zero_scalar: mnemonic = "fcmlt"; form = form_fp0; break; + case NEON_FCMEQ_zero_scalar: mnemonic = "fcmeq"; form = form_fp0; break; + case NEON_FRECPX_scalar: mnemonic = "frecpx"; break; + case NEON_FCVTNS_scalar: mnemonic = "fcvtns"; break; + case NEON_FCVTNU_scalar: mnemonic = "fcvtnu"; break; + case NEON_FCVTPS_scalar: mnemonic = "fcvtps"; break; + case NEON_FCVTPU_scalar: mnemonic = "fcvtpu"; break; + case NEON_FCVTMS_scalar: mnemonic = "fcvtms"; break; + case NEON_FCVTMU_scalar: mnemonic = "fcvtmu"; break; + case NEON_FCVTZS_scalar: mnemonic = "fcvtzs"; break; + case NEON_FCVTZU_scalar: mnemonic = "fcvtzu"; break; + case NEON_FCVTAS_scalar: mnemonic = "fcvtas"; break; + case NEON_FCVTAU_scalar: mnemonic = "fcvtau"; break; + case NEON_FCVTXN_scalar: + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + mnemonic = "fcvtxn"; + break; + default: + nfd.SetFormatMap(0, nfd.ScalarFormatMap()); + nfd.SetFormatMap(1, nfd.LongScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: mnemonic = "sqxtn"; break; + case NEON_UQXTN_scalar: mnemonic = "uqxtn"; break; + case NEON_SQXTUN_scalar: mnemonic = "sqxtun"; break; + default: form = "(NEONScalar2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalar3Diff(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar : mnemonic = "sqdmlal"; break; + case NEON_SQDMLSL_scalar : mnemonic = "sqdmlsl"; break; + case NEON_SQDMULL_scalar : mnemonic = "sqdmull"; break; + default: form = "(NEONScalar3Diff)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalar3Same(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, 
NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FACGE_scalar: mnemonic = "facge"; break; + case NEON_FACGT_scalar: mnemonic = "facgt"; break; + case NEON_FCMEQ_scalar: mnemonic = "fcmeq"; break; + case NEON_FCMGE_scalar: mnemonic = "fcmge"; break; + case NEON_FCMGT_scalar: mnemonic = "fcmgt"; break; + case NEON_FMULX_scalar: mnemonic = "fmulx"; break; + case NEON_FRECPS_scalar: mnemonic = "frecps"; break; + case NEON_FRSQRTS_scalar: mnemonic = "frsqrts"; break; + case NEON_FABD_scalar: mnemonic = "fabd"; break; + default: form = "(NEONScalar3Same)"; + } + } else { + switch (instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: mnemonic = "add"; break; + case NEON_SUB_scalar: mnemonic = "sub"; break; + case NEON_CMEQ_scalar: mnemonic = "cmeq"; break; + case NEON_CMGE_scalar: mnemonic = "cmge"; break; + case NEON_CMGT_scalar: mnemonic = "cmgt"; break; + case NEON_CMHI_scalar: mnemonic = "cmhi"; break; + case NEON_CMHS_scalar: mnemonic = "cmhs"; break; + case NEON_CMTST_scalar: mnemonic = "cmtst"; break; + case NEON_UQADD_scalar: mnemonic = "uqadd"; break; + case NEON_SQADD_scalar: mnemonic = "sqadd"; break; + case NEON_UQSUB_scalar: mnemonic = "uqsub"; break; + case NEON_SQSUB_scalar: mnemonic = "sqsub"; break; + case NEON_USHL_scalar: mnemonic = "ushl"; break; + case NEON_SSHL_scalar: mnemonic = "sshl"; break; + case NEON_UQSHL_scalar: mnemonic = "uqshl"; break; + case NEON_SQSHL_scalar: mnemonic = "sqshl"; break; + case NEON_URSHL_scalar: mnemonic = "urshl"; break; + case NEON_SRSHL_scalar: mnemonic = "srshl"; break; + case NEON_UQRSHL_scalar: mnemonic = "uqrshl"; break; + case NEON_SQRSHL_scalar: mnemonic = "sqrshl"; break; + case NEON_SQDMULH_scalar: mnemonic = "sqdmulh"; break; + case NEON_SQRDMULH_scalar: mnemonic = "sqrdmulh"; break; + default: form = "(NEONScalar3Same)"; + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalarByIndexedElement(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + bool long_instr = false; + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: + mnemonic = "sqdmull"; + long_instr = true; + break; + case NEON_SQDMLAL_byelement_scalar: + mnemonic = "sqdmlal"; + long_instr = true; + break; + case NEON_SQDMLSL_byelement_scalar: + mnemonic = "sqdmlsl"; + long_instr = true; + break; + case NEON_SQDMULH_byelement_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_byelement_scalar: + mnemonic = "sqrdmulh"; + break; + default: + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_byelement_scalar: mnemonic = "fmul"; break; + case NEON_FMLA_byelement_scalar: mnemonic = "fmla"; break; + case NEON_FMLS_byelement_scalar: mnemonic = "fmls"; break; + case NEON_FMULX_byelement_scalar: mnemonic = "fmulx"; break; + default: form = "(NEONScalarByIndexedElement)"; + } + } + + if (long_instr) { + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + } + + Format(instr, mnemonic, nfd.Substitute( + form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat)); +} + + +void Disassembler::VisitNEONScalarCopy(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = 
"(NEONScalarCopy)"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + mnemonic = "mov"; + form = "%sd, 'Vn.%s['IVInsIndex1]"; + } + + Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat)); +} + + +void Disassembler::VisitNEONScalarPairwise(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, 'Vn.%s"; + NEONFormatMap map = { {22}, {NF_2S, NF_2D} }; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map); + + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: mnemonic = "addp"; break; + case NEON_FADDP_scalar: mnemonic = "faddp"; break; + case NEON_FMAXP_scalar: mnemonic = "fmaxp"; break; + case NEON_FMAXNMP_scalar: mnemonic = "fmaxnmp"; break; + case NEON_FMINP_scalar: mnemonic = "fminp"; break; + case NEON_FMINNMP_scalar: mnemonic = "fminnmp"; break; + default: form = "(NEONScalarPairwise)"; + } + Format(instr, mnemonic, nfd.Substitute(form, + NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat)); +} + + +void Disassembler::VisitNEONScalarShiftImmediate(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, 'Is1"; + const char *form_2 = "%sd, %sn, 'Is2"; + + static const NEONFormatMap map_shift = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, + NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D} + }; + static const NEONFormatMap map_shift_narrow = { + {21, 20, 19}, + {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D} + }; + NEONFormatDecoder nfd(instr, &map_shift); + + if (instr->ImmNEONImmh()) { // immh has to be non-zero. + switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_FCVTZU_imm_scalar: mnemonic = "fcvtzu"; break; + case NEON_FCVTZS_imm_scalar: mnemonic = "fcvtzs"; break; + case NEON_SCVTF_imm_scalar: mnemonic = "scvtf"; break; + case NEON_UCVTF_imm_scalar: mnemonic = "ucvtf"; break; + case NEON_SRI_scalar: mnemonic = "sri"; break; + case NEON_SSHR_scalar: mnemonic = "sshr"; break; + case NEON_USHR_scalar: mnemonic = "ushr"; break; + case NEON_SRSHR_scalar: mnemonic = "srshr"; break; + case NEON_URSHR_scalar: mnemonic = "urshr"; break; + case NEON_SSRA_scalar: mnemonic = "ssra"; break; + case NEON_USRA_scalar: mnemonic = "usra"; break; + case NEON_SRSRA_scalar: mnemonic = "srsra"; break; + case NEON_URSRA_scalar: mnemonic = "ursra"; break; + case NEON_SHL_scalar: mnemonic = "shl"; form = form_2; break; + case NEON_SLI_scalar: mnemonic = "sli"; form = form_2; break; + case NEON_SQSHLU_scalar: mnemonic = "sqshlu"; form = form_2; break; + case NEON_SQSHL_imm_scalar: mnemonic = "sqshl"; form = form_2; break; + case NEON_UQSHL_imm_scalar: mnemonic = "uqshl"; form = form_2; break; + case NEON_UQSHRN_scalar: + mnemonic = "uqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_UQRSHRN_scalar: + mnemonic = "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRN_scalar: + mnemonic = "sqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRN_scalar: + mnemonic = "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRUN_scalar: + mnemonic = "sqshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRUN_scalar: + mnemonic = "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + default: + form = "(NEONScalarShiftImmediate)"; + } + } else { + form = 
"(NEONScalarShiftImmediate)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONShiftImmediate(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Is1"; + const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2"; + const char *form_xtl = "'Vd.%s, 'Vn.%s"; + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. + static const NEONFormatMap map_shift_ta = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D} + }; + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. + static const NEONFormatMap map_shift_tb = { + {22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H, + NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D} + }; + + NEONFormatDecoder nfd(instr, &map_shift_tb); + + if (instr->ImmNEONImmh()) { // immh has to be non-zero. + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SQSHLU: mnemonic = "sqshlu"; form = form_shift_2; break; + case NEON_SQSHL_imm: mnemonic = "sqshl"; form = form_shift_2; break; + case NEON_UQSHL_imm: mnemonic = "uqshl"; form = form_shift_2; break; + case NEON_SHL: mnemonic = "shl"; form = form_shift_2; break; + case NEON_SLI: mnemonic = "sli"; form = form_shift_2; break; + case NEON_SCVTF_imm: mnemonic = "scvtf"; break; + case NEON_UCVTF_imm: mnemonic = "ucvtf"; break; + case NEON_FCVTZU_imm: mnemonic = "fcvtzu"; break; + case NEON_FCVTZS_imm: mnemonic = "fcvtzs"; break; + case NEON_SRI: mnemonic = "sri"; break; + case NEON_SSHR: mnemonic = "sshr"; break; + case NEON_USHR: mnemonic = "ushr"; break; + case NEON_SRSHR: mnemonic = "srshr"; break; + case NEON_URSHR: mnemonic = "urshr"; break; + case NEON_SSRA: mnemonic = "ssra"; break; + case NEON_USRA: mnemonic = "usra"; break; + case NEON_SRSRA: mnemonic = "srsra"; break; + case NEON_URSRA: mnemonic = "ursra"; break; + case NEON_SHRN: + mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_RSHRN: + mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SSHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->ImmNEONImmb() == 0 && + CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl"; + } else { // sshll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? 
"sshll2" : "sshll"; + } + break; + case NEON_USHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->ImmNEONImmb() == 0 && + CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl"; + } else { // ushll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll"; + } + break; + default: form = "(NEONShiftImmediate)"; + } + } else { + form = "(NEONShiftImmediate)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONTable(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONTable)"; + const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s"; + const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s"; + const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + const char form_4v[] = + "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} }; + NEONFormatDecoder nfd(instr, &map_b); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: mnemonic = "tbl"; form = form_1v; break; + case NEON_TBL_2v: mnemonic = "tbl"; form = form_2v; break; + case NEON_TBL_3v: mnemonic = "tbl"; form = form_3v; break; + case NEON_TBL_4v: mnemonic = "tbl"; form = form_4v; break; + case NEON_TBX_1v: mnemonic = "tbx"; form = form_1v; break; + case NEON_TBX_2v: mnemonic = "tbx"; form = form_2v; break; + case NEON_TBX_3v: mnemonic = "tbx"; form = form_3v; break; + case NEON_TBX_4v: mnemonic = "tbx"; form = form_4v; break; + default: break; + } + + char re_form[sizeof(form_4v) + 6]; + int reg_num = instr->Rn(); + snprintf(re_form, sizeof(re_form), form, + (reg_num + 1) % kNumberOfVRegisters, + (reg_num + 2) % kNumberOfVRegisters, + (reg_num + 3) % kNumberOfVRegisters); + + Format(instr, mnemonic, nfd.Substitute(re_form)); +} + + +void Disassembler::VisitNEONPerm(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: mnemonic = "trn1"; break; + case NEON_TRN2: mnemonic = "trn2"; break; + case NEON_UZP1: mnemonic = "uzp1"; break; + case NEON_UZP2: mnemonic = "uzp2"; break; + case NEON_ZIP1: mnemonic = "zip1"; break; + case NEON_ZIP2: mnemonic = "zip2"; break; + default: form = "(NEONPerm)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitUnimplemented(const Instruction* instr) { + Format(instr, "unimplemented", "(Unimplemented)"); +} + + +void Disassembler::VisitUnallocated(const Instruction* instr) { + Format(instr, "unallocated", "(Unallocated)"); +} + + +void Disassembler::ProcessOutput(const Instruction* /*instr*/) { + // The base disasm does nothing more than disassembling into a buffer. +} + + +void Disassembler::AppendRegisterNameToOutput(const Instruction* instr, + const CPURegister& reg) { + USE(instr); + VIXL_ASSERT(reg.IsValid()); + char reg_char; + + if (reg.IsRegister()) { + reg_char = reg.Is64Bits() ? 'x' : 'w'; + } else { + VIXL_ASSERT(reg.IsVRegister()); + switch (reg.SizeInBits()) { + case kBRegSize: reg_char = 'b'; break; + case kHRegSize: reg_char = 'h'; break; + case kSRegSize: reg_char = 's'; break; + case kDRegSize: reg_char = 'd'; break; + default: + VIXL_ASSERT(reg.Is128Bits()); + reg_char = 'q'; + } + } + + if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) { + // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31. 
+ AppendToOutput("%c%d", reg_char, reg.code()); + } else if (reg.Aliases(sp)) { + // Disassemble w31/x31 as stack pointer wsp/sp. + AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp"); + } else { + // Disassemble w31/x31 as zero register wzr/xzr. + AppendToOutput("%czr", reg_char); + } +} + + +void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr, + int64_t offset) { + USE(instr); + char sign = (offset < 0) ? '-' : '+'; + AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset)); +} + + +void Disassembler::AppendAddressToOutput(const Instruction* instr, + const void* addr) { + USE(instr); + AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr)); +} + + +void Disassembler::AppendCodeAddressToOutput(const Instruction* instr, + const void* addr) { + AppendAddressToOutput(instr, addr); +} + + +void Disassembler::AppendDataAddressToOutput(const Instruction* instr, + const void* addr) { + AppendAddressToOutput(instr, addr); +} + + +void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr, + const void* addr) { + USE(instr); + int64_t rel_addr = CodeRelativeAddress(addr); + if (rel_addr >= 0) { + AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr); + } else { + AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr); + } +} + + +void Disassembler::AppendCodeRelativeCodeAddressToOutput( + const Instruction* instr, const void* addr) { + AppendCodeRelativeAddressToOutput(instr, addr); +} + + +void Disassembler::AppendCodeRelativeDataAddressToOutput( + const Instruction* instr, const void* addr) { + AppendCodeRelativeAddressToOutput(instr, addr); +} + + +void Disassembler::MapCodeAddress(int64_t base_address, + const Instruction* instr_address) { + set_code_address_offset( + base_address - reinterpret_cast<intptr_t>(instr_address)); +} +int64_t Disassembler::CodeRelativeAddress(const void* addr) { + return reinterpret_cast<intptr_t>(addr) + code_address_offset(); +} + + +void Disassembler::Format(const Instruction* instr, const char* mnemonic, + const char* format) { + VIXL_ASSERT(mnemonic != NULL); + ResetOutput(); + Substitute(instr, mnemonic); + if (format != NULL) { + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_++] = ' '; + Substitute(instr, format); + } + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_] = 0; + ProcessOutput(instr); +} + + +void Disassembler::Substitute(const Instruction* instr, const char* string) { + char chr = *string++; + while (chr != '\0') { + if (chr == '\'') { + string += SubstituteField(instr, string); + } else { + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_++] = chr; + } + chr = *string++; + } +} + + +int Disassembler::SubstituteField(const Instruction* instr, + const char* format) { + switch (format[0]) { + // NB. The remaining substitution prefix characters are: GJKUZ. + case 'R': // Register. X or W, selected by sf bit. + case 'F': // FP register. S or D, selected by type field. + case 'V': // Vector register, V, vector format. 
+ case 'W': + case 'X': + case 'B': + case 'H': + case 'S': + case 'D': + case 'Q': return SubstituteRegisterField(instr, format); + case 'I': return SubstituteImmediateField(instr, format); + case 'L': return SubstituteLiteralField(instr, format); + case 'N': return SubstituteShiftField(instr, format); + case 'P': return SubstitutePrefetchField(instr, format); + case 'C': return SubstituteConditionField(instr, format); + case 'E': return SubstituteExtendField(instr, format); + case 'A': return SubstitutePCRelAddressField(instr, format); + case 'T': return SubstituteBranchTargetField(instr, format); + case 'O': return SubstituteLSRegOffsetField(instr, format); + case 'M': return SubstituteBarrierField(instr, format); + case 'K': return SubstituteCrField(instr, format); + case 'G': return SubstituteSysOpField(instr, format); + default: { + VIXL_UNREACHABLE(); + return 1; + } + } +} + + +int Disassembler::SubstituteRegisterField(const Instruction* instr, + const char* format) { + char reg_prefix = format[0]; + unsigned reg_num = 0; + unsigned field_len = 2; + + switch (format[1]) { + case 'd': + reg_num = instr->Rd(); + if (format[2] == 'q') { + reg_prefix = instr->NEONQ() ? 'X' : 'W'; + field_len = 3; + } + break; + case 'n': reg_num = instr->Rn(); break; + case 'm': + reg_num = instr->Rm(); + switch (format[2]) { + // Handle registers tagged with b (bytes), z (instruction), or + // r (registers), used for address updates in + // NEON load/store instructions. + case 'r': + case 'b': + case 'z': { + field_len = 3; + char* eimm; + int imm = static_cast<int>(strtol(&format[3], &eimm, 10)); + field_len += eimm - &format[3]; + if (reg_num == 31) { + switch (format[2]) { + case 'z': + imm *= (1 << instr->NEONLSSize()); + break; + case 'r': + imm *= (instr->NEONQ() == 0) ? kDRegSizeInBytes + : kQRegSizeInBytes; + break; + case 'b': + break; + } + AppendToOutput("#%d", imm); + return field_len; + } + break; + } + } + break; + case 'e': + // This is register Rm, but using a 4-bit specifier. Used in NEON + // by-element instructions. + reg_num = (instr->Rm() & 0xf); + break; + case 'a': reg_num = instr->Ra(); break; + case 's': reg_num = instr->Rs(); break; + case 't': + reg_num = instr->Rt(); + if (format[0] == 'V') { + if ((format[2] >= '2') && (format[2] <= '4')) { + // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4. + reg_num = (reg_num + format[2] - '1') % 32; + field_len = 3; + } + } else { + if (format[2] == '2') { + // Handle register specifier Rt2. + reg_num = instr->Rt2(); + field_len = 3; + } + } + break; + default: VIXL_UNREACHABLE(); + } + + // Increase field length for registers tagged as stack. + if (format[2] == 's') { + field_len = 3; + } + + CPURegister::RegisterType reg_type = CPURegister::kRegister; + unsigned reg_size = kXRegSize; + + if (reg_prefix == 'R') { + reg_prefix = instr->SixtyFourBits() ? 'X' : 'W'; + } else if (reg_prefix == 'F') { + reg_prefix = ((instr->FPType() & 1) == 0) ? 
'S' : 'D'; + } + + switch (reg_prefix) { + case 'W': + reg_type = CPURegister::kRegister; reg_size = kWRegSize; break; + case 'X': + reg_type = CPURegister::kRegister; reg_size = kXRegSize; break; + case 'B': + reg_type = CPURegister::kVRegister; reg_size = kBRegSize; break; + case 'H': + reg_type = CPURegister::kVRegister; reg_size = kHRegSize; break; + case 'S': + reg_type = CPURegister::kVRegister; reg_size = kSRegSize; break; + case 'D': + reg_type = CPURegister::kVRegister; reg_size = kDRegSize; break; + case 'Q': + reg_type = CPURegister::kVRegister; reg_size = kQRegSize; break; + case 'V': + AppendToOutput("v%d", reg_num); + return field_len; + default: + VIXL_UNREACHABLE(); + } + + if ((reg_type == CPURegister::kRegister) && + (reg_num == kZeroRegCode) && (format[2] == 's')) { + reg_num = kSPRegInternalCode; + } + + AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type)); + + return field_len; +} + + +int Disassembler::SubstituteImmediateField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'I'); + + switch (format[1]) { + case 'M': { // IMoveImm, IMoveNeg or IMoveLSL. + if (format[5] == 'L') { + AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide()); + if (instr->ShiftMoveWide() > 0) { + AppendToOutput(", lsl #%" PRId32, 16 * instr->ShiftMoveWide()); + } + } else { + VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N')); + uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide()) << + (16 * instr->ShiftMoveWide()); + if (format[5] == 'N') + imm = ~imm; + if (!instr->SixtyFourBits()) + imm &= UINT64_C(0xffffffff); + AppendToOutput("#0x%" PRIx64, imm); + } + return 8; + } + case 'L': { + switch (format[2]) { + case 'L': { // ILLiteral - Immediate Load Literal. + AppendToOutput("pc%+" PRId32, + instr->ImmLLiteral() << kLiteralEntrySizeLog2); + return 9; + } + case 'S': { // ILS - Immediate Load/Store. + if (instr->ImmLS() != 0) { + AppendToOutput(", #%" PRId32, instr->ImmLS()); + } + return 3; + } + case 'P': { // ILPx - Immediate Load/Store Pair, x = access size. + if (instr->ImmLSPair() != 0) { + // format[3] is the scale value. Convert to a number. + int scale = 1 << (format[3] - '0'); + AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale); + } + return 4; + } + case 'U': { // ILU - Immediate Load/Store Unsigned. + if (instr->ImmLSUnsigned() != 0) { + int shift = instr->SizeLS(); + AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift); + } + return 3; + } + } + } + case 'C': { // ICondB - Immediate Conditional Branch. + int64_t offset = instr->ImmCondBranch() << 2; + AppendPCRelativeOffsetToOutput(instr, offset); + return 6; + } + case 'A': { // IAddSub. + VIXL_ASSERT(instr->ShiftAddSub() <= 1); + int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub()); + AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm); + return 7; + } + case 'F': { // IFPSingle, IFPDouble or IFPFBits. + if (format[3] == 'F') { // IFPFbits. + AppendToOutput("#%" PRId32, 64 - instr->FPScale()); + return 8; + } else { + AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(), + format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64()); + return 9; + } + } + case 'T': { // ITri - Immediate Triangular Encoded. + AppendToOutput("#0x%" PRIx64, instr->ImmLogical()); + return 4; + } + case 'N': { // INzcv. + int nzcv = (instr->Nzcv() << Flags_offset); + AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N', + ((nzcv & ZFlag) == 0) ? 'z' : 'Z', + ((nzcv & CFlag) == 0) ? 'c' : 'C', + ((nzcv & VFlag) == 0) ? 
'v' : 'V'); + return 5; + } + case 'P': { // IP - Conditional compare. + AppendToOutput("#%" PRId32, instr->ImmCondCmp()); + return 2; + } + case 'B': { // Bitfields. + return SubstituteBitfieldImmediateField(instr, format); + } + case 'E': { // IExtract. + AppendToOutput("#%" PRId32, instr->ImmS()); + return 8; + } + case 'S': { // IS - Test and branch bit. + AppendToOutput("#%" PRId32, (instr->ImmTestBranchBit5() << 5) | + instr->ImmTestBranchBit40()); + return 2; + } + case 's': { // Is - Shift (immediate). + switch (format[2]) { + case '1': { // Is1 - SSHR. + int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh()); + shift -= instr->ImmNEONImmhImmb(); + AppendToOutput("#%d", shift); + return 3; + } + case '2': { // Is2 - SLI. + int shift = instr->ImmNEONImmhImmb(); + shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh()); + AppendToOutput("#%d", shift); + return 3; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'D': { // IDebug - HLT and BRK instructions. + AppendToOutput("#0x%" PRIx32, instr->ImmException()); + return 6; + } + case 'V': { // Immediate Vector. + switch (format[2]) { + case 'E': { // IVExtract. + AppendToOutput("#%" PRId32, instr->ImmNEONExt()); + return 9; + } + case 'B': { // IVByElemIndex. + int vm_index = (instr->NEONH() << 1) | instr->NEONL(); + if (instr->NEONSize() == 1) { + vm_index = (vm_index << 1) | instr->NEONM(); + } + AppendToOutput("%d", vm_index); + return strlen("IVByElemIndex"); + } + case 'I': { // INS element. + if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) { + int rd_index, rn_index; + int imm5 = instr->ImmNEON5(); + int imm4 = instr->ImmNEON4(); + int tz = CountTrailingZeros(imm5, 32); + rd_index = imm5 >> (tz + 1); + rn_index = imm4 >> tz; + if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) { + AppendToOutput("%d", rd_index); + return strlen("IVInsIndex1"); + } else if (strncmp(format, "IVInsIndex2", + strlen("IVInsIndex2")) == 0) { + AppendToOutput("%d", rn_index); + return strlen("IVInsIndex2"); + } else { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + VIXL_FALLTHROUGH(); + } + case 'L': { // IVLSLane[0123] - suffix indicates access size shift. + AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0')); + return 9; + } + case 'M': { // Modified Immediate cases. 
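+ // The IVMIImm branch below widens the 8-bit immediate by turning each set
+ // bit into a full byte of ones; for example (illustrative), imm8 = 0x05
+ // would be printed as #0xff00ff.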
+ if (strncmp(format, + "IVMIImmFPSingle", + strlen("IVMIImmFPSingle")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(), + instr->ImmNEONFP32()); + return strlen("IVMIImmFPSingle"); + } else if (strncmp(format, + "IVMIImmFPDouble", + strlen("IVMIImmFPDouble")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(), + instr->ImmNEONFP64()); + return strlen("IVMIImmFPDouble"); + } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) { + uint64_t imm8 = instr->ImmNEONabcdefgh(); + AppendToOutput("#0x%" PRIx64, imm8); + return strlen("IVMIImm8"); + } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) { + uint64_t imm8 = instr->ImmNEONabcdefgh(); + uint64_t imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + AppendToOutput("#0x%" PRIx64, imm); + return strlen("IVMIImm"); + } else if (strncmp(format, "IVMIShiftAmt1", + strlen("IVMIShiftAmt1")) == 0) { + int cmode = instr->NEONCmode(); + int shift_amount = 8 * ((cmode >> 1) & 3); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt1"); + } else if (strncmp(format, "IVMIShiftAmt2", + strlen("IVMIShiftAmt2")) == 0) { + int cmode = instr->NEONCmode(); + int shift_amount = 8 << (cmode & 1); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt2"); + } else { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'X': { // IX - CLREX instruction. + AppendToOutput("#0x%" PRIx32, instr->CRm()); + return 2; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } +} + + +int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr, + const char* format) { + VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B')); + unsigned r = instr->ImmR(); + unsigned s = instr->ImmS(); + + switch (format[2]) { + case 'r': { // IBr. + AppendToOutput("#%d", r); + return 3; + } + case 's': { // IBs+1 or IBs-r+1. + if (format[3] == '+') { + AppendToOutput("#%d", s + 1); + return 5; + } else { + VIXL_ASSERT(format[3] == '-'); + AppendToOutput("#%d", s - r + 1); + return 7; + } + } + case 'Z': { // IBZ-r. + VIXL_ASSERT((format[3] == '-') && (format[4] == 'r')); + unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize; + AppendToOutput("#%d", reg_size - r); + return 5; + } + default: { + VIXL_UNREACHABLE(); + return 0; + } + } +} + + +int Disassembler::SubstituteLiteralField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "LValue", 6) == 0); + USE(format); + + const void * address = instr->LiteralAddress<const void *>(); + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: + case LDR_x_lit: + case LDRSW_x_lit: + case LDR_s_lit: + case LDR_d_lit: + case LDR_q_lit: + AppendCodeRelativeDataAddressToOutput(instr, address); + break; + case PRFM_lit: { + // Use the prefetch hint to decide how to print the address. + switch (instr->PrefetchHint()) { + case 0x0: // PLD: prefetch for load. + case 0x2: // PST: prepare for store. + AppendCodeRelativeDataAddressToOutput(instr, address); + break; + case 0x1: // PLI: preload instructions. + AppendCodeRelativeCodeAddressToOutput(instr, address); + break; + case 0x3: // Unallocated hint. 
+ AppendCodeRelativeAddressToOutput(instr, address); + break; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + + return 6; +} + + +int Disassembler::SubstituteShiftField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'N'); + VIXL_ASSERT(instr->ShiftDP() <= 0x3); + + switch (format[1]) { + case 'D': { // HDP. + VIXL_ASSERT(instr->ShiftDP() != ROR); + VIXL_FALLTHROUGH(); + } + case 'L': { // HLo. + if (instr->ImmDPShift() != 0) { + const char* shift_type[] = {"lsl", "lsr", "asr", "ror"}; + AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()], + instr->ImmDPShift()); + } + return 3; + } + default: + VIXL_UNIMPLEMENTED(); + return 0; + } +} + + +int Disassembler::SubstituteConditionField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'C'); + const char* condition_code[] = { "eq", "ne", "hs", "lo", + "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", + "gt", "le", "al", "nv" }; + int cond; + switch (format[1]) { + case 'B': cond = instr->ConditionBranch(); break; + case 'I': { + cond = InvertCondition(static_cast<Condition>(instr->Condition())); + break; + } + default: cond = instr->Condition(); + } + AppendToOutput("%s", condition_code[cond]); + return 4; +} + + +int Disassembler::SubstitutePCRelAddressField(const Instruction* instr, + const char* format) { + VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`. + (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`. + + int64_t offset = instr->ImmPCRel(); + + // Compute the target address based on the effective address (after applying + // code_address_offset). This is required for correct behaviour of adrp. + const Instruction* base = instr + code_address_offset(); + if (format[9] == 'P') { + offset *= kPageSize; + base = AlignDown(base, kPageSize); + } + // Strip code_address_offset before printing, so we can use the + // semantically-correct AppendCodeRelativeAddressToOutput. + const void* target = + reinterpret_cast<const void*>(base + offset - code_address_offset()); + + AppendPCRelativeOffsetToOutput(instr, offset); + AppendToOutput(" "); + AppendCodeRelativeAddressToOutput(instr, target); + return 13; +} + + +int Disassembler::SubstituteBranchTargetField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "TImm", 4) == 0); + + int64_t offset = 0; + switch (format[5]) { + // BImmUncn - unconditional branch immediate. + case 'n': offset = instr->ImmUncondBranch(); break; + // BImmCond - conditional branch immediate. + case 'o': offset = instr->ImmCondBranch(); break; + // BImmCmpa - compare and branch immediate. + case 'm': offset = instr->ImmCmpBranch(); break; + // BImmTest - test and branch immediate. + case 'e': offset = instr->ImmTestBranch(); break; + default: VIXL_UNIMPLEMENTED(); + } + offset <<= kInstructionSizeLog2; + const void* target_address = reinterpret_cast<const void*>(instr + offset); + VIXL_STATIC_ASSERT(sizeof(*instr) == 1); + + AppendPCRelativeOffsetToOutput(instr, offset); + AppendToOutput(" "); + AppendCodeRelativeCodeAddressToOutput(instr, target_address); + + return 8; +} + + +int Disassembler::SubstituteExtendField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "Ext", 3) == 0); + VIXL_ASSERT(instr->ExtendMode() <= 7); + USE(format); + + const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx", + "sxtb", "sxth", "sxtw", "sxtx" }; + + // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit + // registers becomes lsl. 
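+ // For example (illustrative, assuming the usual preferred-disassembly
+ // alias): "add x0, sp, x1, uxtx #2" is printed with ", lsl #2" in place
+ // of the extend, and with no suffix at all when the shift amount is zero.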
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) && + (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) || + (instr->ExtendMode() == UXTX))) { + if (instr->ImmExtendShift() > 0) { + AppendToOutput(", lsl #%" PRId32, instr->ImmExtendShift()); + } + } else { + AppendToOutput(", %s", extend_mode[instr->ExtendMode()]); + if (instr->ImmExtendShift() > 0) { + AppendToOutput(" #%" PRId32, instr->ImmExtendShift()); + } + } + return 3; +} + + +int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0); + const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl", + "undefined", "undefined", "sxtw", "sxtx" }; + USE(format); + + unsigned shift = instr->ImmShiftLS(); + Extend ext = static_cast<Extend>(instr->ExtendMode()); + char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x'; + + unsigned rm = instr->Rm(); + if (rm == kZeroRegCode) { + AppendToOutput("%czr", reg_type); + } else { + AppendToOutput("%c%d", reg_type, rm); + } + + // Extend mode UXTX is an alias for shift mode LSL here. + if (!((ext == UXTX) && (shift == 0))) { + AppendToOutput(", %s", extend_mode[ext]); + if (shift != 0) { + AppendToOutput(" #%d", instr->SizeLS()); + } + } + return 9; +} + + +int Disassembler::SubstitutePrefetchField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'P'); + USE(format); + + static const char* hints[] = {"ld", "li", "st"}; + static const char* stream_options[] = {"keep", "strm"}; + + unsigned hint = instr->PrefetchHint(); + unsigned target = instr->PrefetchTarget() + 1; + unsigned stream = instr->PrefetchStream(); + + if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) { + // Unallocated prefetch operations. + int prefetch_mode = instr->ImmPrefetchOperation(); + AppendToOutput("#0b%c%c%c%c%c", + (prefetch_mode & (1 << 4)) ? '1' : '0', + (prefetch_mode & (1 << 3)) ? '1' : '0', + (prefetch_mode & (1 << 2)) ? '1' : '0', + (prefetch_mode & (1 << 1)) ? '1' : '0', + (prefetch_mode & (1 << 0)) ? 
'1' : '0'); + } else { + VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0]))); + AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]); + } + return 6; +} + +int Disassembler::SubstituteBarrierField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'M'); + USE(format); + + static const char* options[4][4] = { + { "sy (0b0000)", "oshld", "oshst", "osh" }, + { "sy (0b0100)", "nshld", "nshst", "nsh" }, + { "sy (0b1000)", "ishld", "ishst", "ish" }, + { "sy (0b1100)", "ld", "st", "sy" } + }; + int domain = instr->ImmBarrierDomain(); + int type = instr->ImmBarrierType(); + + AppendToOutput("%s", options[domain][type]); + return 1; +} + +int Disassembler::SubstituteSysOpField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'G'); + int op = -1; + switch (format[1]) { + case '1': op = instr->SysOp1(); break; + case '2': op = instr->SysOp2(); break; + default: + VIXL_UNREACHABLE(); + } + AppendToOutput("#%d", op); + return 2; +} + +int Disassembler::SubstituteCrField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'K'); + int cr = -1; + switch (format[1]) { + case 'n': cr = instr->CRn(); break; + case 'm': cr = instr->CRm(); break; + default: + VIXL_UNREACHABLE(); + } + AppendToOutput("C%d", cr); + return 2; +} + +void Disassembler::ResetOutput() { + buffer_pos_ = 0; + buffer_[buffer_pos_] = 0; +} + + +void Disassembler::AppendToOutput(const char* format, ...) { + va_list args; + va_start(args, format); + buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_, + format, args); + va_end(args); +} + + +void PrintDisassembler::ProcessOutput(const Instruction* instr) { + fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n", + reinterpret_cast<uint64_t>(instr), + instr->InstructionBits(), + GetOutput()); +} + +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.h b/js/src/jit/arm64/vixl/Disasm-vixl.h new file mode 100644 index 000000000..1f2cd81fe --- /dev/null +++ b/js/src/jit/arm64/vixl/Disasm-vixl.h @@ -0,0 +1,177 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_DISASM_A64_H +#define VIXL_A64_DISASM_A64_H + +#include "jit/arm64/vixl/Assembler-vixl.h" +#include "jit/arm64/vixl/Decoder-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +namespace vixl { + +class Disassembler: public DecoderVisitor { + public: + Disassembler(); + Disassembler(char* text_buffer, int buffer_size); + virtual ~Disassembler(); + char* GetOutput(); + + // Declare all Visitor functions. + #define DECLARE(A) virtual void Visit##A(const Instruction* instr); + VISITOR_LIST(DECLARE) + #undef DECLARE + + protected: + virtual void ProcessOutput(const Instruction* instr); + + // Default output functions. The functions below implement a default way of + // printing elements in the disassembly. A sub-class can override these to + // customize the disassembly output. + + // Prints the name of a register. + // TODO: This currently doesn't allow renaming of V registers. + virtual void AppendRegisterNameToOutput(const Instruction* instr, + const CPURegister& reg); + + // Prints a PC-relative offset. This is used for example when disassembling + // branches to immediate offsets. + virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr, + int64_t offset); + + // Prints an address, in the general case. It can be code or data. This is + // used for example to print the target address of an ADR instruction. + virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some code. + // This is used for example to print the target address of a branch to an + // immediate offset. + // A sub-class can for example override this method to lookup the address and + // print an appropriate name. + virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some data. + // This is used for example to print the source address of a load literal + // instruction. + virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr, + const void* addr); + + // Same as the above, but for addresses that are not relative to the code + // buffer. They are currently not used by VIXL. + virtual void AppendAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendCodeAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendDataAddressToOutput(const Instruction* instr, + const void* addr); + + public: + // Get/Set the offset that should be added to code addresses when printing + // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput() + // helpers. + // Below is an example of how a branch immediate instruction in memory at + // address 0xb010200 would disassemble with different offsets. 
+ // Base address | Disassembly + // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc) + // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc) + // 0xb010200 | 0x0: b #+0xcc (addr 0xcc) + void MapCodeAddress(int64_t base_address, const Instruction* instr_address); + int64_t CodeRelativeAddress(const void* instr); + + private: + void Format( + const Instruction* instr, const char* mnemonic, const char* format); + void Substitute(const Instruction* instr, const char* string); + int SubstituteField(const Instruction* instr, const char* format); + int SubstituteRegisterField(const Instruction* instr, const char* format); + int SubstituteImmediateField(const Instruction* instr, const char* format); + int SubstituteLiteralField(const Instruction* instr, const char* format); + int SubstituteBitfieldImmediateField( + const Instruction* instr, const char* format); + int SubstituteShiftField(const Instruction* instr, const char* format); + int SubstituteExtendField(const Instruction* instr, const char* format); + int SubstituteConditionField(const Instruction* instr, const char* format); + int SubstitutePCRelAddressField(const Instruction* instr, const char* format); + int SubstituteBranchTargetField(const Instruction* instr, const char* format); + int SubstituteLSRegOffsetField(const Instruction* instr, const char* format); + int SubstitutePrefetchField(const Instruction* instr, const char* format); + int SubstituteBarrierField(const Instruction* instr, const char* format); + int SubstituteSysOpField(const Instruction* instr, const char* format); + int SubstituteCrField(const Instruction* instr, const char* format); + bool RdIsZROrSP(const Instruction* instr) const { + return (instr->Rd() == kZeroRegCode); + } + + bool RnIsZROrSP(const Instruction* instr) const { + return (instr->Rn() == kZeroRegCode); + } + + bool RmIsZROrSP(const Instruction* instr) const { + return (instr->Rm() == kZeroRegCode); + } + + bool RaIsZROrSP(const Instruction* instr) const { + return (instr->Ra() == kZeroRegCode); + } + + bool IsMovzMovnImm(unsigned reg_size, uint64_t value); + + int64_t code_address_offset() const { return code_address_offset_; } + + protected: + void ResetOutput(); + void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3); + + void set_code_address_offset(int64_t code_address_offset) { + code_address_offset_ = code_address_offset; + } + + char* buffer_; + uint32_t buffer_pos_; + uint32_t buffer_size_; + bool own_buffer_; + + int64_t code_address_offset_; +}; + + +class PrintDisassembler: public Disassembler { + public: + explicit PrintDisassembler(FILE* stream) : stream_(stream) { } + + protected: + virtual void ProcessOutput(const Instruction* instr); + + private: + FILE *stream_; +}; +} // namespace vixl + +#endif // VIXL_A64_DISASM_A64_H diff --git a/js/src/jit/arm64/vixl/Globals-vixl.h b/js/src/jit/arm64/vixl/Globals-vixl.h new file mode 100644 index 000000000..8a7418eb8 --- /dev/null +++ b/js/src/jit/arm64/vixl/Globals-vixl.h @@ -0,0 +1,122 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_GLOBALS_H +#define VIXL_GLOBALS_H + +// Get standard C99 macros for integer types. +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include "mozilla/Assertions.h" + +#include <inttypes.h> +#include <stdarg.h> +#include <stddef.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> + +#include "js-config.h" + +#include "jit/arm64/vixl/Platform-vixl.h" +#include "js/Utility.h" + + +typedef uint8_t byte; + +// Type for half-precision (16 bit) floating point numbers. +typedef uint16_t float16; + +const int KBytes = 1024; +const int MBytes = 1024 * KBytes; + +#define VIXL_ABORT() \ + do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false) +#ifdef DEBUG + #define VIXL_ASSERT(condition) MOZ_ASSERT(condition) + #define VIXL_CHECK(condition) VIXL_ASSERT(condition) + #define VIXL_UNIMPLEMENTED() \ + do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false) + #define VIXL_UNREACHABLE() \ + do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false) +#else + #define VIXL_ASSERT(condition) ((void) 0) + #define VIXL_CHECK(condition) ((void) 0) + #define VIXL_UNIMPLEMENTED() ((void) 0) + #define VIXL_UNREACHABLE() ((void) 0) +#endif +// This is not as powerful as template based assertions, but it is simple. +// It assumes that the descriptions are unique. If this starts being a problem, +// we can switch to a different implemention. +#define VIXL_CONCAT(a, b) a##b +#define VIXL_STATIC_ASSERT_LINE(line, condition) \ + typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 
1 : -1] \ + __attribute__((unused)) +#define VIXL_STATIC_ASSERT(condition) \ + VIXL_STATIC_ASSERT_LINE(__LINE__, condition) + +template <typename T1> +inline void USE(T1) {} + +template <typename T1, typename T2> +inline void USE(T1, T2) {} + +template <typename T1, typename T2, typename T3> +inline void USE(T1, T2, T3) {} + +template <typename T1, typename T2, typename T3, typename T4> +inline void USE(T1, T2, T3, T4) {} + +#define VIXL_ALIGNMENT_EXCEPTION() \ + do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0) + +// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough +// argument to annotate intentional fall-through between switch labels. +// For more information please refer to: +// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +#ifndef __has_warning + #define __has_warning(x) 0 +#endif + +// Note: This option is only available for Clang. And will only be enabled for +// C++11(201103L). +#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L + #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT +#else + #define VIXL_FALLTHROUGH() do {} while (0) +#endif + +#endif // VIXL_GLOBALS_H diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.cpp b/js/src/jit/arm64/vixl/Instructions-vixl.cpp new file mode 100644 index 000000000..25b8e307d --- /dev/null +++ b/js/src/jit/arm64/vixl/Instructions-vixl.cpp @@ -0,0 +1,670 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Instructions-vixl.h" + +#include "jit/arm64/vixl/Assembler-vixl.h" + +namespace vixl { + + +// Floating-point infinity values. 
+const float16 kFP16PositiveInfinity = 0x7c00; +const float16 kFP16NegativeInfinity = 0xfc00; +const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000); +const float kFP32NegativeInfinity = rawbits_to_float(0xff800000); +const double kFP64PositiveInfinity = + rawbits_to_double(UINT64_C(0x7ff0000000000000)); +const double kFP64NegativeInfinity = + rawbits_to_double(UINT64_C(0xfff0000000000000)); + + +// The default NaN values (for FPCR.DN=1). +const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000)); +const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000); +const float16 kFP16DefaultNaN = 0x7e00; + + +static uint64_t RotateRight(uint64_t value, + unsigned int rotate, + unsigned int width) { + VIXL_ASSERT(width <= 64); + rotate &= 63; + return ((value & ((UINT64_C(1) << rotate) - 1)) << + (width - rotate)) | (value >> rotate); +} + + +static uint64_t RepeatBitsAcrossReg(unsigned reg_size, + uint64_t value, + unsigned width) { + VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) || + (width == 32)); + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + uint64_t result = value & ((UINT64_C(1) << width) - 1); + for (unsigned i = width; i < reg_size; i *= 2) { + result |= (result << i); + } + return result; +} + + +bool Instruction::IsLoad() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) != 0; + } else { + LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask)); + switch (op) { + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_w: + case LDRSB_x: + case LDRSH_w: + case LDRSH_x: + case LDRSW_x: + case LDR_b: + case LDR_h: + case LDR_s: + case LDR_d: + case LDR_q: return true; + default: return false; + } + } +} + + +bool Instruction::IsStore() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) == 0; + } else { + LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask)); + switch (op) { + case STRB_w: + case STRH_w: + case STR_w: + case STR_x: + case STR_b: + case STR_h: + case STR_s: + case STR_d: + case STR_q: return true; + default: return false; + } + } +} + + +// Logical immediates can't encode zero, so a return value of zero is used to +// indicate a failure case. Specifically, where the constraints on imm_s are +// not met. +uint64_t Instruction::ImmLogical() const { + unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize; + int32_t n = BitN(); + int32_t imm_s = ImmSetBits(); + int32_t imm_r = ImmRotate(); + + // An integer is constructed from the n, imm_s and imm_r bits according to + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 + // bits are set. The pattern is rotated right by R, and repeated across a + // 32 or 64-bit value, depending on destination register width. 
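+ //
+ // Illustrative worked example: N = 0, imms = 0b111100 and immr = 0b000000
+ // select the 2-bit pattern with S = 0, i.e. 0b01. Rotating right by R = 0
+ // and repeating across a W register gives 0x55555555, which is how
+ // "orr w0, wzr, #0x55555555" (mov w0, #0x55555555) encodes its immediate.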
+ // + + if (n == 1) { + if (imm_s == 0x3f) { + return 0; + } + uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1; + return RotateRight(bits, imm_r, 64); + } else { + if ((imm_s >> 1) == 0x1f) { + return 0; + } + for (int width = 0x20; width >= 0x2; width >>= 1) { + if ((imm_s & width) == 0) { + int mask = width - 1; + if ((imm_s & mask) == mask) { + return 0; + } + uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1; + return RepeatBitsAcrossReg(reg_size, + RotateRight(bits, imm_r & mask, width), + width); + } + } + } + VIXL_UNREACHABLE(); + return 0; +} + + +uint32_t Instruction::ImmNEONabcdefgh() const { + return ImmNEONabc() << 5 | ImmNEONdefgh(); +} + + +float Instruction::Imm8ToFP32(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint32_t bit7 = (bits >> 7) & 0x1; + uint32_t bit6 = (bits >> 6) & 0x1; + uint32_t bit5_to_0 = bits & 0x3f; + uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); + + return rawbits_to_float(result); +} + + +float Instruction::ImmFP32() const { + return Imm8ToFP32(ImmFP()); +} + + +double Instruction::Imm8ToFP64(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint64_t bit7 = (bits >> 7) & 0x1; + uint64_t bit6 = (bits >> 6) & 0x1; + uint64_t bit5_to_0 = bits & 0x3f; + uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); + + return rawbits_to_double(result); +} + + +double Instruction::ImmFP64() const { + return Imm8ToFP64(ImmFP()); +} + + +float Instruction::ImmNEONFP32() const { + return Imm8ToFP32(ImmNEONabcdefgh()); +} + + +double Instruction::ImmNEONFP64() const { + return Imm8ToFP64(ImmNEONabcdefgh()); +} + + +unsigned CalcLSDataSize(LoadStoreOp op) { + VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8)); + unsigned size = static_cast<Instr>(op) >> LSSize_offset; + if ((op & LSVector_mask) != 0) { + // Vector register memory operations encode the access size in the "size" + // and "opc" fields. 
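+ // For example, LDR/STR of a Q register encode size == 0b00 with opc == 0b11
+ // or 0b10, so the check below maps them to kQRegSizeInBytesLog2 (a 16-byte
+ // access).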
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) { + size = kQRegSizeInBytesLog2; + } + } + return size; +} + + +unsigned CalcLSPairDataSize(LoadStorePairOp op) { + VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); + VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); + switch (op) { + case STP_q: + case LDP_q: return kQRegSizeInBytesLog2; + case STP_x: + case LDP_x: + case STP_d: + case LDP_d: return kXRegSizeInBytesLog2; + default: return kWRegSizeInBytesLog2; + } +} + + +int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) { + switch (branch_type) { + case UncondBranchType: + return ImmUncondBranch_width; + case CondBranchType: + return ImmCondBranch_width; + case CompareBranchType: + return ImmCmpBranch_width; + case TestBranchType: + return ImmTestBranch_width; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) { + int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1); + return encoded_max * kInstructionSize; +} + + +bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type, + int64_t offset) { + return is_intn(ImmBranchRangeBitwidth(branch_type), offset); +} + +ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type) +{ + switch (branch_type) { + case UncondBranchType: + return UncondBranchRangeType; + case CondBranchType: + case CompareBranchType: + return CondBranchRangeType; + case TestBranchType: + return TestBranchRangeType; + default: + return UnknownBranchRangeType; + } +} + +int32_t Instruction::ImmBranchMaxForwardOffset(ImmBranchRangeType range_type) +{ + // Branches encode a pc-relative two's complement number of 32-bit + // instructions. Compute the number of bytes corresponding to the largest + // positive number of instructions that can be encoded. + switch(range_type) { + case TestBranchRangeType: + return ((1 << ImmTestBranch_width) - 1) / 2 * kInstructionSize; + case CondBranchRangeType: + return ((1 << ImmCondBranch_width) - 1) / 2 * kInstructionSize; + case UncondBranchRangeType: + return ((1 << ImmUncondBranch_width) - 1) / 2 * kInstructionSize; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + +int32_t Instruction::ImmBranchMinBackwardOffset(ImmBranchRangeType range_type) +{ + switch(range_type) { + case TestBranchRangeType: + return -int32_t(1 << ImmTestBranch_width) / 2 * kInstructionSize; + case CondBranchRangeType: + return -int32_t(1 << ImmCondBranch_width) / 2 * kInstructionSize; + case UncondBranchRangeType: + return -int32_t(1 << ImmUncondBranch_width) / 2 * kInstructionSize; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + +const Instruction* Instruction::ImmPCOffsetTarget() const { + const Instruction * base = this; + ptrdiff_t offset; + if (IsPCRelAddressing()) { + // ADR and ADRP. + offset = ImmPCRel(); + if (Mask(PCRelAddressingMask) == ADRP) { + base = AlignDown(base, kPageSize); + offset *= kPageSize; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR); + } + } else { + // All PC-relative branches. + VIXL_ASSERT(BranchType() != UnknownBranchType); + // Relative branch offsets are instruction-size-aligned. 
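+ // (For example, an unconditional branch with imm26 == 3 targets
+ // this + 3 * kInstructionSize bytes.)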
+ offset = ImmBranch() << kInstructionSizeLog2; + } + return base + offset; +} + + +int Instruction::ImmBranch() const { + switch (BranchType()) { + case CondBranchType: return ImmCondBranch(); + case UncondBranchType: return ImmUncondBranch(); + case CompareBranchType: return ImmCmpBranch(); + case TestBranchType: return ImmTestBranch(); + default: VIXL_UNREACHABLE(); + } + return 0; +} + + +void Instruction::SetImmPCOffsetTarget(const Instruction* target) { + if (IsPCRelAddressing()) { + SetPCRelImmTarget(target); + } else { + SetBranchImmTarget(target); + } +} + + +void Instruction::SetPCRelImmTarget(const Instruction* target) { + ptrdiff_t imm21; + if ((Mask(PCRelAddressingMask) == ADR)) { + imm21 = target - this; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP); + uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize; + uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize; + imm21 = target_page - this_page; + } + Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21)); + + SetInstructionBits(Mask(~ImmPCRel_mask) | imm); +} + + +void Instruction::SetBranchImmTarget(const Instruction* target) { + VIXL_ASSERT(((target - this) & 3) == 0); + Instr branch_imm = 0; + uint32_t imm_mask = 0; + int offset = static_cast<int>((target - this) >> kInstructionSizeLog2); + switch (BranchType()) { + case CondBranchType: { + branch_imm = Assembler::ImmCondBranch(offset); + imm_mask = ImmCondBranch_mask; + break; + } + case UncondBranchType: { + branch_imm = Assembler::ImmUncondBranch(offset); + imm_mask = ImmUncondBranch_mask; + break; + } + case CompareBranchType: { + branch_imm = Assembler::ImmCmpBranch(offset); + imm_mask = ImmCmpBranch_mask; + break; + } + case TestBranchType: { + branch_imm = Assembler::ImmTestBranch(offset); + imm_mask = ImmTestBranch_mask; + break; + } + default: VIXL_UNREACHABLE(); + } + SetInstructionBits(Mask(~imm_mask) | branch_imm); +} + + +void Instruction::SetImmLLiteral(const Instruction* source) { + VIXL_ASSERT(IsWordAligned(source)); + ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2; + Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset)); + Instr mask = ImmLLiteral_mask; + + SetInstructionBits(Mask(~mask) | imm); +} + + +VectorFormat VectorFormatHalfWidth(const VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D || + vform == kFormatH || vform == kFormatS || vform == kFormatD); + switch (vform) { + case kFormat8H: return kFormat8B; + case kFormat4S: return kFormat4H; + case kFormat2D: return kFormat2S; + case kFormatH: return kFormatB; + case kFormatS: return kFormatH; + case kFormatD: return kFormatS; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + + +VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S || + vform == kFormatB || vform == kFormatH || vform == kFormatS); + switch (vform) { + case kFormat8B: return kFormat8H; + case kFormat4H: return kFormat4S; + case kFormat2S: return kFormat2D; + case kFormatB: return kFormatH; + case kFormatH: return kFormatS; + case kFormatS: return kFormatD; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + + +VectorFormat VectorFormatFillQ(const VectorFormat vform) { + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: return kFormat16B; + case kFormatH: + case kFormat4H: + case kFormat8H: return kFormat8H; + case kFormatS: + case kFormat2S: + case kFormat4S: return 
kFormat4S; + case kFormatD: + case kFormat1D: + case kFormat2D: return kFormat2D; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + +VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) { + switch (vform) { + case kFormat4H: return kFormat8B; + case kFormat8H: return kFormat16B; + case kFormat2S: return kFormat4H; + case kFormat4S: return kFormat8H; + case kFormat1D: return kFormat2S; + case kFormat2D: return kFormat4S; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + +VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S); + switch (vform) { + case kFormat8B: return kFormat16B; + case kFormat4H: return kFormat8H; + case kFormat2S: return kFormat4S; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + + +VectorFormat VectorFormatHalfLanes(const VectorFormat vform) { + VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S); + switch (vform) { + case kFormat16B: return kFormat8B; + case kFormat8H: return kFormat4H; + case kFormat4S: return kFormat2S; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + + +VectorFormat ScalarFormatFromLaneSize(int laneSize) { + switch (laneSize) { + case 8: return kFormatB; + case 16: return kFormatH; + case 32: return kFormatS; + case 64: return kFormatD; + default: VIXL_UNREACHABLE(); return kFormatUndefined; + } +} + + +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: return kBRegSize; + case kFormatH: return kHRegSize; + case kFormatS: return kSRegSize; + case kFormatD: return kDRegSize; + case kFormat8B: + case kFormat4H: + case kFormat2S: + case kFormat1D: return kDRegSize; + default: return kQRegSize; + } +} + + +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) { + return RegisterSizeInBitsFromFormat(vform) / 8; +} + + +unsigned LaneSizeInBitsFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: return 8; + case kFormatH: + case kFormat4H: + case kFormat8H: return 16; + case kFormatS: + case kFormat2S: + case kFormat4S: return 32; + case kFormatD: + case kFormat1D: + case kFormat2D: return 64; + default: VIXL_UNREACHABLE(); return 0; + } +} + + +int LaneSizeInBytesFromFormat(VectorFormat vform) { + return LaneSizeInBitsFromFormat(vform) / 8; +} + + +int LaneSizeInBytesLog2FromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: return 0; + case kFormatH: + case kFormat4H: + case kFormat8H: return 1; + case kFormatS: + case kFormat2S: + case kFormat4S: return 2; + case kFormatD: + case kFormat1D: + case kFormat2D: return 3; + default: VIXL_UNREACHABLE(); return 0; + } +} + + +int LaneCountFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormat16B: return 16; + case kFormat8B: + case kFormat8H: return 8; + case kFormat4H: + case kFormat4S: return 4; + case kFormat2S: + case kFormat2D: return 2; + case kFormat1D: + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: return 1; + default: VIXL_UNREACHABLE(); return 0; + } +} + + +int MaxLaneCountFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: return 16; + case kFormatH: + case 
kFormat4H: + case kFormat8H: return 8; + case kFormatS: + case kFormat2S: + case kFormat4S: return 4; + case kFormatD: + case kFormat1D: + case kFormat2D: return 2; + default: VIXL_UNREACHABLE(); return 0; + } +} + + +// Does 'vform' indicate a vector format or a scalar format? +bool IsVectorFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: return false; + default: return true; + } +} + + +int64_t MaxIntFromFormat(VectorFormat vform) { + return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} + + +int64_t MinIntFromFormat(VectorFormat vform) { + return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform)); +} + + +uint64_t MaxUintFromFormat(VectorFormat vform) { + return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} +} // namespace vixl + diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.h b/js/src/jit/arm64/vixl/Instructions-vixl.h new file mode 100644 index 000000000..d55b6d75d --- /dev/null +++ b/js/src/jit/arm64/vixl/Instructions-vixl.h @@ -0,0 +1,830 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_INSTRUCTIONS_A64_H_ +#define VIXL_A64_INSTRUCTIONS_A64_H_ + +#include "jit/arm64/vixl/Constants-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +namespace vixl { +// ISA constants. -------------------------------------------------------------- + +typedef uint32_t Instr; +const unsigned kInstructionSize = 4; +const unsigned kInstructionSizeLog2 = 2; +const unsigned kLiteralEntrySize = 4; +const unsigned kLiteralEntrySizeLog2 = 2; +const unsigned kMaxLoadLiteralRange = 1 * MBytes; + +// This is the nominal page size (as used by the adrp instruction); the actual +// size of the memory pages allocated by the kernel is likely to differ. 
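+// For example, adrp always forms its result by clearing the low 12 bits of the
+// PC and adding a signed multiple of 4 KB, even when the kernel uses 16 KB or
+// 64 KB memory pages.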
+const unsigned kPageSize = 4 * KBytes; +const unsigned kPageSizeLog2 = 12; + +const unsigned kBRegSize = 8; +const unsigned kBRegSizeLog2 = 3; +const unsigned kBRegSizeInBytes = kBRegSize / 8; +const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3; +const unsigned kHRegSize = 16; +const unsigned kHRegSizeLog2 = 4; +const unsigned kHRegSizeInBytes = kHRegSize / 8; +const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3; +const unsigned kWRegSize = 32; +const unsigned kWRegSizeLog2 = 5; +const unsigned kWRegSizeInBytes = kWRegSize / 8; +const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3; +const unsigned kXRegSize = 64; +const unsigned kXRegSizeLog2 = 6; +const unsigned kXRegSizeInBytes = kXRegSize / 8; +const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3; +const unsigned kSRegSize = 32; +const unsigned kSRegSizeLog2 = 5; +const unsigned kSRegSizeInBytes = kSRegSize / 8; +const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3; +const unsigned kDRegSize = 64; +const unsigned kDRegSizeLog2 = 6; +const unsigned kDRegSizeInBytes = kDRegSize / 8; +const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3; +const unsigned kQRegSize = 128; +const unsigned kQRegSizeLog2 = 7; +const unsigned kQRegSizeInBytes = kQRegSize / 8; +const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3; +const uint64_t kWRegMask = UINT64_C(0xffffffff); +const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kSRegMask = UINT64_C(0xffffffff); +const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kSSignMask = UINT64_C(0x80000000); +const uint64_t kDSignMask = UINT64_C(0x8000000000000000); +const uint64_t kWSignMask = UINT64_C(0x80000000); +const uint64_t kXSignMask = UINT64_C(0x8000000000000000); +const uint64_t kByteMask = UINT64_C(0xff); +const uint64_t kHalfWordMask = UINT64_C(0xffff); +const uint64_t kWordMask = UINT64_C(0xffffffff); +const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff); +const uint64_t kWMaxUInt = UINT64_C(0xffffffff); +const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff); +const int64_t kXMinInt = INT64_C(0x8000000000000000); +const int32_t kWMaxInt = INT32_C(0x7fffffff); +const int32_t kWMinInt = INT32_C(0x80000000); +const unsigned kLinkRegCode = 30; +const unsigned kZeroRegCode = 31; +const unsigned kSPRegInternalCode = 63; +const unsigned kRegCodeMask = 0x1f; + +const unsigned kAddressTagOffset = 56; +const unsigned kAddressTagWidth = 8; +const uint64_t kAddressTagMask = + ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset; +VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000)); + +// AArch64 floating-point specifics. These match IEEE-754. +const unsigned kDoubleMantissaBits = 52; +const unsigned kDoubleExponentBits = 11; +const unsigned kFloatMantissaBits = 23; +const unsigned kFloatExponentBits = 8; +const unsigned kFloat16MantissaBits = 10; +const unsigned kFloat16ExponentBits = 5; + +// Floating-point infinity values. +extern const float16 kFP16PositiveInfinity; +extern const float16 kFP16NegativeInfinity; +extern const float kFP32PositiveInfinity; +extern const float kFP32NegativeInfinity; +extern const double kFP64PositiveInfinity; +extern const double kFP64NegativeInfinity; + +// The default NaN values (for FPCR.DN=1). 
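+// (With FPCR.DN set, floating-point operations return these quiet NaNs rather
+// than propagating the payload of an input NaN.)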
+extern const float16 kFP16DefaultNaN; +extern const float kFP32DefaultNaN; +extern const double kFP64DefaultNaN; + +unsigned CalcLSDataSize(LoadStoreOp op); +unsigned CalcLSPairDataSize(LoadStorePairOp op); + +enum ImmBranchType { + UnknownBranchType = 0, + CondBranchType = 1, + UncondBranchType = 2, + CompareBranchType = 3, + TestBranchType = 4 +}; + +// The classes of immediate branch ranges, in order of increasing range. +// Note that CondBranchType and CompareBranchType have the same range. +enum ImmBranchRangeType { + TestBranchRangeType, // tbz/tbnz: imm14 = +/- 32KB. + CondBranchRangeType, // b.cond/cbz/cbnz: imm19 = +/- 1MB. + UncondBranchRangeType, // b/bl: imm26 = +/- 128MB. + UnknownBranchRangeType, + + // Number of 'short-range' branch range types. + // We don't consider unconditional branches 'short-range'. + NumShortBranchRangeTypes = UncondBranchRangeType +}; + +enum AddrMode { + Offset, + PreIndex, + PostIndex +}; + +enum FPRounding { + // The first four values are encodable directly by FPCR<RMode>. + FPTieEven = 0x0, + FPPositiveInfinity = 0x1, + FPNegativeInfinity = 0x2, + FPZero = 0x3, + + // The final rounding modes are only available when explicitly specified by + // the instruction (such as with fcvta). It cannot be set in FPCR. + FPTieAway, + FPRoundOdd +}; + +enum Reg31Mode { + Reg31IsStackPointer, + Reg31IsZeroRegister +}; + +// Instructions. --------------------------------------------------------------- + +class Instruction { + public: + Instr InstructionBits() const { + return *(reinterpret_cast<const Instr*>(this)); + } + + void SetInstructionBits(Instr new_instr) { + *(reinterpret_cast<Instr*>(this)) = new_instr; + } + + int Bit(int pos) const { + return (InstructionBits() >> pos) & 1; + } + + uint32_t Bits(int msb, int lsb) const { + return unsigned_bitextract_32(msb, lsb, InstructionBits()); + } + + int32_t SignedBits(int msb, int lsb) const { + int32_t bits = *(reinterpret_cast<const int32_t*>(this)); + return signed_bitextract_32(msb, lsb, bits); + } + + Instr Mask(uint32_t mask) const { + return InstructionBits() & mask; + } + + #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ + int32_t Name() const { return Func(HighBit, LowBit); } + INSTRUCTION_FIELDS_LIST(DEFINE_GETTER) + #undef DEFINE_GETTER + + #define DEFINE_SETTER(Name, HighBit, LowBit, Func) \ + inline void Set##Name(unsigned n) { SetBits32(HighBit, LowBit, n); } + INSTRUCTION_FIELDS_LIST(DEFINE_SETTER) + #undef DEFINE_SETTER + + // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), + // formed from ImmPCRelLo and ImmPCRelHi. + int ImmPCRel() const { + int offset = + static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); + int width = ImmPCRelLo_width + ImmPCRelHi_width; + return signed_bitextract_32(width - 1, 0, offset); + } + + uint64_t ImmLogical() const; + unsigned ImmNEONabcdefgh() const; + float ImmFP32() const; + double ImmFP64() const; + float ImmNEONFP32() const; + double ImmNEONFP64() const; + + unsigned SizeLS() const { + return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask))); + } + + unsigned SizeLSPair() const { + return CalcLSPairDataSize( + static_cast<LoadStorePairOp>(Mask(LoadStorePairMask))); + } + + int NEONLSIndex(int access_size_shift) const { + int64_t q = NEONQ(); + int64_t s = NEONS(); + int64_t size = NEONLSSize(); + int64_t index = (q << 3) | (s << 2) | size; + return static_cast<int>(index >> access_size_shift); + } + + // Helpers. 
+ bool IsCondBranchImm() const { + return Mask(ConditionalBranchFMask) == ConditionalBranchFixed; + } + + bool IsUncondBranchImm() const { + return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed; + } + + bool IsCompareBranch() const { + return Mask(CompareBranchFMask) == CompareBranchFixed; + } + + bool IsTestBranch() const { + return Mask(TestBranchFMask) == TestBranchFixed; + } + + bool IsImmBranch() const { + return BranchType() != UnknownBranchType; + } + + bool IsPCRelAddressing() const { + return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; + } + + bool IsLogicalImmediate() const { + return Mask(LogicalImmediateFMask) == LogicalImmediateFixed; + } + + bool IsAddSubImmediate() const { + return Mask(AddSubImmediateFMask) == AddSubImmediateFixed; + } + + bool IsAddSubExtended() const { + return Mask(AddSubExtendedFMask) == AddSubExtendedFixed; + } + + bool IsLoadOrStore() const { + return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed; + } + + bool IsLoad() const; + bool IsStore() const; + + bool IsLoadLiteral() const { + // This includes PRFM_lit. + return Mask(LoadLiteralFMask) == LoadLiteralFixed; + } + + bool IsMovn() const { + return (Mask(MoveWideImmediateMask) == MOVN_x) || + (Mask(MoveWideImmediateMask) == MOVN_w); + } + + // Mozilla modifications. + bool IsUncondB() const; + bool IsCondB() const; + bool IsBL() const; + bool IsBR() const; + bool IsBLR() const; + bool IsTBZ() const; + bool IsTBNZ() const; + bool IsCBZ() const; + bool IsCBNZ() const; + bool IsLDR() const; + bool IsNOP() const; + bool IsADR() const; + bool IsADRP() const; + bool IsBranchLinkImm() const; + bool IsTargetReachable(Instruction* target) const; + ptrdiff_t ImmPCRawOffset() const; + void SetImmPCRawOffset(ptrdiff_t offset); + void SetBits32(int msb, int lsb, unsigned value); + + // Is this a stack pointer synchronization instruction as inserted by + // MacroAssembler::syncStackPtr()? + bool IsStackPtrSync() const; + + static int ImmBranchRangeBitwidth(ImmBranchType branch_type); + static int32_t ImmBranchForwardRange(ImmBranchType branch_type); + + // Check if offset can be encoded as a RAW offset in a branch_type + // instruction. The offset must be encodeable directly as the immediate field + // in the instruction, it is not scaled by kInstructionSize first. + static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset); + + // Get the range type corresponding to a branch type. + static ImmBranchRangeType ImmBranchTypeToRange(ImmBranchType); + + // Get the maximum realizable forward PC offset (in bytes) for an immediate + // branch of the given range type. + // This is the largest positive multiple of kInstructionSize, offset, such + // that: + // + // IsValidImmPCOffset(xxx, offset / kInstructionSize) + // + // returns true for the same branch type. + static int32_t ImmBranchMaxForwardOffset(ImmBranchRangeType range_type); + + // Get the minimuum realizable backward PC offset (in bytes) for an immediate + // branch of the given range type. + // This is the smallest (i.e., largest in magnitude) negative multiple of + // kInstructionSize, offset, such that: + // + // IsValidImmPCOffset(xxx, offset / kInstructionSize) + // + // returns true for the same branch type. + static int32_t ImmBranchMinBackwardOffset(ImmBranchRangeType range_type); + + // Indicate whether Rd can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rd field. 
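+ // For example, "add sp, sp, #16" (flags not set) writes the stack pointer,
+ // whereas "adds xzr, x0, #16" (the cmn alias) treats register 31 as the zero
+ // register.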
+ Reg31Mode RdMode() const { + // The following instructions use sp or wsp as Rd: + // Add/sub (immediate) when not setting the flags. + // Add/sub (extended) when not setting the flags. + // Logical (immediate) when not setting the flags. + // Otherwise, r31 is the zero register. + if (IsAddSubImmediate() || IsAddSubExtended()) { + if (Mask(AddSubSetFlagsBit)) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + if (IsLogicalImmediate()) { + // Of the logical (immediate) instructions, only ANDS (and its aliases) + // can set the flags. The others can all write into sp. + // Note that some logical operations are not available to + // immediate-operand instructions, so we have to combine two masks here. + if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + return Reg31IsZeroRegister; + } + + // Indicate whether Rn can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rn field. + Reg31Mode RnMode() const { + // The following instructions use sp or wsp as Rn: + // All loads and stores. + // Add/sub (immediate). + // Add/sub (extended). + // Otherwise, r31 is the zero register. + if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) { + return Reg31IsStackPointer; + } + return Reg31IsZeroRegister; + } + + ImmBranchType BranchType() const { + if (IsCondBranchImm()) { + return CondBranchType; + } else if (IsUncondBranchImm()) { + return UncondBranchType; + } else if (IsCompareBranch()) { + return CompareBranchType; + } else if (IsTestBranch()) { + return TestBranchType; + } else { + return UnknownBranchType; + } + } + + // Find the target of this instruction. 'this' may be a branch or a + // PC-relative addressing instruction. + const Instruction* ImmPCOffsetTarget() const; + + // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or + // a PC-relative addressing instruction. + void SetImmPCOffsetTarget(const Instruction* target); + // Patch a literal load instruction to load from 'source'. + void SetImmLLiteral(const Instruction* source); + + // The range of a load literal instruction, expressed as 'instr +- range'. + // The range is actually the 'positive' range; the branch instruction can + // target [instr - range - kInstructionSize, instr + range]. + static const int kLoadLiteralImmBitwidth = 19; + static const int kLoadLiteralRange = + (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize; + + // Calculate the address of a literal referred to by a load-literal + // instruction, and return it as the specified type. + // + // The literal itself is safely mutable only if the backing buffer is safely + // mutable. + template <typename T> + T LiteralAddress() const { + uint64_t base_raw = reinterpret_cast<uint64_t>(this); + int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2; + uint64_t address_raw = base_raw + offset; + + // Cast the address using a C-style cast. A reinterpret_cast would be + // appropriate, but it can't cast one integral type to another. + T address = (T)(address_raw); + + // Assert that the address can be represented by the specified type. 
+ VIXL_ASSERT((uint64_t)(address) == address_raw); + + return address; + } + + uint32_t Literal32() const { + uint32_t literal; + memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal)); + return literal; + } + + uint64_t Literal64() const { + uint64_t literal; + memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal)); + return literal; + } + + float LiteralFP32() const { + return rawbits_to_float(Literal32()); + } + + double LiteralFP64() const { + return rawbits_to_double(Literal64()); + } + + const Instruction* NextInstruction() const { + return this + kInstructionSize; + } + + // Skip any constant pools with artificial guards at this point. + // Return either |this| or the first instruction after the pool. + const Instruction* skipPool() const; + + const Instruction* InstructionAtOffset(int64_t offset) const { + VIXL_ASSERT(IsWordAligned(this + offset)); + return this + offset; + } + + template<typename T> static Instruction* Cast(T src) { + return reinterpret_cast<Instruction*>(src); + } + + template<typename T> static const Instruction* CastConst(T src) { + return reinterpret_cast<const Instruction*>(src); + } + + private: + int ImmBranch() const; + + static float Imm8ToFP32(uint32_t imm8); + static double Imm8ToFP64(uint32_t imm8); + + void SetPCRelImmTarget(const Instruction* target); + void SetBranchImmTarget(const Instruction* target); +}; + + +// Functions for handling NEON vector format information. +enum VectorFormat { + kFormatUndefined = 0xffffffff, + kFormat8B = NEON_8B, + kFormat16B = NEON_16B, + kFormat4H = NEON_4H, + kFormat8H = NEON_8H, + kFormat2S = NEON_2S, + kFormat4S = NEON_4S, + kFormat1D = NEON_1D, + kFormat2D = NEON_2D, + + // Scalar formats. We add the scalar bit to distinguish between scalar and + // vector enumerations; the bit is always set in the encoding of scalar ops + // and always clear for vector ops. Although kFormatD and kFormat1D appear + // to be the same, their meaning is subtly different. The first is a scalar + // operation, the second a vector operation that only affects one lane. + kFormatB = NEON_B | NEONScalar, + kFormatH = NEON_H | NEONScalar, + kFormatS = NEON_S | NEONScalar, + kFormatD = NEON_D | NEONScalar +}; + +VectorFormat VectorFormatHalfWidth(const VectorFormat vform); +VectorFormat VectorFormatDoubleWidth(const VectorFormat vform); +VectorFormat VectorFormatDoubleLanes(const VectorFormat vform); +VectorFormat VectorFormatHalfLanes(const VectorFormat vform); +VectorFormat ScalarFormatFromLaneSize(int lanesize); +VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform); +VectorFormat VectorFormatFillQ(const VectorFormat vform); +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform); +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform); +// TODO: Make the return types of these functions consistent. 
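+// For example, VectorFormatHalfWidthDoubleLanes(kFormat4S) is kFormat8H: the
+// same 128-bit register viewed as lanes of half the width and twice the count.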
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform); +int LaneSizeInBytesFromFormat(VectorFormat vform); +int LaneSizeInBytesLog2FromFormat(VectorFormat vform); +int LaneCountFromFormat(VectorFormat vform); +int MaxLaneCountFromFormat(VectorFormat vform); +bool IsVectorFormat(VectorFormat vform); +int64_t MaxIntFromFormat(VectorFormat vform); +int64_t MinIntFromFormat(VectorFormat vform); +uint64_t MaxUintFromFormat(VectorFormat vform); + + +enum NEONFormat { + NF_UNDEF = 0, + NF_8B = 1, + NF_16B = 2, + NF_4H = 3, + NF_8H = 4, + NF_2S = 5, + NF_4S = 6, + NF_1D = 7, + NF_2D = 8, + NF_B = 9, + NF_H = 10, + NF_S = 11, + NF_D = 12 +}; + +static const unsigned kNEONFormatMaxBits = 6; + +struct NEONFormatMap { + // The bit positions in the instruction to consider. + uint8_t bits[kNEONFormatMaxBits]; + + // Mapping from concatenated bits to format. + NEONFormat map[1 << kNEONFormatMaxBits]; +}; + +class NEONFormatDecoder { + public: + enum SubstitutionMode { + kPlaceholder, + kFormat + }; + + // Construct a format decoder with increasingly specific format maps for each + // subsitution. If no format map is specified, the default is the integer + // format map. + explicit NEONFormatDecoder(const Instruction* instr) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(IntegerFormatMap()); + } + NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(format); + } + NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format0, + const NEONFormatMap* format1) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(format0, format1); + } + NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format0, + const NEONFormatMap* format1, + const NEONFormatMap* format2) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(format0, format1, format2); + } + + // Set the format mapping for all or individual substitutions. + void SetFormatMaps(const NEONFormatMap* format0, + const NEONFormatMap* format1 = NULL, + const NEONFormatMap* format2 = NULL) { + VIXL_ASSERT(format0 != NULL); + formats_[0] = format0; + formats_[1] = (format1 == NULL) ? formats_[0] : format1; + formats_[2] = (format2 == NULL) ? formats_[1] : format2; + } + void SetFormatMap(unsigned index, const NEONFormatMap* format) { + VIXL_ASSERT(index <= (sizeof(formats_) / sizeof(formats_[0]))); + VIXL_ASSERT(format != NULL); + formats_[index] = format; + } + + // Substitute %s in the input string with the placeholder string for each + // register, ie. "'B", "'H", etc. + const char* SubstitutePlaceholders(const char* string) { + return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder); + } + + // Substitute %s in the input string with a new string based on the + // substitution mode. + const char* Substitute(const char* string, + SubstitutionMode mode0 = kFormat, + SubstitutionMode mode1 = kFormat, + SubstitutionMode mode2 = kFormat) { + snprintf(form_buffer_, sizeof(form_buffer_), string, + GetSubstitute(0, mode0), + GetSubstitute(1, mode1), + GetSubstitute(2, mode2)); + return form_buffer_; + } + + // Append a "2" to a mnemonic string based of the state of the Q bit. 
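+ // (For example, "saddl" is printed as "saddl2" when Q is set, matching the
+ // upper-half forms of the narrowing and widening NEON instructions.)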
+ const char* Mnemonic(const char* mnemonic) { + if ((instrbits_ & NEON_Q) != 0) { + snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic); + return mne_buffer_; + } + return mnemonic; + } + + VectorFormat GetVectorFormat(int format_index = 0) { + return GetVectorFormat(formats_[format_index]); + } + + VectorFormat GetVectorFormat(const NEONFormatMap* format_map) { + static const VectorFormat vform[] = { + kFormatUndefined, + kFormat8B, kFormat16B, kFormat4H, kFormat8H, + kFormat2S, kFormat4S, kFormat1D, kFormat2D, + kFormatB, kFormatH, kFormatS, kFormatD + }; + VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0]))); + return vform[GetNEONFormat(format_map)]; + } + + // Built in mappings for common cases. + + // The integer format map uses three bits (Q, size<1:0>) to encode the + // "standard" set of NEON integer vector formats. + static const NEONFormatMap* IntegerFormatMap() { + static const NEONFormatMap map = { + {23, 22, 30}, + {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D} + }; + return ↦ + } + + // The long integer format map uses two bits (size<1:0>) to encode the + // long set of NEON integer vector formats. These are used in narrow, wide + // and long operations. + static const NEONFormatMap* LongIntegerFormatMap() { + static const NEONFormatMap map = { + {23, 22}, {NF_8H, NF_4S, NF_2D} + }; + return ↦ + } + + // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector + // formats: NF_2S, NF_4S, NF_2D. + static const NEONFormatMap* FPFormatMap() { + // The FP format map assumes two bits (Q, size<0>) are used to encode the + // NEON FP vector formats: NF_2S, NF_4S, NF_2D. + static const NEONFormatMap map = { + {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D} + }; + return ↦ + } + + // The load/store format map uses three bits (Q, 11, 10) to encode the + // set of NEON vector formats. + static const NEONFormatMap* LoadStoreFormatMap() { + static const NEONFormatMap map = { + {11, 10, 30}, + {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D} + }; + return ↦ + } + + // The logical format map uses one bit (Q) to encode the NEON vector format: + // NF_8B, NF_16B. + static const NEONFormatMap* LogicalFormatMap() { + static const NEONFormatMap map = { + {30}, {NF_8B, NF_16B} + }; + return ↦ + } + + // The triangular format map uses between two and five bits to encode the NEON + // vector format: + // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H + // x1000->2S, x1001->4S, 10001->2D, all others undefined. + static const NEONFormatMap* TriangularFormatMap() { + static const NEONFormatMap map = { + {19, 18, 17, 16, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, + NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D, + NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B, + NF_4H, NF_8H, NF_8B, NF_16B} + }; + return ↦ + } + + // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar + // formats: NF_B, NF_H, NF_S, NF_D. + static const NEONFormatMap* ScalarFormatMap() { + static const NEONFormatMap map = { + {23, 22}, {NF_B, NF_H, NF_S, NF_D} + }; + return ↦ + } + + // The long scalar format map uses two bits (size<1:0>) to encode the longer + // NEON scalar formats: NF_H, NF_S, NF_D. + static const NEONFormatMap* LongScalarFormatMap() { + static const NEONFormatMap map = { + {23, 22}, {NF_H, NF_S, NF_D} + }; + return ↦ + } + + // The FP scalar format map assumes one bit (size<0>) is used to encode the + // NEON FP scalar formats: NF_S, NF_D. 
+ static const NEONFormatMap* FPScalarFormatMap() { + static const NEONFormatMap map = { + {22}, {NF_S, NF_D} + }; + return ↦ + } + + // The triangular scalar format map uses between one and four bits to encode + // the NEON FP scalar formats: + // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined. + static const NEONFormatMap* TriangularScalarFormatMap() { + static const NEONFormatMap map = { + {19, 18, 17, 16}, + {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, + NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B} + }; + return ↦ + } + + private: + // Get a pointer to a string that represents the format or placeholder for + // the specified substitution index, based on the format map and instruction. + const char* GetSubstitute(int index, SubstitutionMode mode) { + if (mode == kFormat) { + return NEONFormatAsString(GetNEONFormat(formats_[index])); + } + VIXL_ASSERT(mode == kPlaceholder); + return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index])); + } + + // Get the NEONFormat enumerated value for bits obtained from the + // instruction based on the specified format mapping. + NEONFormat GetNEONFormat(const NEONFormatMap* format_map) { + return format_map->map[PickBits(format_map->bits)]; + } + + // Convert a NEONFormat into a string. + static const char* NEONFormatAsString(NEONFormat format) { + static const char* formats[] = { + "undefined", + "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d", + "b", "h", "s", "d" + }; + VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0]))); + return formats[format]; + } + + // Convert a NEONFormat into a register placeholder string. + static const char* NEONFormatAsPlaceholder(NEONFormat format) { + VIXL_ASSERT((format == NF_B) || (format == NF_H) || + (format == NF_S) || (format == NF_D) || + (format == NF_UNDEF)); + static const char* formats[] = { + "undefined", + "undefined", "undefined", "undefined", "undefined", + "undefined", "undefined", "undefined", "undefined", + "'B", "'H", "'S", "'D" + }; + return formats[format]; + } + + // Select bits from instrbits_ defined by the bits array, concatenate them, + // and return the value. + uint8_t PickBits(const uint8_t bits[]) { + uint8_t result = 0; + for (unsigned b = 0; b < kNEONFormatMaxBits; b++) { + if (bits[b] == 0) break; + result <<= 1; + result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1; + } + return result; + } + + Instr instrbits_; + const NEONFormatMap* formats_[3]; + char form_buffer_[64]; + char mne_buffer_[16]; +}; +} // namespace vixl + +#endif // VIXL_A64_INSTRUCTIONS_A64_H_ diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.cpp b/js/src/jit/arm64/vixl/Instrument-vixl.cpp new file mode 100644 index 000000000..7653e0856 --- /dev/null +++ b/js/src/jit/arm64/vixl/Instrument-vixl.cpp @@ -0,0 +1,844 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Instrument-vixl.h" + +namespace vixl { + +Counter::Counter(const char* name, CounterType type) + : count_(0), enabled_(false), type_(type) { + VIXL_ASSERT(name != NULL); + strncpy(name_, name, kCounterNameMaxLength); +} + + +void Counter::Enable() { + enabled_ = true; +} + + +void Counter::Disable() { + enabled_ = false; +} + + +bool Counter::IsEnabled() { + return enabled_; +} + + +void Counter::Increment() { + if (enabled_) { + count_++; + } +} + + +uint64_t Counter::count() { + uint64_t result = count_; + if (type_ == Gauge) { + // If the counter is a Gauge, reset the count after reading. + count_ = 0; + } + return result; +} + + +const char* Counter::name() { + return name_; +} + + +CounterType Counter::type() { + return type_; +} + + +struct CounterDescriptor { + const char* name; + CounterType type; +}; + + +static const CounterDescriptor kCounterList[] = { + {"Instruction", Cumulative}, + + {"Move Immediate", Gauge}, + {"Add/Sub DP", Gauge}, + {"Logical DP", Gauge}, + {"Other Int DP", Gauge}, + {"FP DP", Gauge}, + + {"Conditional Select", Gauge}, + {"Conditional Compare", Gauge}, + + {"Unconditional Branch", Gauge}, + {"Compare and Branch", Gauge}, + {"Test and Branch", Gauge}, + {"Conditional Branch", Gauge}, + + {"Load Integer", Gauge}, + {"Load FP", Gauge}, + {"Load Pair", Gauge}, + {"Load Literal", Gauge}, + + {"Store Integer", Gauge}, + {"Store FP", Gauge}, + {"Store Pair", Gauge}, + + {"PC Addressing", Gauge}, + {"Other", Gauge}, + {"NEON", Gauge}, + {"Crypto", Gauge} +}; + + +Instrument::Instrument(const char* datafile, uint64_t sample_period) + : output_stream_(stdout), sample_period_(sample_period) { + + // Set up the output stream. If datafile is non-NULL, use that file. If it + // can't be opened, or datafile is NULL, use stdout. + if (datafile != NULL) { + output_stream_ = fopen(datafile, "w"); + if (output_stream_ == NULL) { + printf("Can't open output file %s. Using stdout.\n", datafile); + output_stream_ = stdout; + } + } + + static const int num_counters = + sizeof(kCounterList) / sizeof(CounterDescriptor); + + // Dump an instrumentation description comment at the top of the file. + fprintf(output_stream_, "# counters=%d\n", num_counters); + fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_); + + // Construct Counter objects from counter description array. + for (int i = 0; i < num_counters; i++) { + if (Counter* counter = js_new<Counter>(kCounterList[i].name, kCounterList[i].type)) + counters_.append(counter); + } + + DumpCounterNames(); +} + + +Instrument::~Instrument() { + // Dump any remaining instruction data to the output file. + DumpCounters(); + + // Free all the counter objects. 
+ for (auto counter : counters_) { + js_delete(counter); + } + + if (output_stream_ != stdout) { + fclose(output_stream_); + } +} + + +void Instrument::Update() { + // Increment the instruction counter, and dump all counters if a sample period + // has elapsed. + static Counter* counter = GetCounter("Instruction"); + VIXL_ASSERT(counter->type() == Cumulative); + counter->Increment(); + + if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) { + DumpCounters(); + } +} + + +void Instrument::DumpCounters() { + // Iterate through the counter objects, dumping their values to the output + // stream. + for (auto counter : counters_) { + fprintf(output_stream_, "%" PRIu64 ",", counter->count()); + } + fprintf(output_stream_, "\n"); + fflush(output_stream_); +} + + +void Instrument::DumpCounterNames() { + // Iterate through the counter objects, dumping the counter names to the + // output stream. + for (auto counter : counters_) { + fprintf(output_stream_, "%s,", counter->name()); + } + fprintf(output_stream_, "\n"); + fflush(output_stream_); +} + + +void Instrument::HandleInstrumentationEvent(unsigned event) { + switch (event) { + case InstrumentStateEnable: Enable(); break; + case InstrumentStateDisable: Disable(); break; + default: DumpEventMarker(event); + } +} + + +void Instrument::DumpEventMarker(unsigned marker) { + // Dumpan event marker to the output stream as a specially formatted comment + // line. + static Counter* counter = GetCounter("Instruction"); + + fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff, + (marker >> 8) & 0xff, counter->count()); +} + + +Counter* Instrument::GetCounter(const char* name) { + // Get a Counter object by name from the counter list. + for (auto counter : counters_) { + if (strcmp(counter->name(), name) == 0) { + return counter; + } + } + + // A Counter by that name does not exist: print an error message to stderr + // and the output file, and exit. + static const char* error_message = + "# Error: Unknown counter \"%s\". 
Exiting.\n"; + fprintf(stderr, error_message, name); + fprintf(output_stream_, error_message, name); + exit(1); +} + + +void Instrument::Enable() { + for (auto counter : counters_) { + counter->Enable(); + } +} + + +void Instrument::Disable() { + for (auto counter : counters_) { + counter->Disable(); + } +} + + +void Instrument::VisitPCRelAddressing(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("PC Addressing"); + counter->Increment(); +} + + +void Instrument::VisitAddSubImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitLogicalImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Logical DP"); + counter->Increment(); +} + + +void Instrument::VisitMoveWideImmediate(const Instruction* instr) { + Update(); + static Counter* counter = GetCounter("Move Immediate"); + + if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) { + unsigned imm = instr->ImmMoveWide(); + HandleInstrumentationEvent(imm); + } else { + counter->Increment(); + } +} + + +void Instrument::VisitBitfield(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitExtract(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitUnconditionalBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Unconditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Unconditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitCompareBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Compare and Branch"); + counter->Increment(); +} + + +void Instrument::VisitTestBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Test and Branch"); + counter->Increment(); +} + + +void Instrument::VisitConditionalBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitSystem(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitException(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::InstrumentLoadStorePair(const Instruction* instr) { + static Counter* load_pair_counter = GetCounter("Load Pair"); + static Counter* store_pair_counter = GetCounter("Store Pair"); + + if (instr->Mask(LoadStorePairLBit) != 0) { + load_pair_counter->Increment(); + } else { + store_pair_counter->Increment(); + } +} + + +void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairOffset(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void 
Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStoreExclusive(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitLoadLiteral(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Load Literal"); + counter->Increment(); +} + + +void Instrument::InstrumentLoadStore(const Instruction* instr) { + static Counter* load_int_counter = GetCounter("Load Integer"); + static Counter* store_int_counter = GetCounter("Store Integer"); + static Counter* load_fp_counter = GetCounter("Load FP"); + static Counter* store_fp_counter = GetCounter("Store FP"); + + switch (instr->Mask(LoadStoreMask)) { + case STRB_w: + case STRH_w: + case STR_w: + VIXL_FALLTHROUGH(); + case STR_x: store_int_counter->Increment(); break; + case STR_s: + VIXL_FALLTHROUGH(); + case STR_d: store_fp_counter->Increment(); break; + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_x: + case LDRSH_x: + case LDRSW_x: + case LDRSB_w: + VIXL_FALLTHROUGH(); + case LDRSH_w: load_int_counter->Increment(); break; + case LDR_s: + VIXL_FALLTHROUGH(); + case LDR_d: load_fp_counter->Increment(); break; + } +} + + +void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStorePostIndex(const Instruction* instr) { + USE(instr); + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStorePreIndex(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLogicalShifted(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Logical DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubShifted(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubExtended(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubWithCarry(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitConditionalCompareRegister(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitConditionalSelect(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Select"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing1Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing2Source(const Instruction* instr) { + USE(instr); + Update(); + static 
Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing3Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitFPCompare(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPConditionalCompare(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitFPConditionalSelect(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Select"); + counter->Increment(); +} + + +void Instrument::VisitFPImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPIntegerConvert(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPFixedPointConvert(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitCrypto2RegSHA(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitCrypto3RegSHA(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitCryptoAES(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitNEON2RegMisc(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3Same(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3Different(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONAcrossLanes(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONByIndexedElement(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONCopy(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONExtract(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + 
counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3Diff(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3Same(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarCopy(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarPairwise(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONShiftImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONTable(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONPerm(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitUnallocated(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitUnimplemented(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.h b/js/src/jit/arm64/vixl/Instrument-vixl.h new file mode 100644 index 000000000..68b04c60c --- /dev/null +++ b/js/src/jit/arm64/vixl/Instrument-vixl.h @@ -0,0 +1,110 @@ +// Copyright 2014, ARM Limited +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_INSTRUMENT_A64_H_ +#define VIXL_A64_INSTRUMENT_A64_H_ + +#include "mozilla/Vector.h" + +#include "jsalloc.h" + +#include "jit/arm64/vixl/Constants-vixl.h" +#include "jit/arm64/vixl/Decoder-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" + +namespace vixl { + +const int kCounterNameMaxLength = 256; +const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22; + + +enum InstrumentState { + InstrumentStateDisable = 0, + InstrumentStateEnable = 1 +}; + + +enum CounterType { + Gauge = 0, // Gauge counters reset themselves after reading. + Cumulative = 1 // Cumulative counters keep their value after reading. +}; + + +class Counter { + public: + explicit Counter(const char* name, CounterType type = Gauge); + + void Increment(); + void Enable(); + void Disable(); + bool IsEnabled(); + uint64_t count(); + const char* name(); + CounterType type(); + + private: + char name_[kCounterNameMaxLength]; + uint64_t count_; + bool enabled_; + CounterType type_; +}; + + +class Instrument: public DecoderVisitor { + public: + explicit Instrument(const char* datafile = NULL, + uint64_t sample_period = kDefaultInstrumentationSamplingPeriod); + ~Instrument(); + + void Enable(); + void Disable(); + + // Declare all Visitor functions. 
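+  // (Illustrative note, added for clarity: VISITOR_LIST is an X-macro over the instruction classes, so expanding DECLARE through it declares one Visit<Class>(const Instruction*) method per class, e.g. VisitAddSubImmediate, matching the definitions in Instrument-vixl.cpp above.)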
+ #define DECLARE(A) void Visit##A(const Instruction* instr); + VISITOR_LIST(DECLARE) + #undef DECLARE + + private: + void Update(); + void DumpCounters(); + void DumpCounterNames(); + void DumpEventMarker(unsigned marker); + void HandleInstrumentationEvent(unsigned event); + Counter* GetCounter(const char* name); + + void InstrumentLoadStore(const Instruction* instr); + void InstrumentLoadStorePair(const Instruction* instr); + + mozilla::Vector<Counter*, 8, js::SystemAllocPolicy> counters_; + + FILE *output_stream_; + uint64_t sample_period_; +}; + +} // namespace vixl + +#endif // VIXL_A64_INSTRUMENT_A64_H_ diff --git a/js/src/jit/arm64/vixl/Logic-vixl.cpp b/js/src/jit/arm64/vixl/Logic-vixl.cpp new file mode 100644 index 000000000..539e145ec --- /dev/null +++ b/js/src/jit/arm64/vixl/Logic-vixl.cpp @@ -0,0 +1,4878 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifdef JS_SIMULATOR_ARM64 + +#include <cmath> + +#include "jit/arm64/vixl/Simulator-vixl.h" + +namespace vixl { + +template<> double Simulator::FPDefaultNaN<double>() { + return kFP64DefaultNaN; +} + + +template<> float Simulator::FPDefaultNaN<float>() { + return kFP32DefaultNaN; +} + +// See FPRound for a description of this function. +static inline double FPRoundToDouble(int64_t sign, int64_t exponent, + uint64_t mantissa, FPRounding round_mode) { + int64_t bits = + FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign, + exponent, + mantissa, + round_mode); + return rawbits_to_double(bits); +} + + +// See FPRound for a description of this function. +static inline float FPRoundToFloat(int64_t sign, int64_t exponent, + uint64_t mantissa, FPRounding round_mode) { + int32_t bits = + FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign, + exponent, + mantissa, + round_mode); + return rawbits_to_float(bits); +} + + +// See FPRound for a description of this function. 
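+// (Illustrative note: like FPRoundToDouble and FPRoundToFloat above, this assembles an IEEE-754 half from the given sign, exponent and mantissa, rounded according to round_mode.)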
+static inline float16 FPRoundToFloat16(int64_t sign, + int64_t exponent, + uint64_t mantissa, + FPRounding round_mode) { + return FPRound<float16, kFloat16ExponentBits, kFloat16MantissaBits>( + sign, exponent, mantissa, round_mode); +} + + +double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToDouble(src, fbits, round); + } else { + // This works for all negative values, including INT64_MIN. + return -UFixedToDouble(-src, fbits, round); + } +} + + +double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. + const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int64_t exponent = highest_significant_bit - fbits; + + return FPRoundToDouble(0, exponent, src, round); +} + + +float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToFloat(src, fbits, round); + } else { + // This works for all negative values, including INT64_MIN. + return -UFixedToFloat(-src, fbits, round); + } +} + + +float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0f; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. + const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int32_t exponent = highest_significant_bit - fbits; + + return FPRoundToFloat(0, exponent, src, round); +} + + +double Simulator::FPToDouble(float value) { + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP64DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred entirely, except that the top + // bit is forced to '1', making the result a quiet NaN. The unused + // (low-order) payload bits are set to 0. + uint32_t raw = float_to_rawbits(value); + + uint64_t sign = raw >> 31; + uint64_t exponent = (1 << 11) - 1; + uint64_t payload = unsigned_bitextract_64(21, 0, raw); + payload <<= (52 - 23); // The unused low-order bits should be 0. + payload |= (UINT64_C(1) << 51); // Force a quiet NaN. + + return rawbits_to_double((sign << 63) | (exponent << 52) | payload); + } + + case FP_ZERO: + case FP_NORMAL: + case FP_SUBNORMAL: + case FP_INFINITE: { + // All other inputs are preserved in a standard cast, because every value + // representable using an IEEE-754 float is also representable using an + // IEEE-754 double. + return static_cast<double>(value); + } + } + + VIXL_UNREACHABLE(); + return static_cast<double>(value); +} + + +float Simulator::FPToFloat(float16 value) { + uint32_t sign = value >> 15; + uint32_t exponent = unsigned_bitextract_32( + kFloat16MantissaBits + kFloat16ExponentBits - 1, kFloat16MantissaBits, + value); + uint32_t mantissa = unsigned_bitextract_32( + kFloat16MantissaBits - 1, 0, value); + + switch (float16classify(value)) { + case FP_ZERO: + return (sign == 0) ? 0.0f : -0.0f; + + case FP_INFINITE: + return (sign == 0) ? 
kFP32PositiveInfinity : kFP32NegativeInfinity; + + case FP_SUBNORMAL: { + // Calculate shift required to put mantissa into the most-significant bits + // of the destination mantissa. + int shift = CountLeadingZeros(mantissa << (32 - 10)); + + // Shift mantissa and discard implicit '1'. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1; + mantissa &= (1 << kFloatMantissaBits) - 1; + + // Adjust the exponent for the shift applied, and rebias. + exponent = exponent - shift + (-15 + 127); + break; + } + + case FP_NAN: + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP32DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred entirely, except that the top + // bit is forced to '1', making the result a quiet NaN. The unused + // (low-order) payload bits are set to 0. + exponent = (1 << kFloatExponentBits) - 1; + + // Increase bits in mantissa, making low-order bits 0. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits); + mantissa |= 1 << 22; // Force a quiet NaN. + break; + + case FP_NORMAL: + // Increase bits in mantissa, making low-order bits 0. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits); + + // Change exponent bias. + exponent += (-15 + 127); + break; + + default: VIXL_UNREACHABLE(); + } + return rawbits_to_float((sign << 31) | + (exponent << kFloatMantissaBits) | + mantissa); +} + + +float16 Simulator::FPToFloat16(float value, FPRounding round_mode) { + // Only the FPTieEven rounding mode is implemented. + VIXL_ASSERT(round_mode == FPTieEven); + USE(round_mode); + + uint32_t raw = float_to_rawbits(value); + int32_t sign = raw >> 31; + int32_t exponent = unsigned_bitextract_32(30, 23, raw) - 127; + uint32_t mantissa = unsigned_bitextract_32(22, 0, raw); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP16DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred as much as possible, except + // that the top bit is forced to '1', making the result a quiet NaN. + float16 result = (sign == 0) ? kFP16PositiveInfinity + : kFP16NegativeInfinity; + result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits); + result |= (1 << 9); // Force a quiet NaN; + return result; + } + + case FP_ZERO: + return (sign == 0) ? 0 : 0x8000; + + case FP_INFINITE: + return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert float-to-half as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + + // Add the implicit '1' bit to the mantissa. + mantissa += (1 << 23); + return FPRoundToFloat16(sign, exponent, mantissa, round_mode); + } + } + + VIXL_UNREACHABLE(); + return 0; +} + + +float16 Simulator::FPToFloat16(double value, FPRounding round_mode) { + // Only the FPTieEven rounding mode is implemented. + VIXL_ASSERT(round_mode == FPTieEven); + USE(round_mode); + + uint64_t raw = double_to_rawbits(value); + int32_t sign = raw >> 63; + int64_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023; + uint64_t mantissa = unsigned_bitextract_64(51, 0, raw); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP16DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. 
+ // - The payload (mantissa) is transferred as much as possible, except + // that the top bit is forced to '1', making the result a quiet NaN. + float16 result = (sign == 0) ? kFP16PositiveInfinity + : kFP16NegativeInfinity; + result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits); + result |= (1 << 9); // Force a quiet NaN; + return result; + } + + case FP_ZERO: + return (sign == 0) ? 0 : 0x8000; + + case FP_INFINITE: + return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert double-to-half as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + + // Add the implicit '1' bit to the mantissa. + mantissa += (UINT64_C(1) << 52); + return FPRoundToFloat16(sign, exponent, mantissa, round_mode); + } + } + + VIXL_UNREACHABLE(); + return 0; +} + + +float Simulator::FPToFloat(double value, FPRounding round_mode) { + // Only the FPTieEven rounding mode is implemented. + VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd)); + USE(round_mode); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP32DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred as much as possible, except + // that the top bit is forced to '1', making the result a quiet NaN. + uint64_t raw = double_to_rawbits(value); + + uint32_t sign = raw >> 63; + uint32_t exponent = (1 << 8) - 1; + uint32_t payload = + static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw)); + payload |= (1 << 22); // Force a quiet NaN. + + return rawbits_to_float((sign << 31) | (exponent << 23) | payload); + } + + case FP_ZERO: + case FP_INFINITE: { + // In a C++ cast, any value representable in the target type will be + // unchanged. This is always the case for +/-0.0 and infinities. + return static_cast<float>(value); + } + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert double-to-float as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + uint64_t raw = double_to_rawbits(value); + // Extract the IEEE-754 double components. + uint32_t sign = raw >> 63; + // Extract the exponent and remove the IEEE-754 encoding bias. + int32_t exponent = + static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023; + // Extract the mantissa and add the implicit '1' bit. 
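+      // (Illustrative note: only FP_NORMAL inputs get the implicit bit ORed in below; subnormal doubles encode exponent 0 with no hidden 1, so their mantissa is used as-is.)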
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw); + if (std::fpclassify(value) == FP_NORMAL) { + mantissa |= (UINT64_C(1) << 52); + } + return FPRoundToFloat(sign, exponent, mantissa, round_mode); + } + } + + VIXL_UNREACHABLE(); + return value; +} + + +void Simulator::ld1(VectorFormat vform, + LogicVRegister dst, + uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + + +void Simulator::ld1(VectorFormat vform, + LogicVRegister dst, + int index, + uint64_t addr) { + dst.ReadUintFromMem(vform, index, addr); +} + + +void Simulator::ld1r(VectorFormat vform, + LogicVRegister dst, + uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + } +} + + +void Simulator::ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + addr1 += 2 * esize; + addr2 += 2 * esize; + } +} + + +void Simulator::ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); +} + + +void Simulator::ld2r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + } +} + + +void Simulator::ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + addr1 += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + + +void Simulator::ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); +} + + +void Simulator::ld3r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); 
+ dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + } +} + + +void Simulator::ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + addr1 += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + + +void Simulator::ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); + dst4.ReadUintFromMem(vform, index, addr4); +} + + +void Simulator::ld4r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + } +} + + +void Simulator::st1(VectorFormat vform, + LogicVRegister src, + uint64_t addr) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + src.WriteUintToMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + + +void Simulator::st1(VectorFormat vform, + LogicVRegister src, + int index, + uint64_t addr) { + src.WriteUintToMem(vform, index, addr); +} + + +void Simulator::st2(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + addr += 2 * esize; + addr2 += 2 * esize; + } +} + + +void Simulator::st2(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); +} + + +void Simulator::st3(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, 
i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + addr += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + + +void Simulator::st3(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); +} + + +void Simulator::st4(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + dst4.WriteUintToMem(vform, i, addr4); + addr += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + + +void Simulator::st4(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); + dst4.WriteUintToMem(vform, index, addr + 3 * esize); +} + + +LogicVRegister Simulator::cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = src1.Int(vform, i); + int64_t sb = src2.Int(vform, i); + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + bool result = false; + switch (cond) { + case eq: result = (ua == ub); break; + case ge: result = (sa >= sb); break; + case gt: result = (sa > sb) ; break; + case hi: result = (ua > ub) ; break; + case hs: result = (ua >= ub); break; + case lt: result = (sa < sb) ; break; + case le: result = (sa <= sb); break; + default: VIXL_UNREACHABLE(); break; + } + dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int imm, + Condition cond) { + SimVRegister temp; + LogicVRegister imm_reg = dup_immediate(vform, temp, imm); + return cmp(vform, dst, src1, imm_reg, cond); +} + + +LogicVRegister Simulator::cmptst(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::add(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + // TODO(all): consider assigning the result of LaneCountFromFormat to a local. + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua + ub; + if (ur < ua) { + dst.SetUnsignedSat(i, true); + } + + // Test for signed saturation. 
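+    // (Worked example, illustrative: for B-format lanes, IntLeftJustified(0x7f) + IntLeftJustified(0x01) flips the 64-bit sign bit even though both operands are positive, so the check below records positive saturation for that lane.)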
+ int64_t sa = src1.IntLeftJustified(vform, i); + int64_t sb = src2.IntLeftJustified(vform, i); + int64_t sr = sa + sb; + // If the signs of the operands are the same, but different from the result, + // there was an overflow. + if (((sa >= 0) == (sb >= 0)) && ((sa >= 0) != (sr >= 0))) { + dst.SetSignedSat(i, sa >= 0); + } + + dst.SetInt(vform, i, src1.Int(vform, i) + src2.Int(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uzp1(vform, temp1, src1, src2); + uzp2(vform, temp2, src1, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + sub(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mul(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mla(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mls(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + 
VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) { + uint16_t result = 0; + uint16_t extended_op2 = op2; + for (int i = 0; i < 8; ++i) { + if ((op1 >> i) & 1) { + result = result ^ (extended_op2 << i); + } + } + return result; +} + + +LogicVRegister Simulator::pmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, + PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i))); + } + return dst; +} + + +LogicVRegister Simulator::pmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidth(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, i), + src2.Uint(vform_src, i))); + } + return dst; +} + + +LogicVRegister Simulator::pmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform); + dst.ClearForWrite(vform); + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, lane_count + i), + 
src2.Uint(vform_src, lane_count + i))); + } + return dst; +} + + +LogicVRegister Simulator::sub(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + if (src2.Uint(vform, i) > src1.Uint(vform, i)) { + dst.SetUnsignedSat(i, false); + } + + // Test for signed saturation. + int64_t sa = src1.IntLeftJustified(vform, i); + int64_t sb = src2.IntLeftJustified(vform, i); + int64_t sr = sa - sb; + // If the signs of the operands are different, and the sign of the first + // operand doesn't match the result, there was an overflow. + if (((sa >= 0) != (sb >= 0)) && ((sa >= 0) != (sr >= 0))) { + dst.SetSignedSat(i, sr < 0); + } + + dst.SetInt(vform, i, src1.Int(vform, i) - src2.Int(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::and_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::orn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::eor(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) & ~imm; + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::bif(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = ~src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::bit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + 
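// (Illustrative note: a ^ ((a ^ b) & mask) yields b where a mask bit is 1 and keeps a where it is 0; bif (above), bit and bsl (below) differ only in which register supplies the kept bits, the inserted bits and the mask.) +   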
uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::bsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = src2.Uint(vform, i); + uint64_t operand2 = dst.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::sminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src1_val = src1.Int(vform, i); + int64_t src2_val = src2.Int(vform, i); + int64_t dst_val; + if (max == true) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetInt(vform, i, dst_val); + } + return dst; +} + + +LogicVRegister Simulator::smax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmax(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::smin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmax(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::sminmaxp(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + bool max) { + for (int i = 0; i < LaneCountFromFormat(vform); i += 2) { + int64_t src1_val = src.Int(vform, i); + int64_t src2_val = src.Int(vform, i + 1); + int64_t dst_val; + if (max == true) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? 
src1_val : src2_val; + } + dst.SetInt(vform, dst_index + (i >> 1), dst_val); + } + return dst; +} + + +LogicVRegister Simulator::smaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + sminmaxp(vform, dst, 0, src1, true); + sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true); + return dst; +} + + +LogicVRegister Simulator::sminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + sminmaxp(vform, dst, 0, src1, false); + sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false); + return dst; +} + + +LogicVRegister Simulator::addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VIXL_ASSERT(vform == kFormatD); + + int64_t dst_val = src.Int(kFormat2D, 0) + src.Int(kFormat2D, 1); + dst.ClearForWrite(vform); + dst.SetInt(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::addv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst + = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); + + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::saddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst + = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::uaddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst + = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + uint64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Uint(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetUint(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::sminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max) { + dst.ClearForWrite(vform); + int64_t dst_val = max ? INT64_MIN : INT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetInt(vform, i, 0); + int64_t src_val = src.Int(vform, i); + if (max == true) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? src_val : dst_val; + } + } + dst.SetInt(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::smaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + sminmaxv(vform, dst, src, true); + return dst; +} + + +LogicVRegister Simulator::sminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + sminmaxv(vform, dst, src, false); + return dst; +} + + +LogicVRegister Simulator::uminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src1_val = src1.Uint(vform, i); + uint64_t src2_val = src2.Uint(vform, i); + uint64_t dst_val; + if (max == true) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? 
src1_val : src2_val; + } + dst.SetUint(vform, i, dst_val); + } + return dst; +} + + +LogicVRegister Simulator::umax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmax(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::umin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmax(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::uminmaxp(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + bool max) { + for (int i = 0; i < LaneCountFromFormat(vform); i += 2) { + uint64_t src1_val = src.Uint(vform, i); + uint64_t src2_val = src.Uint(vform, i + 1); + uint64_t dst_val; + if (max == true) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetUint(vform, dst_index + (i >> 1), dst_val); + } + return dst; +} + + +LogicVRegister Simulator::umaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + uminmaxp(vform, dst, 0, src1, true); + uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true); + return dst; +} + + +LogicVRegister Simulator::uminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + uminmaxp(vform, dst, 0, src1, false); + uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false); + return dst; +} + + +LogicVRegister Simulator::uminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max) { + dst.ClearForWrite(vform); + uint64_t dst_val = max ? 0 : UINT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, 0); + uint64_t src_val = src.Uint(vform, i); + if (max == true) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? 
src_val : dst_val; + } + } + dst.SetUint(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uminmaxv(vform, dst, src, true); + return dst; +} + + +LogicVRegister Simulator::uminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uminmaxv(vform, dst, src, false); + return dst; +} + + +LogicVRegister Simulator::shl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::sshll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::sshll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl2(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::shll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll(vform, dst, src, shift); +} + + +LogicVRegister Simulator::shll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll2(vform, dst, src, shift); +} + + +LogicVRegister Simulator::ushll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::ushll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl2(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::sli(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted = src_lane << shift; + uint64_t mask = MaxUintFromFormat(vform) << shift; + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + + +LogicVRegister Simulator::sqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).SignedSaturate(vform); +} + + +LogicVRegister Simulator::uqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, 
shiftreg).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sqshlu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sri(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + VIXL_ASSERT((shift > 0) && + (shift <= static_cast<int>(LaneSizeInBitsFromFormat(vform)))); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted; + uint64_t mask; + if (shift == 64) { + shifted = 0; + mask = 0; + } else { + shifted = src_lane >> shift; + mask = MaxUintFromFormat(vform) >> shift; + } + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + + +LogicVRegister Simulator::ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return ushl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::sshr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return sshl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::ssra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::usra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::srsra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::ursra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::cls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::clz(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + 
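+// CNT: population count per lane. Each destination lane receives the number
+// of set bits in the corresponding source lane; the loop below tests and
+// shifts one bit at a time, so an 8-bit lane needs at most eight iterations.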
+LogicVRegister Simulator::cnt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t value = src.Uint(vform, i); + result[i] = 0; + for (int j = 0; j < laneSizeInBits; j++) { + result[i] += (value & 1); + value >>= 1; + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::sshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + int64_t lj_src_val = src1.IntLeftJustified(vform, i); + + // Set signed saturation state. + if ((shift_val > CountLeadingSignBits(lj_src_val)) && + (lj_src_val != 0)) { + dst.SetSignedSat(i, lj_src_val >= 0); + } + + // Set unsigned saturation state. + if (lj_src_val < 0) { + dst.SetUnsignedSat(i, false); + } else if ((shift_val > CountLeadingZeros(lj_src_val)) && + (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + int64_t src_val = src1.Int(vform, i); + if (shift_val > 63) { + dst.SetInt(vform, i, 0); + } else if (shift_val < -63) { + dst.SetRounding(i, src_val < 0); + dst.SetInt(vform, i, (src_val < 0) ? -1 : 0); + } else { + if (shift_val < 0) { + // Set rounding state. Rounding only needed on right shifts. + if (((src_val >> (-shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + src_val >>= -shift_val; + } else { + src_val <<= shift_val; + } + dst.SetInt(vform, i, src_val); + } + } + return dst; +} + + +LogicVRegister Simulator::ushl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + uint64_t lj_src_val = src1.UintLeftJustified(vform, i); + + // Set saturation state. + if ((shift_val > CountLeadingZeros(lj_src_val)) && (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + uint64_t src_val = src1.Uint(vform, i); + if ((shift_val > 63) || (shift_val < -64)) { + dst.SetUint(vform, i, 0); + } else { + if (shift_val < 0) { + // Set rounding state. Rounding only needed on right shifts. + if (((src_val >> (-shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + if (shift_val == -64) { + src_val = 0; + } else { + src_val >>= -shift_val; + } + } else { + src_val <<= shift_val; + } + dst.SetUint(vform, i, src_val); + } + } + return dst; +} + + +LogicVRegister Simulator::neg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + dst.SetInt(vform, i, -sa); + } + return dst; +} + + +LogicVRegister Simulator::suqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = dst.IntLeftJustified(vform, i); + uint64_t ub = src.UintLeftJustified(vform, i); + int64_t sr = sa + ub; + + if (sr < sa) { // Test for signed positive saturation. 
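+      // sa holds the signed accumulator and ub the unsigned addend, both
+      // left-justified in 64 bits. Adding the non-negative ub can only move
+      // the value towards positive infinity, so a sum that wraps below sa
+      // indicates overflow and the lane saturates to the maximum signed value.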
+ dst.SetInt(vform, i, MaxIntFromFormat(vform)); + } else { + dst.SetInt(vform, i, dst.Int(vform, i) + src.Int(vform, i)); + } + } + return dst; +} + + +LogicVRegister Simulator::usqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = dst.UintLeftJustified(vform, i); + int64_t sb = src.IntLeftJustified(vform, i); + uint64_t ur = ua + sb; + + if ((sb > 0) && (ur <= ua)) { + dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation. + } else if ((sb < 0) && (ur >= ua)) { + dst.SetUint(vform, i, 0); // Negative saturation. + } else { + dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i)); + } + } + return dst; +} + + +LogicVRegister Simulator::abs(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + if (sa < 0) { + dst.SetInt(vform, i, -sa); + } else { + dst.SetInt(vform, i, sa); + } + } + return dst; +} + + +LogicVRegister Simulator::extractnarrow(VectorFormat dstform, + LogicVRegister dst, + bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned) { + bool upperhalf = false; + VectorFormat srcform = kFormatUndefined; + int64_t ssrc[8]; + uint64_t usrc[8]; + + switch (dstform) { + case kFormat8B : upperhalf = false; srcform = kFormat8H; break; + case kFormat16B: upperhalf = true; srcform = kFormat8H; break; + case kFormat4H : upperhalf = false; srcform = kFormat4S; break; + case kFormat8H : upperhalf = true; srcform = kFormat4S; break; + case kFormat2S : upperhalf = false; srcform = kFormat2D; break; + case kFormat4S : upperhalf = true; srcform = kFormat2D; break; + case kFormatB : upperhalf = false; srcform = kFormatH; break; + case kFormatH : upperhalf = false; srcform = kFormatS; break; + case kFormatS : upperhalf = false; srcform = kFormatD; break; + default:VIXL_UNIMPLEMENTED(); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + ssrc[i] = src.Int(srcform, i); + usrc[i] = src.Uint(srcform, i); + } + + int offset; + if (upperhalf) { + offset = LaneCountFromFormat(dstform) / 2; + } else { + offset = 0; + dst.ClearForWrite(dstform); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + // Test for signed saturation + if (ssrc[i] > MaxIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, true); + } else if (ssrc[i] < MinIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, false); + } + + // Test for unsigned saturation + if (srcIsSigned) { + if (ssrc[i] > static_cast<int64_t>(MaxUintFromFormat(dstform))) { + dst.SetUnsignedSat(offset + i, true); + } else if (ssrc[i] < 0) { + dst.SetUnsignedSat(offset + i, false); + } + } else { + if (usrc[i] > MaxUintFromFormat(dstform)) { + dst.SetUnsignedSat(offset + i, true); + } + } + + int64_t result; + if (srcIsSigned) { + result = ssrc[i] & MaxUintFromFormat(dstform); + } else { + result = usrc[i] & MaxUintFromFormat(dstform); + } + + if (dstIsSigned) { + dst.SetInt(dstform, offset + i, result); + } else { + dst.SetUint(dstform, offset + i, result); + } + } + return dst; +} + + +LogicVRegister Simulator::xtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, true, src, true); +} + + +LogicVRegister Simulator::sqxtn(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& src) { + return extractnarrow(vform, dst, true, src, true).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqxtun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, false, src, true).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, false, src, false).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::absdiff(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool issigned) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (issigned) { + int64_t sr = src1.Int(vform, i) - src2.Int(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetInt(vform, i, sr); + } else { + int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetUint(vform, i, sr); + } + } + return dst; +} + + +LogicVRegister Simulator::saba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + absdiff(vform, temp, src1, src2, true); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::uaba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + absdiff(vform, temp, src1, src2, false); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::not_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, ~src.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::rbit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + uint64_t reversed_value; + uint64_t value; + for (int i = 0; i < laneCount; i++) { + value = src.Uint(vform, i); + reversed_value = 0; + for (int j = 0; j < laneSizeInBits; j++) { + reversed_value = (reversed_value << 1) | (value & 1); + value >>= 1; + } + result[i] = reversed_value; + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::rev(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int revSize) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSize = LaneSizeInBytesFromFormat(vform); + int lanesPerLoop = revSize / laneSize; + for (int i = 0; i < laneCount; i += lanesPerLoop) { + for (int j = 0; j < lanesPerLoop; j++) { + result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j); + } + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::rev16(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 2); +} + + +LogicVRegister Simulator::rev32(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 4); +} + + +LogicVRegister Simulator::rev64(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 8); +} + + +LogicVRegister Simulator::addlp(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src, + bool is_signed, + bool do_accumulate) { + VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform); + + int64_t sr[16]; + uint64_t ur[16]; + + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + if (is_signed) { + sr[i] = src.Int(vformsrc, 2 * i) + src.Int(vformsrc, 2 * i + 1); + } else { + ur[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1); + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + if (do_accumulate) { + if (is_signed) { + dst.SetInt(vform, i, dst.Int(vform, i) + sr[i]); + } else { + dst.SetUint(vform, i, dst.Uint(vform, i) + ur[i]); + } + } else { + if (is_signed) { + dst.SetInt(vform, i, sr[i]); + } else { + dst.SetUint(vform, i, ur[i]); + } + } + } + + return dst; +} + + +LogicVRegister Simulator::saddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, false); +} + + +LogicVRegister Simulator::uaddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, false); +} + + +LogicVRegister Simulator::sadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, true); +} + + +LogicVRegister Simulator::uadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, true); +} + + +LogicVRegister Simulator::ext(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + uint8_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount - index; ++i) { + result[i] = src1.Uint(vform, i + index); + } + for (int i = 0; i < index; ++i) { + result[laneCount - index + i] = src2.Uint(vform, i); + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::dup_element(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int src_index) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = src.Uint(vform, src_index); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + + +LogicVRegister Simulator::dup_immediate(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = imm & MaxUintFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + + +LogicVRegister Simulator::ins_element(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + int src_index) { + dst.SetUint(vform, dst_index, src.Uint(vform, src_index)); + return dst; +} + + +LogicVRegister Simulator::ins_immediate(VectorFormat vform, + LogicVRegister dst, + int dst_index, + uint64_t imm) { + uint64_t value = imm & MaxUintFromFormat(vform); + dst.SetUint(vform, dst_index, value); + return dst; +} + + +LogicVRegister Simulator::movi(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, imm); + } + return dst; +} + + +LogicVRegister Simulator::mvni(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + 
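+  // MVNI (move inverted immediate): each destination lane is written with the
+  // bitwise NOT of the immediate, in contrast to movi() above, which writes
+  // the immediate itself.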
dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, ~imm); + } + return dst; +} + + +LogicVRegister Simulator::orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) | imm; + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::uxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src.Uint(vform_half, i)); + } + return dst; +} + + +LogicVRegister Simulator::sxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetInt(vform, i, src.Int(vform_half, i)); + } + return dst; +} + + +LogicVRegister Simulator::uxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i)); + } + return dst; +} + + +LogicVRegister Simulator::sxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetInt(vform, i, src.Int(vform_half, lane_count + i)); + } + return dst; +} + + +LogicVRegister Simulator::shrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vform_src = VectorFormatDoubleWidth(vform); + VectorFormat vform_dst = vform; + LogicVRegister shifted_src = ushr(vform_src, temp, src, shift); + return extractnarrow(vform_dst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::shrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::rshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::rshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& tab, + const LogicVRegister& ind) { + movi(vform, dst, 0); + return tbx(vform, dst, tab, ind); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + movi(vform, dst, 0); + return tbx(vform, dst, tab, tab2, ind); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + movi(vform, dst, 0); + return tbx(vform, dst, tab, tab2, tab3, ind); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + movi(vform, dst, 0); + return tbx(vform, dst, tab, tab2, tab3, tab4, ind); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + switch (j >> 4) { + case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break; + } + } + return dst; +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + switch (j >> 4) { + case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break; + case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break; + } + } + return dst; +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + switch (j >> 4) { + case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break; + case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break; + case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break; + } + } + return dst; +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + switch (j >> 4) { + case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break; + case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break; + case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break; + case 3: dst.SetUint(vform, i, tab4.Uint(kFormat16B, j & 15)); break; + } + } + return dst; +} + + +LogicVRegister Simulator::uqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return shrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return shrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return rshrn(vform, dst, 
src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::uaddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uaddl2(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uaddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::uaddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::saddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::saddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::saddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::saddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::usubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::usubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::usubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::usubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::ssubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::ssubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::ssubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister 
Simulator::ssubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::uabal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uabal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sabal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sabal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uabdl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, false); + return dst; +} + + +LogicVRegister Simulator::uabdl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, false); + return dst; +} + + +LogicVRegister Simulator::sabdl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, true); + return dst; +} + + +LogicVRegister Simulator::sabdl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, true); + return dst; +} + + +LogicVRegister Simulator::umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + 
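+  // SMULL2 sign-extends the upper-half lanes of both sources (sxtl2 above)
+  // and multiplies them lane by lane into the double-width destination.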
mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull2(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow. + // To avoid this, we use (src1 * src2 + 1 << (esize - 2)) >> (esize - 1) + // which is same as (2 * src1 * src2 + 1 << (esize - 1)) >> esize. + + int esize = LaneSizeInBitsFromFormat(vform); + int round_const = round ? (1 << (esize - 2)) : 0; + int64_t product; + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + product = src1.Int(vform, i) * src2.Int(vform, i); + product += round_const; + product = product >> (esize - 1); + + if (product > MaxIntFromFormat(vform)) { + product = MaxIntFromFormat(vform); + } else if (product < MinIntFromFormat(vform)) { + product = MinIntFromFormat(vform); + } + dst.SetInt(vform, i, product); + } + return dst; +} + + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sqrdmulh(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::addhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::addhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::raddhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::raddhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::subhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::subhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::rsubhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::rsubhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::trn1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, 2 * i); + result[(2 * i) + 1] = src2.Uint(vform, 2 * i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::trn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, (2 * i) + 1); + result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::zip1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, i); + result[(2 * i) + 1] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::zip2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, pairs + i); + result[(2 * i) + 1] = src2.Uint(vform, pairs + i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::uzp1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[2 * i]); + } + return dst; +} + + +LogicVRegister Simulator::uzp2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[ (2 * i) + 1]); + } + return dst; +} + + +template <typename T> +T Simulator::FPAdd(T op1, T op2) { + T result = FPProcessNaNs(op1, op2); + if 
(std::isnan(result)) return result; + + if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) { + // inf + -inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 + op2; + } +} + + +template <typename T> +T Simulator::FPSub(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2)); + + if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) { + // inf - inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 - op2; + } +} + + +template <typename T> +T Simulator::FPMul(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2)); + + if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 * op2; + } +} + + +template<typename T> +T Simulator::FPMulx(T op1, T op2) { + if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns +/-2.0. + T two = 2.0; + return copysign(1.0, op1) * copysign(1.0, op2) * two; + } + return FPMul(op1, op2); +} + + +template<typename T> +T Simulator::FPMulAdd(T a, T op1, T op2) { + T result = FPProcessNaNs3(a, op1, op2); + + T sign_a = copysign(1.0, a); + T sign_prod = copysign(1.0, op1) * copysign(1.0, op2); + bool isinf_prod = std::isinf(op1) || std::isinf(op2); + bool operation_generates_nan = + (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0 + (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf + (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf + + if (std::isnan(result)) { + // Generated NaNs override quiet NaNs propagated from a. + if (operation_generates_nan && IsQuietNaN(a)) { + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + return result; + } + } + + // If the operation would produce a NaN, return the default NaN. + if (operation_generates_nan) { + FPProcessException(); + return FPDefaultNaN<T>(); + } + + // Work around broken fma implementations for exact zero results: The sign of + // exact 0.0 results is positive unless both a and op1 * op2 are negative. + if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { + return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0; + } + + result = FusedMultiplyAdd(op1, op2, a); + VIXL_ASSERT(!std::isnan(result)); + + // Work around broken fma implementations for rounded zero results: If a is + // 0.0, the sign of the result is the sign of op1 * op2 before rounding. + if ((a == 0.0) && (result == 0.0)) { + return copysign(0.0, sign_prod); + } + + return result; +} + + +template <typename T> +T Simulator::FPDiv(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2)); + + if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { + // inf / inf and 0.0 / 0.0 return the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + if (op2 == 0.0) FPProcessException(); + + // Other cases should be handled by standard arithmetic. 
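+    // A division by zero that reaches this point returns a correctly signed
+    // infinity, matching the hardware behaviour after the exception has been
+    // recorded above.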
+ return op1 / op2; + } +} + + +template <typename T> +T Simulator::FPSqrt(T op) { + if (std::isnan(op)) { + return FPProcessNaN(op); + } else if (op < 0.0) { + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + return sqrt(op); + } +} + + +template <typename T> +T Simulator::FPMax(T a, T b) { + T result = FPProcessNaNs(a, b); + if (std::isnan(result)) return result; + + if ((a == 0.0) && (b == 0.0) && + (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return +0.0. + return 0.0; + } else { + return (a > b) ? a : b; + } +} + + +template <typename T> +T Simulator::FPMaxNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64NegativeInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64NegativeInfinity; + } + + T result = FPProcessNaNs(a, b); + return std::isnan(result) ? result : FPMax(a, b); +} + + +template <typename T> +T Simulator::FPMin(T a, T b) { + T result = FPProcessNaNs(a, b); + if (std::isnan(result)) return result; + + if ((a == 0.0) && (b == 0.0) && + (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return -0.0. + return -0.0; + } else { + return (a < b) ? a : b; + } +} + + +template <typename T> +T Simulator::FPMinNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64PositiveInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64PositiveInfinity; + } + + T result = FPProcessNaNs(a, b); + return std::isnan(result) ? result : FPMin(a, b); +} + + +template <typename T> +T Simulator::FPRecipStepFused(T op1, T op2) { + const T two = 2.0; + if ((std::isinf(op1) && (op2 == 0.0)) + || ((op1 == 0.0) && (std::isinf(op2)))) { + return two; + } else if (std::isinf(op1) || std::isinf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + return FusedMultiplyAdd(op1, op2, two); + } +} + + +template <typename T> +T Simulator::FPRSqrtStepFused(T op1, T op2) { + const T one_point_five = 1.5; + const T two = 2.0; + + if ((std::isinf(op1) && (op2 == 0.0)) + || ((op1 == 0.0) && (std::isinf(op2)))) { + return one_point_five; + } else if (std::isinf(op1) || std::isinf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + // The multiply-add-halve operation must be fully fused, so avoid interim + // rounding by checking which operand can be losslessly divided by two + // before doing the multiply-add. + if (std::isnormal(op1 / two)) { + return FusedMultiplyAdd(op1 / two, op2, one_point_five); + } else if (std::isnormal(op2 / two)) { + return FusedMultiplyAdd(op1, op2 / two, one_point_five); + } else { + // Neither operand is normal after halving: the result is dominated by + // the addition term, so just return that. + return one_point_five; + } + } +} + + +double Simulator::FPRoundInt(double value, FPRounding round_mode) { + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + return value; + } else if (std::isnan(value)) { + return FPProcessNaN(value); + } + + double int_result = std::floor(value); + double error = value - int_result; + switch (round_mode) { + case FPTieAway: { + // Take care of correctly handling the range ]-0.5, -0.0], which must + // yield -0.0. 
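// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// FPTieAway and FPTieEven differ only in how exact halfway cases are resolved:
// away from zero versus towards the even integer. The C library exposes the
// same split: std::round() breaks ties away from zero, while std::nearbyint()
// follows the current rounding mode (round-to-nearest-even by default).
#include <cmath>
#include <cstdio>

int main() {
  const double samples[] = {0.5, 1.5, 2.5, -0.4, -0.5};
  for (double v : samples) {
    std::printf("%+.1f  ties-away: %+.1f  ties-to-even: %+.1f\n",
                v, std::round(v), std::nearbyint(v));
  }
  // Expected: 0.5 -> +1.0 / +0.0, 2.5 -> +3.0 / +2.0, -0.5 -> -1.0 / -0.0.
  return 0;
}
// ---------------------------------------------------------------------------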
+ if ((-0.5 < value) && (value < 0.0)) { + int_result = -0.0; + + } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is positive, round up. + int_result++; + } + break; + } + case FPTieEven: { + // Take care of correctly handling the range [-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 <= value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is odd, round up. + } else if ((error > 0.5) || + ((error == 0.5) && (std::fmod(int_result, 2) != 0))) { + int_result++; + } + break; + } + case FPZero: { + // If value>0 then we take floor(value) + // otherwise, ceil(value). + if (value < 0) { + int_result = ceil(value); + } + break; + } + case FPNegativeInfinity: { + // We always use floor(value). + break; + } + case FPPositiveInfinity: { + // Take care of correctly handling the range ]-1.0, -0.0], which must + // yield -0.0. + if ((-1.0 < value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is non-zero, round up. + } else if (error > 0.0) { + int_result++; + } + break; + } + default: VIXL_UNIMPLEMENTED(); + } + return int_result; +} + + +int32_t Simulator::FPToInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxInt) { + return kWMaxInt; + } else if (value < kWMinInt) { + return kWMinInt; + } + return std::isnan(value) ? 0 : static_cast<int32_t>(value); +} + + +int64_t Simulator::FPToInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxInt) { + return kXMaxInt; + } else if (value < kXMinInt) { + return kXMinInt; + } + return std::isnan(value) ? 0 : static_cast<int64_t>(value); +} + + +uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxUInt) { + return kWMaxUInt; + } else if (value < 0.0) { + return 0; + } + return std::isnan(value) ? 0 : static_cast<uint32_t>(value); +} + + +uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxUInt) { + return kXMaxUInt; + } else if (value < 0.0) { + return 0; + } + return std::isnan(value) ? 
0 : static_cast<uint64_t>(value); +} + + +#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ +template <typename T> \ +LogicVRegister Simulator::FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + dst.ClearForWrite(vform); \ + for (int i = 0; i < LaneCountFromFormat(vform); i++) { \ + T op1 = src1.Float<T>(i); \ + T op2 = src2.Float<T>(i); \ + T result; \ + if (PROCNAN) { \ + result = FPProcessNaNs(op1, op2); \ + if (!std::isnan(result)) { \ + result = OP(op1, op2); \ + } \ + } else { \ + result = OP(op1, op2); \ + } \ + dst.SetFloat(i, result); \ + } \ + return dst; \ +} \ + \ +LogicVRegister Simulator::FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { \ + FN<float>(vform, dst, src1, src2); \ + } else { \ + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); \ + FN<double>(vform, dst, src1, src2); \ + } \ + return dst; \ +} +NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP) +#undef DEFINE_NEON_FP_VECTOR_OP + + +LogicVRegister Simulator::fnmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = fmul(vform, temp, src1, src2); + return fneg(vform, dst, product); +} + + +template <typename T> +LogicVRegister Simulator::frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float<T>(i); + T op2 = src2.Float<T>(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, std::isnan(result) ? result : FPRecipStepFused(op1, op2)); + } + return dst; +} + + +LogicVRegister Simulator::frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frecps<float>(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frecps<double>(vform, dst, src1, src2); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float<T>(i); + T op2 = src2.Float<T>(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, std::isnan(result) ? 
result : FPRSqrtStepFused(op1, op2)); + } + return dst; +} + + +LogicVRegister Simulator::frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frsqrts<float>(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frsqrts<double>(vform, dst, src1, src2); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + bool result = false; + T op1 = src1.Float<T>(i); + T op2 = src2.Float<T>(i); + T nan_result = FPProcessNaNs(op1, op2); + if (!std::isnan(nan_result)) { + switch (cond) { + case eq: result = (op1 == op2); break; + case ge: result = (op1 >= op2); break; + case gt: result = (op1 > op2) ; break; + case le: result = (op1 <= op2); break; + case lt: result = (op1 < op2) ; break; + default: VIXL_UNREACHABLE(); break; + } + } + dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmp<float>(vform, dst, src1, src2, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fcmp<double>(vform, dst, src1, src2, cond); + } + return dst; +} + + +LogicVRegister Simulator::fcmp_zero(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + Condition cond) { + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister zero_reg = dup_immediate(vform, temp, float_to_rawbits(0.0)); + fcmp<float>(vform, dst, src, zero_reg, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister zero_reg = dup_immediate(vform, temp, + double_to_rawbits(0.0)); + fcmp<double>(vform, dst, src, zero_reg, cond); + } + return dst; +} + + +LogicVRegister Simulator::fabscmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + SimVRegister temp1, temp2; + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister abs_src1 = fabs_<float>(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_<float>(vform, temp2, src2); + fcmp<float>(vform, dst, abs_src1, abs_src2, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister abs_src1 = fabs_<double>(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_<double>(vform, temp2, src2); + fcmp<double>(vform, dst, abs_src1, abs_src2, cond); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = src1.Float<T>(i); + T op2 = src2.Float<T>(i); + T acc = dst.Float<T>(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fmla<float>(vform, dst, src1, src2); + } else { + 
VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fmla<double>(vform, dst, src1, src2); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float<T>(i); + T op2 = src2.Float<T>(i); + T acc = dst.Float<T>(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fmls<float>(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fmls<double>(vform, dst, src1, src2); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float<T>(i); + op = -op; + dst.SetFloat(i, op); + } + return dst; +} + + +LogicVRegister Simulator::fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fneg<float>(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fneg<double>(vform, dst, src); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float<T>(i); + if (copysign(1.0, op) < 0.0) { + op = -op; + } + dst.SetFloat(i, op); + } + return dst; +} + + +LogicVRegister Simulator::fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fabs_<float>(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fabs_<double>(vform, dst, src); + } + return dst; +} + + +LogicVRegister Simulator::fabd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + fsub(vform, temp, src1, src2); + fabs_(vform, dst, temp); + return dst; +} + + +LogicVRegister Simulator::fsqrt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float result = FPSqrt(src.Float<float>(i)); + dst.SetFloat(i, result); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double result = FPSqrt(src.Float<double>(i)); + dst.SetFloat(i, result); + } + } + return dst; +} + + +#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \ +LogicVRegister Simulator::FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + SimVRegister temp1, temp2; \ + uzp1(vform, temp1, src1, src2); \ + uzp2(vform, temp2, src1, src2); \ + FN(vform, dst, temp1, temp2); \ + return dst; \ +} \ + \ +LogicVRegister Simulator::FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src) { \ + if (vform == kFormatS) { \ + float result = OP(src.Float<float>(0), src.Float<float>(1)); \ + 
dst.SetFloat(0, result); \ + } else { \ + VIXL_ASSERT(vform == kFormatD); \ + double result = OP(src.Float<double>(0), src.Float<double>(1)); \ + dst.SetFloat(0, result); \ + } \ + dst.ClearForWrite(vform); \ + return dst; \ +} +NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP) +#undef DEFINE_NEON_FP_PAIR_OP + + +LogicVRegister Simulator::fminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPMinMaxOp Op) { + VIXL_ASSERT(vform == kFormat4S); + USE(vform); + float result1 = (this->*Op)(src.Float<float>(0), src.Float<float>(1)); + float result2 = (this->*Op)(src.Float<float>(2), src.Float<float>(3)); + float result = (this->*Op)(result1, result2); + dst.ClearForWrite(kFormatS); + dst.SetFloat<float>(0, result); + return dst; +} + + +LogicVRegister Simulator::fmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return fminmaxv(vform, dst, src, &Simulator::FPMax); +} + + +LogicVRegister Simulator::fminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return fminmaxv(vform, dst, src, &Simulator::FPMin); +} + + +LogicVRegister Simulator::fmaxnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return fminmaxv(vform, dst, src, &Simulator::FPMaxNM); +} + + +LogicVRegister Simulator::fminnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return fminmaxv(vform, dst, src, &Simulator::FPMinNM); +} + + +LogicVRegister Simulator::fmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmul<float>(vform, dst, src1, index_reg); + + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmul<double>(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmla<float>(vform, dst, src1, index_reg); + + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmla<double>(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmls<float>(vform, dst, src1, index_reg); + + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmls<double>(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmulx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmulx<float>(vform, dst, src1, 
index_reg); + + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmulx<double>(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::frint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float<float>(i); + float rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !std::isnan(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat<float>(i, rounded); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float<double>(i); + double rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !std::isnan(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat<double>(i, rounded); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float<float>(i) * std::pow(2.0f, fbits); + dst.SetInt(vform, i, FPToInt32(op, rounding_mode)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float<double>(i) * std::pow(2.0, fbits); + dst.SetInt(vform, i, FPToInt64(op, rounding_mode)); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float<float>(i) * std::pow(2.0f, fbits); + dst.SetUint(vform, i, FPToUInt32(op, rounding_mode)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float<double>(i) * std::pow(2.0, fbits); + dst.SetUint(vform, i, FPToUInt64(op, rounding_mode)); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) { + dst.SetFloat(i, FPToFloat(src.Float<float16>(i))); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) { + dst.SetFloat(i, FPToDouble(src.Float<float>(i))); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int lane_count = LaneCountFromFormat(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < lane_count; i++) { + dst.SetFloat(i, FPToFloat(src.Float<float16>(i + lane_count))); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < lane_count; i++) { + dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count))); + } + } + return dst; +} + + +LogicVRegister 
Simulator::fcvtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat16(src.Float<float>(i), FPTieEven)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven)); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int lane_count = LaneCountFromFormat(vform) / 2; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, FPToFloat16(src.Float<float>(i), FPTieEven)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPTieEven)); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtxn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPRoundOdd)); + } + return dst; +} + + +LogicVRegister Simulator::fcvtxn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + int lane_count = LaneCountFromFormat(vform) / 2; + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPRoundOdd)); + } + return dst; +} + + +// Based on reference C function recip_sqrt_estimate from ARM ARM. 
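// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// The estimate routine defined just below quantises its result to 8 fractional
// bits (a multiple of 1/256), and its callers first scale the operand into
// [0.25, 1.0). Duplicating the routine in a standalone program makes the
// precision easy to compare against a full 1/sqrt():
#include <cmath>
#include <cstdio>

static double RecipSqrtEstimate(double a) {  // same algorithm as the function below
  double r;
  if (a < 0.5) {
    int q0 = static_cast<int>(a * 512.0);
    r = 1.0 / std::sqrt((static_cast<double>(q0) + 0.5) / 512.0);
  } else {
    int q1 = static_cast<int>(a * 256.0);
    r = 1.0 / std::sqrt((static_cast<double>(q1) + 0.5) / 256.0);
  }
  int s = static_cast<int>(256.0 * r + 0.5);
  return static_cast<double>(s) / 256.0;
}

int main() {
  const double inputs[] = {0.3, 0.5, 0.64, 0.99};
  for (double a : inputs) {
    std::printf("a=%.2f  estimate=%.6f  exact=%.6f\n",
                a, RecipSqrtEstimate(a), 1.0 / std::sqrt(a));
  }
  return 0;
}
// ---------------------------------------------------------------------------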
+double Simulator::recip_sqrt_estimate(double a) { + int q0, q1, s; + double r; + if (a < 0.5) { + q0 = static_cast<int>(a * 512.0); + r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0); + } else { + q1 = static_cast<int>(a * 256.0); + r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0); + } + s = static_cast<int>(256.0 * r + 0.5); + return static_cast<double>(s) / 256.0; +} + + +static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) { + return unsigned_bitextract_64(start_bit, end_bit, val); +} + + +template <typename T> +T Simulator::FPRecipSqrtEstimate(T op) { + if (std::isnan(op)) { + return FPProcessNaN(op); + } else if (op == 0.0) { + if (copysign(1.0, op) < 0.0) { + return kFP64NegativeInfinity; + } else { + return kFP64PositiveInfinity; + } + } else if (copysign(1.0, op) < 0.0) { + FPProcessException(); + return FPDefaultNaN<T>(); + } else if (std::isinf(op)) { + return 0.0; + } else { + uint64_t fraction; + int exp, result_exp; + + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + exp = float_exp(op); + fraction = float_mantissa(op); + fraction <<= 29; + } else { + exp = double_exp(op); + fraction = double_mantissa(op); + } + + if (exp == 0) { + while (Bits(fraction, 51, 51) == 0) { + fraction = Bits(fraction, 50, 0) << 1; + exp -= 1; + } + fraction = Bits(fraction, 50, 0) << 1; + } + + double scaled; + if (Bits(exp, 0, 0) == 0) { + scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44); + } else { + scaled = double_pack(0, 1021, Bits(fraction, 51, 44) << 44); + } + + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + result_exp = (380 - exp) / 2; + } else { + result_exp = (3068 - exp) / 2; + } + + uint64_t estimate = double_to_rawbits(recip_sqrt_estimate(scaled)); + + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0)); + uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29)); + return float_pack(0, exp_bits, est_bits); + } else { + return double_pack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0)); + } + } +} + + +LogicVRegister Simulator::frsqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float<float>(i); + dst.SetFloat(i, FPRecipSqrtEstimate<float>(input)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float<double>(i); + dst.SetFloat(i, FPRecipSqrtEstimate<double>(input)); + } + } + return dst; +} + +template <typename T> +T Simulator::FPRecipEstimate(T op, FPRounding rounding) { + uint32_t sign; + + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + sign = float_sign(op); + } else { + sign = double_sign(op); + } + + if (std::isnan(op)) { + return FPProcessNaN(op); + } else if (std::isinf(op)) { + return (sign == 1) ? -0.0 : 0.0; + } else if (op == 0.0) { + FPProcessException(); // FPExc_DivideByZero exception. + return (sign == 1) ? 
kFP64NegativeInfinity : kFP64PositiveInfinity; + } else if (((sizeof(T) == sizeof(float)) && // NOLINT(runtime/sizeof) + (std::fabs(op) < std::pow(2.0, -128.0))) || + ((sizeof(T) == sizeof(double)) && // NOLINT(runtime/sizeof) + (std::fabs(op) < std::pow(2.0, -1024.0)))) { + bool overflow_to_inf = false; + switch (rounding) { + case FPTieEven: overflow_to_inf = true; break; + case FPPositiveInfinity: overflow_to_inf = (sign == 0); break; + case FPNegativeInfinity: overflow_to_inf = (sign == 1); break; + case FPZero: overflow_to_inf = false; break; + default: break; + } + FPProcessException(); // FPExc_Overflow and FPExc_Inexact. + if (overflow_to_inf) { + return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity; + } else { + // Return FPMaxNormal(sign). + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + return float_pack(sign, 0xfe, 0x07fffff); + } else { + return double_pack(sign, 0x7fe, 0x0fffffffffffffl); + } + } + } else { + uint64_t fraction; + int exp, result_exp; + uint32_t sign; + + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + sign = float_sign(op); + exp = float_exp(op); + fraction = float_mantissa(op); + fraction <<= 29; + } else { + sign = double_sign(op); + exp = double_exp(op); + fraction = double_mantissa(op); + } + + if (exp == 0) { + if (Bits(fraction, 51, 51) == 0) { + exp -= 1; + fraction = Bits(fraction, 49, 0) << 2; + } else { + fraction = Bits(fraction, 50, 0) << 1; + } + } + + double scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44); + + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + result_exp = (253 - exp); // In range 253-254 = -1 to 253+1 = 254. + } else { + result_exp = (2045 - exp); // In range 2045-2046 = -1 to 2045+1 = 2046. + } + + double estimate = recip_estimate(scaled); + + fraction = double_mantissa(estimate); + if (result_exp == 0) { + fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1); + } else if (result_exp == -1) { + fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2); + result_exp = 0; + } + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0)); + uint32_t frac_bits = static_cast<uint32_t>(Bits(fraction, 51, 29)); + return float_pack(sign, exp_bits, frac_bits); + } else { + return double_pack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0)); + } + } +} + + +LogicVRegister Simulator::frecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding round) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float<float>(i); + dst.SetFloat(i, FPRecipEstimate<float>(input, round)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float<double>(i); + dst.SetFloat(i, FPRecipEstimate<double>(input, round)); + } + } + return dst; +} + + +LogicVRegister Simulator::ursqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x3FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast<uint32_t>(dp_result); + } + dst.SetUint(vform, i, 
result); + } + return dst; +} + + +// Based on reference C function recip_estimate from ARM ARM. +double Simulator::recip_estimate(double a) { + int q, s; + double r; + q = static_cast<int>(a * 512.0); + r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0); + s = static_cast<int>(256.0 * r + 0.5); + return static_cast<double>(s) / 256.0; +} + + +LogicVRegister Simulator::urecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x7FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast<uint32_t>(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + +template <typename T> +LogicVRegister Simulator::frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float<T>(i); + T result; + if (std::isnan(op)) { + result = FPProcessNaN(op); + } else { + int exp; + uint32_t sign; + if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof) + sign = float_sign(op); + exp = float_exp(op); + exp = (exp == 0) ? (0xFF - 1) : static_cast<int>(Bits(~exp, 7, 0)); + result = float_pack(sign, exp, 0); + } else { + sign = double_sign(op); + exp = double_exp(op); + exp = (exp == 0) ? (0x7FF - 1) : static_cast<int>(Bits(~exp, 10, 0)); + result = double_pack(sign, exp, 0); + } + } + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frecpx<float>(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frecpx<double>(vform, dst, src); + } + return dst; +} + +LogicVRegister Simulator::scvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + float result = FixedToFloat(src.Int(kFormatS, i), fbits, round); + dst.SetFloat<float>(i, result); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + double result = FixedToDouble(src.Int(kFormatD, i), fbits, round); + dst.SetFloat<double>(i, result); + } + } + return dst; +} + + +LogicVRegister Simulator::ucvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round); + dst.SetFloat<float>(i, result); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round); + dst.SetFloat<double>(i, result); + } + } + return dst; +} + + +} // namespace vixl + +#endif // JS_SIMULATOR_ARM64 diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp new file mode 100644 index 000000000..02c62ecdb --- /dev/null +++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp @@ -0,0 +1,2007 @@ +// Copyright 2015, ARM Limited +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/MacroAssembler-vixl.h" + +#include <ctype.h> + +namespace vixl { + +MacroAssembler::MacroAssembler() + : js::jit::Assembler(), + sp_(x28), + tmp_list_(ip0, ip1), + fptmp_list_(d31) +{ +} + + +void MacroAssembler::FinalizeCode() { + Assembler::FinalizeCode(); +} + + +int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm, + const Register &rd, + uint64_t imm) { + bool emit_code = (masm != NULL); + VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); + // The worst case for size is mov 64-bit immediate to sp: + // * up to 4 instructions to materialise the constant + // * 1 instruction to move to sp + MacroEmissionCheckScope guard(masm); + + // Immediates on Aarch64 can be produced using an initial value, and zero to + // three move keep operations. + // + // Initial values can be generated with: + // 1. 64-bit move zero (movz). + // 2. 32-bit move inverted (movn). + // 3. 64-bit move inverted. + // 4. 32-bit orr immediate. + // 5. 64-bit orr immediate. + // Move-keep may then be used to modify each of the 16-bit half words. + // + // The code below supports all five initial value generators, and + // applying move-keep operations to move-zero and move-inverted initial + // values. + + // Try to move the immediate in one instruction, and if that fails, switch to + // using multiple instructions. + if (OneInstrMoveImmediateHelper(masm, rd, imm)) { + return 1; + } else { + int instruction_count = 0; + unsigned reg_size = rd.size(); + + // Generic immediate case. Imm will be represented by + // [imm3, imm2, imm1, imm0], where each imm is 16 bits. + // A move-zero or move-inverted is generated for the first non-zero or + // non-0xffff immX, and a move-keep for subsequent non-zero immX. + + uint64_t ignored_halfword = 0; + bool invert_move = false; + // If the number of 0xffff halfwords is greater than the number of 0x0000 + // halfwords, it's more efficient to use move-inverted. 
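// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// The decision made here is simply a count of 0x0000 halfwords versus 0xffff
// halfwords: start from whichever pattern is more common (movz vs. movn) and
// patch the remaining halfwords with movk. The sketch below reproduces that
// counting step and the resulting instruction-count estimate for a 64-bit
// register; it deliberately ignores the single-instruction ORR-immediate path
// tried first and the 32-bit case, and its helper names are invented.
#include <cstdint>
#include <cstdio>

static int CountHalfWordsEqual(uint64_t imm, uint16_t pattern) {
  int count = 0;
  for (int shift = 0; shift < 64; shift += 16) {
    if (static_cast<uint16_t>(imm >> shift) == pattern) count++;
  }
  return count;
}

int main() {
  const uint64_t examples[] = {0x0000000000001234ULL, 0xffffffffffff1234ULL,
                               0x1234ffff5678ffffULL, 0x123456789abcdef0ULL};
  for (uint64_t imm : examples) {
    const int zeros = CountHalfWordsEqual(imm, 0x0000);
    const int ones = CountHalfWordsEqual(imm, 0xffff);
    const bool use_movn = ones > zeros;           // more 0xffff halfwords?
    const int ignored = use_movn ? ones : zeros;  // halfwords needing no movk
    const int insns = (ignored == 4) ? 1 : (4 - ignored);
    std::printf("%016llx -> %s + %d movk\n",
                static_cast<unsigned long long>(imm),
                use_movn ? "movn" : "movz", insns - 1);
  }
  return 0;
}
// ---------------------------------------------------------------------------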
+ if (CountClearHalfWords(~imm, reg_size) > + CountClearHalfWords(imm, reg_size)) { + ignored_halfword = 0xffff; + invert_move = true; + } + + // Mov instructions can't move values into the stack pointer, so set up a + // temporary register, if needed. + UseScratchRegisterScope temps; + Register temp; + if (emit_code) { + temps.Open(masm); + temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; + } + + // Iterate through the halfwords. Use movn/movz for the first non-ignored + // halfword, and movk for subsequent halfwords. + VIXL_ASSERT((reg_size % 16) == 0); + bool first_mov_done = false; + for (unsigned i = 0; i < (temp.size() / 16); i++) { + uint64_t imm16 = (imm >> (16 * i)) & 0xffff; + if (imm16 != ignored_halfword) { + if (!first_mov_done) { + if (invert_move) { + if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i); + instruction_count++; + } else { + if (emit_code) masm->movz(temp, imm16, 16 * i); + instruction_count++; + } + first_mov_done = true; + } else { + // Construct a wider constant. + if (emit_code) masm->movk(temp, imm16, 16 * i); + instruction_count++; + } + } + } + + VIXL_ASSERT(first_mov_done); + + // Move the temporary if the original destination register was the stack + // pointer. + if (rd.IsSP()) { + if (emit_code) masm->mov(rd, temp); + instruction_count++; + } + return instruction_count; + } +} + + +bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm, + const Register& dst, + int64_t imm) { + bool emit_code = masm != NULL; + unsigned n, imm_s, imm_r; + int reg_size = dst.size(); + + if (IsImmMovz(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move zero instruction. Movz can't write + // to the stack pointer. + if (emit_code) { + masm->movz(dst, imm); + } + return true; + } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move negative instruction. Movn can't + // write to the stack pointer. + if (emit_code) { + masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask)); + } + return true; + } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be represented in a logical orr instruction. 
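// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// For a 64-bit destination, MOVZ can materialise any value with at most one
// non-zero 16-bit halfword, and MOVN covers the complement: at most one
// halfword that is not 0xffff. That is essentially the shape the IsImmMovz /
// IsImmMovn tests above accept; the helper names below are invented.
#include <cstdint>
#include <cstdio>

static bool IsMovzShape64(uint64_t imm) {
  int non_zero_halfwords = 0;
  for (int shift = 0; shift < 64; shift += 16) {
    if (static_cast<uint16_t>(imm >> shift) != 0) non_zero_halfwords++;
  }
  return non_zero_halfwords <= 1;
}

static bool IsMovnShape64(uint64_t imm) { return IsMovzShape64(~imm); }

int main() {
  const uint64_t tests[] = {0x0000000012340000ULL,   // movz
                            0xffffffffffff5678ULL,   // movn
                            0x0000000012345678ULL};  // neither: needs movk too
  for (uint64_t imm : tests) {
    std::printf("%016llx  movz:%d  movn:%d\n",
                static_cast<unsigned long long>(imm),
                IsMovzShape64(imm), IsMovnShape64(imm));
  }
  return 0;
}
// ---------------------------------------------------------------------------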
+    VIXL_ASSERT(!dst.IsZero());
+    if (emit_code) {
+      masm->LogicalImmediate(
+          dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+    }
+    return true;
+  }
+  return false;
+}
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+  VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
+              ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
+  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+    B(static_cast<Condition>(type), label);
+  } else {
+    switch (type) {
+      case always: B(label); break;
+      case never: break;
+      case reg_zero: Cbz(reg, label); break;
+      case reg_not_zero: Cbnz(reg, label); break;
+      case reg_bit_clear: Tbz(reg, bit, label); break;
+      case reg_bit_set: Tbnz(reg, bit, label); break;
+      default:
+        VIXL_UNREACHABLE();
+    }
+  }
+}
+
+
+void MacroAssembler::B(Label* label) {
+  SingleEmissionCheckScope guard(this);
+  b(label);
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+  VIXL_ASSERT((cond != al) && (cond != nv));
+  EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+  if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+    Label done;
+    b(&done, InvertCondition(cond));
+    b(label);
+    bind(&done);
+  } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+    // write a branch instruction there and use that to branch in case
+    // the unbound label winds up being out of range.
+    b(label, cond);
+  }
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+  VIXL_ASSERT(!rt.IsZero());
+  EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+  if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+    Label done;
+    cbz(rt, &done);
+    b(label);
+    bind(&done);
+  } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+    // write a branch instruction there and use that to branch in case
+    // the unbound label winds up being out of range.
+    cbnz(rt, label);
+  }
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+  VIXL_ASSERT(!rt.IsZero());
+  EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+  if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+    Label done;
+    cbnz(rt, &done);
+    b(label);
+    bind(&done);
+  } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+    // write a branch instruction there and use that to branch in case
+    // the unbound label winds up being out of range.
+    cbz(rt, label);
+  }
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+  VIXL_ASSERT(!rt.IsZero());
+  EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+  if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+    Label done;
+    tbz(rt, bit_pos, &done);
+    b(label);
+    bind(&done);
+  } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+    // write a branch instruction there and use that to branch in case
+    // the unbound label winds up being out of range.
+    tbnz(rt, bit_pos, label);
+  }
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+  VIXL_ASSERT(!rt.IsZero());
+  EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+  if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+    Label done;
+    tbnz(rt, bit_pos, &done);
+    b(label);
+    bind(&done);
+  } else {
+    // TODO: Need to register a slot in a literal pool, so that we can
+    // write a branch instruction there and use that to branch in case
+    // the unbound label winds up being out of range.
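// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// The invert-and-branch fallback used by B/Cbz/Cbnz/Tbz/Tbnz above exists
// because the branch forms have very different reach: B.cond and CBZ/CBNZ
// encode a 19-bit word offset (roughly +/-1 MiB), TBZ/TBNZ only 14 bits
// (roughly +/-32 KiB), while an unconditional B has 26 bits (roughly
// +/-128 MiB). A sketch of the corresponding range checks on byte offsets:
#include <cstdint>
#include <cstdio>

static bool FitsSignedWordOffset(int64_t byte_offset, int bits) {
  if (byte_offset % 4 != 0) return false;  // instructions are 4-byte aligned
  const int64_t words = byte_offset / 4;
  const int64_t limit = INT64_C(1) << (bits - 1);
  return words >= -limit && words < limit;
}

int main() {
  const int64_t offset = 3 * 1024 * 1024;  // a label 3 MiB away
  std::printf("b.cond/cbz reach: %d  tbz reach: %d  b reach: %d\n",
              FitsSignedWordOffset(offset, 19),    // 0
              FitsSignedWordOffset(offset, 14),    // 0
              FitsSignedWordOffset(offset, 26));   // 1 -> invert condition, then b
  return 0;
}
// ---------------------------------------------------------------------------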
+ tbz(rt, bit_pos, label); + } +} + + +void MacroAssembler::And(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, AND); +} + + +void MacroAssembler::Ands(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, ANDS); +} + + +void MacroAssembler::Tst(const Register& rn, + const Operand& operand) { + Ands(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Bic(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, BIC); +} + + +void MacroAssembler::Bics(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, BICS); +} + + +void MacroAssembler::Orr(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, ORR); +} + + +void MacroAssembler::Orn(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, ORN); +} + + +void MacroAssembler::Eor(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, EOR); +} + + +void MacroAssembler::Eon(const Register& rd, + const Register& rn, + const Operand& operand) { + LogicalMacro(rd, rn, operand, EON); +} + + +void MacroAssembler::LogicalMacro(const Register& rd, + const Register& rn, + const Operand& operand, + LogicalOp op) { + // The worst case for size is logical immediate to sp: + // * up to 4 instructions to materialise the constant + // * 1 instruction to do the operation + // * 1 instruction to move to sp + MacroEmissionCheckScope guard(this); + UseScratchRegisterScope temps(this); + + if (operand.IsImmediate()) { + int64_t immediate = operand.immediate(); + unsigned reg_size = rd.size(); + + // If the operation is NOT, invert the operation and immediate. + if ((op & NOT) == NOT) { + op = static_cast<LogicalOp>(op & ~NOT); + immediate = ~immediate; + } + + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are consistent. + VIXL_ASSERT(((immediate >> kWRegSize) == 0) || + ((immediate >> kWRegSize) == -1)); + immediate &= kWRegMask; + } + + VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate)); + + // Special cases for all set or all clear immediates. + if (immediate == 0) { + switch (op) { + case AND: + Mov(rd, 0); + return; + case ORR: + VIXL_FALLTHROUGH(); + case EOR: + Mov(rd, rn); + return; + case ANDS: + VIXL_FALLTHROUGH(); + case BICS: + break; + default: + VIXL_UNREACHABLE(); + } + } else if ((rd.Is64Bits() && (immediate == -1)) || + (rd.Is32Bits() && (immediate == 0xffffffff))) { + switch (op) { + case AND: + Mov(rd, rn); + return; + case ORR: + Mov(rd, immediate); + return; + case EOR: + Mvn(rd, rn); + return; + case ANDS: + VIXL_FALLTHROUGH(); + case BICS: + break; + default: + VIXL_UNREACHABLE(); + } + } + + unsigned n, imm_s, imm_r; + if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be encoded in the instruction. + LogicalImmediate(rd, rn, n, imm_s, imm_r, op); + } else { + // Immediate can't be encoded: synthesize using move immediate. + Register temp = temps.AcquireSameSizeAs(rn); + Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate); + + // VIXL can acquire temp registers. Assert that the caller is aware. 
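// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// The IsImmLogical test above asks whether the immediate is an AArch64
// "bitmask immediate": a repetition of a 2/4/8/16/32/64-bit element that is
// itself a rotated run of contiguous ones. The sketch below only answers that
// yes/no question for 64-bit values; the real encoder also derives the
// n/imm_s/imm_r fields, and the helper names here are invented.
#include <cstdint>
#include <cstdio>

static bool IsRotatedOnesRun(uint64_t elem, unsigned size) {
  const uint64_t mask = (size == 64) ? ~UINT64_C(0) : ((UINT64_C(1) << size) - 1);
  if (elem == 0 || elem == mask) return false;  // all-zero / all-one not encodable
  for (unsigned rot = 0; rot < size; ++rot) {
    const uint64_t r = (rot == 0)
        ? elem
        : (((elem >> rot) | (elem << (size - rot))) & mask);
    if (((r + 1) & r) == 0) return true;        // some rotation looks like 0...01...1
  }
  return false;
}

static bool IsBitmaskImmediate64(uint64_t v) {
  for (unsigned size = 2; size <= 64; size *= 2) {
    const uint64_t mask = (size == 64) ? ~UINT64_C(0) : ((UINT64_C(1) << size) - 1);
    const uint64_t elem = v & mask;
    bool repeats = true;
    for (unsigned i = size; i < 64; i += size) {
      if (((v >> i) & mask) != elem) { repeats = false; break; }
    }
    if (repeats && IsRotatedOnesRun(elem, size)) return true;
  }
  return false;
}

int main() {
  std::printf("0x00ff00ff00ff00ff -> %d\n", IsBitmaskImmediate64(0x00ff00ff00ff00ffULL));  // 1: orr-encodable
  std::printf("0x5555555555555555 -> %d\n", IsBitmaskImmediate64(0x5555555555555555ULL));  // 1
  std::printf("0x0000000012345678 -> %d\n", IsBitmaskImmediate64(0x0000000012345678ULL));  // 0: materialise in a temp
  return 0;
}
// ---------------------------------------------------------------------------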
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn)); + VIXL_ASSERT(!temp.Is(operand.maybeReg())); + + if (rd.Is(sp)) { + // If rd is the stack pointer we cannot use it as the destination + // register so we use the temp register as an intermediate again. + Logical(temp, rn, imm_operand, op); + Mov(sp, temp); + } else { + Logical(rd, rn, imm_operand, op); + } + } + } else if (operand.IsExtendedRegister()) { + VIXL_ASSERT(operand.reg().size() <= rd.size()); + // Add/sub extended supports shift <= 4. We want to support exactly the + // same modes here. + VIXL_ASSERT(operand.shift_amount() <= 4); + VIXL_ASSERT(operand.reg().Is64Bits() || + ((operand.extend() != UXTX) && (operand.extend() != SXTX))); + + temps.Exclude(operand.reg()); + Register temp = temps.AcquireSameSizeAs(rn); + + // VIXL can acquire temp registers. Assert that the caller is aware. + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn)); + VIXL_ASSERT(!temp.Is(operand.maybeReg())); + + EmitExtendShift(temp, operand.reg(), operand.extend(), + operand.shift_amount()); + Logical(rd, rn, Operand(temp), op); + } else { + // The operand can be encoded in the instruction. + VIXL_ASSERT(operand.IsShiftedRegister()); + Logical(rd, rn, operand, op); + } +} + + +void MacroAssembler::Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode) { + // The worst case for size is mov immediate with up to 4 instructions. + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Call the macro assembler for generic immediates. + Mov(rd, operand.immediate()); + } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { + // Emit a shift instruction if moving a shifted register. This operation + // could also be achieved using an orr instruction (like orn used by Mvn), + // but using a shift instruction makes the disassembly clearer. + EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount()); + } else if (operand.IsExtendedRegister()) { + // Emit an extend instruction if moving an extended register. This handles + // extend with post-shift operations, too. + EmitExtendShift(rd, operand.reg(), operand.extend(), + operand.shift_amount()); + } else { + // Otherwise, emit a register move only if the registers are distinct, or + // if they are not X registers. + // + // Note that mov(w0, w0) is not a no-op because it clears the top word of + // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W + // registers is not required to clear the top word of the X register. In + // this case, the instruction is discarded. + // + // If the sp is an operand, add #0 is emitted, otherwise, orr #0. + if (!rd.Is(operand.reg()) || (rd.Is32Bits() && + (discard_mode == kDontDiscardForSameWReg))) { + mov(rd, operand.reg()); + } + } +} + + +void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { + VIXL_ASSERT(is_uint16(imm)); + int byte1 = (imm & 0xff); + int byte2 = ((imm >> 8) & 0xff); + if (byte1 == byte2) { + movi(vd.Is64Bits() ? 
vd.V8B() : vd.V16B(), byte1); + } else if (byte1 == 0) { + movi(vd, byte2, LSL, 8); + } else if (byte2 == 0) { + movi(vd, byte1); + } else if (byte1 == 0xff) { + mvni(vd, ~byte2 & 0xff, LSL, 8); + } else if (byte2 == 0xff) { + mvni(vd, ~byte1 & 0xff); + } else { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + movz(temp, imm); + dup(vd, temp); + } +} + + +void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { + VIXL_ASSERT(is_uint32(imm)); + + uint8_t bytes[sizeof(imm)]; + memcpy(bytes, &imm, sizeof(imm)); + + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 4; ++i) { + if ((bytes[i] != 0) && (bytes[i] != 0xff)) { + all0orff = false; + break; + } + } + + if (all0orff == true) { + movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm)); + return; + } + } + + // Of the 4 bytes, only one byte is non-zero. + for (int i = 0; i < 4; i++) { + if ((imm & (0xff << (i * 8))) == imm) { + movi(vd, bytes[i], LSL, i * 8); + return; + } + } + + // Of the 4 bytes, only one byte is not 0xff. + for (int i = 0; i < 4; i++) { + uint32_t mask = ~(0xff << (i * 8)); + if ((imm & mask) == mask) { + mvni(vd, ~bytes[i] & 0xff, LSL, i * 8); + return; + } + } + + // Immediate is of the form 0x00MMFFFF. + if ((imm & 0xff00ffff) == 0x0000ffff) { + movi(vd, bytes[2], MSL, 16); + return; + } + + // Immediate is of the form 0x0000MMFF. + if ((imm & 0xffff00ff) == 0x000000ff) { + movi(vd, bytes[1], MSL, 8); + return; + } + + // Immediate is of the form 0xFFMM0000. + if ((imm & 0xff00ffff) == 0xff000000) { + mvni(vd, ~bytes[2] & 0xff, MSL, 16); + return; + } + // Immediate is of the form 0xFFFFMM00. + if ((imm & 0xffff00ff) == 0xffff0000) { + mvni(vd, ~bytes[1] & 0xff, MSL, 8); + return; + } + + // Top and bottom 16-bits are equal. + if (((imm >> 16) & 0xffff) == (imm & 0xffff)) { + Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + Mov(temp, imm); + dup(vd, temp); + } +} + + +void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 8; ++i) { + int byteval = (imm >> (i * 8)) & 0xff; + if (byteval != 0 && byteval != 0xff) { + all0orff = false; + break; + } + } + if (all0orff == true) { + movi(vd, imm); + return; + } + } + + // Top and bottom 32-bits are equal. + if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) { + Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Mov(temp, imm); + if (vd.Is1D()) { + mov(vd.D(), 0, temp); + } else { + dup(vd.V2D(), temp); + } + } +} + + +void MacroAssembler::Movi(const VRegister& vd, + uint64_t imm, + Shift shift, + int shift_amount) { + MacroEmissionCheckScope guard(this); + if (shift_amount != 0 || shift != LSL) { + movi(vd, imm, shift, shift_amount); + } else if (vd.Is8B() || vd.Is16B()) { + // 8-bit immediate. + VIXL_ASSERT(is_uint8(imm)); + movi(vd, imm); + } else if (vd.Is4H() || vd.Is8H()) { + // 16-bit immediate. + Movi16bitHelper(vd, imm); + } else if (vd.Is2S() || vd.Is4S()) { + // 32-bit immediate. + Movi32bitHelper(vd, imm); + } else { + // 64-bit immediate. 
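// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// Movi64bitHelper's first test above ("all bytes are either 0x00 or 0xff")
// matches the one NEON MOVI form that can hold a full 64-bit immediate: each
// byte of the result is either all-zeros or all-ones. A sketch of that shape
// test (the helper name is invented):
#include <cstdint>
#include <cstdio>

static bool IsByteMaskImmediate(uint64_t imm) {
  for (int i = 0; i < 8; ++i) {
    const uint8_t byte = static_cast<uint8_t>(imm >> (i * 8));
    if (byte != 0x00 && byte != 0xff) return false;
  }
  return true;
}

int main() {
  std::printf("0xff00ff00ffff0000 -> %d\n", IsByteMaskImmediate(0xff00ff00ffff0000ULL));  // 1: single movi
  std::printf("0x00000000deadbeef -> %d\n", IsByteMaskImmediate(0x00000000deadbeefULL));  // 0: mov to core reg + dup
  return 0;
}
// ---------------------------------------------------------------------------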
+ Movi64bitHelper(vd, imm); + } +} + + +void MacroAssembler::Movi(const VRegister& vd, + uint64_t hi, + uint64_t lo) { + // TODO: Move 128-bit values in a more efficient way. + VIXL_ASSERT(vd.Is128Bits()); + UseScratchRegisterScope temps(this); + Movi(vd.V2D(), lo); + Register temp = temps.AcquireX(); + Mov(temp, hi); + Ins(vd.V2D(), 1, temp); +} + + +void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { + // The worst case for size is mvn immediate with up to 4 instructions. + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Call the macro assembler for generic immediates. + Mvn(rd, operand.immediate()); + } else if (operand.IsExtendedRegister()) { + UseScratchRegisterScope temps(this); + temps.Exclude(operand.reg()); + + // Emit two instructions for the extend case. This differs from Mov, as + // the extend and invert can't be achieved in one instruction. + Register temp = temps.AcquireSameSizeAs(rd); + + // VIXL can acquire temp registers. Assert that the caller is aware. + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(operand.maybeReg())); + + EmitExtendShift(temp, operand.reg(), operand.extend(), + operand.shift_amount()); + mvn(rd, Operand(temp)); + } else { + // Otherwise, register and shifted register cases can be handled by the + // assembler directly, using orn. + mvn(rd, operand); + } +} + + +void MacroAssembler::Mov(const Register& rd, uint64_t imm) { + MoveImmediateHelper(this, rd, imm); +} + + +void MacroAssembler::Ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + if (operand.IsImmediate() && (operand.immediate() < 0)) { + ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN); + } else { + ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP); + } +} + + +void MacroAssembler::Ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + if (operand.IsImmediate() && (operand.immediate() < 0)) { + ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP); + } else { + ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN); + } +} + + +void MacroAssembler::ConditionalCompareMacro(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op) { + VIXL_ASSERT((cond != al) && (cond != nv)); + // The worst case for size is ccmp immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for ccmp + MacroEmissionCheckScope guard(this); + + if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || + (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { + // The immediate can be encoded in the instruction, or the operand is an + // unshifted register: call the assembler. + ConditionalCompare(rn, operand, nzcv, cond, op); + } else { + UseScratchRegisterScope temps(this); + // The operand isn't directly supported by the instruction: perform the + // operation on a temporary register. 
+ Register temp = temps.AcquireSameSizeAs(rn); + VIXL_ASSERT(!temp.Is(rn) && !temp.Is(operand.maybeReg())); + Mov(temp, operand); + ConditionalCompare(rn, temp, nzcv, cond, op); + } +} + + +void MacroAssembler::Csel(const Register& rd, + const Register& rn, + const Operand& operand, + Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + // The worst case for size is csel immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for csel + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Immediate argument. Handle special cases of 0, 1 and -1 using zero + // register. + int64_t imm = operand.immediate(); + Register zr = AppropriateZeroRegFor(rn); + if (imm == 0) { + csel(rd, rn, zr, cond); + } else if (imm == 1) { + csinc(rd, rn, zr, cond); + } else if (imm == -1) { + csinv(rd, rn, zr, cond); + } else { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(rn); + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn)); + VIXL_ASSERT(!temp.Is(operand.maybeReg())); + Mov(temp, operand.immediate()); + csel(rd, rn, temp, cond); + } + } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { + // Unshifted register argument. + csel(rd, rn, operand.reg(), cond); + } else { + // All other arguments. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(rn); + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn)); + VIXL_ASSERT(!temp.Is(operand.maybeReg())); + Mov(temp, operand); + csel(rd, rn, temp, cond); + } +} + + +void MacroAssembler::Add(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S) { + if (operand.IsImmediate() && (operand.immediate() < 0) && + IsImmAddSub(-operand.immediate())) { + AddSubMacro(rd, rn, -operand.immediate(), S, SUB); + } else { + AddSubMacro(rd, rn, operand, S, ADD); + } +} + + +void MacroAssembler::Adds(const Register& rd, + const Register& rn, + const Operand& operand) { + Add(rd, rn, operand, SetFlags); +} + + +void MacroAssembler::Sub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S) { + if (operand.IsImmediate() && (operand.immediate() < 0) && + IsImmAddSub(-operand.immediate())) { + AddSubMacro(rd, rn, -operand.immediate(), S, ADD); + } else { + AddSubMacro(rd, rn, operand, S, SUB); + } +} + + +void MacroAssembler::Subs(const Register& rd, + const Register& rn, + const Operand& operand) { + Sub(rd, rn, operand, SetFlags); +} + + +void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { + Adds(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { + Subs(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Fcmp(const FPRegister& fn, double value, + FPTrapFlags trap) { + // The worst case for size is: + // * 1 to materialise the constant, using literal pool if necessary + // * 1 instruction for fcmp{e} + MacroEmissionCheckScope guard(this); + if (value != 0.0) { + UseScratchRegisterScope temps(this); + FPRegister tmp = temps.AcquireSameSizeAs(fn); + VIXL_ASSERT(!tmp.Is(fn)); + Fmov(tmp, value); + FPCompareMacro(fn, tmp, trap); + } else { + FPCompareMacro(fn, value, trap); + } +} + + +void MacroAssembler::Fcmpe(const FPRegister& fn, double value) { + Fcmp(fn, value, EnableTrap); +} + + +void MacroAssembler::Fmov(VRegister vd, double imm) { + // Floating point immediates are loaded through the literal pool. 
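// --- Illustrative aside (standalone sketch, not part of the VIXL sources) ---
// The IsImmFP64/IsImmFP32 checks just below accept the small set of values
// FMOV (immediate) can encode, which amounts to +/-(16..31)/16 * 2^e with
// e in [-3, 4]; notably 0.0 is not in the set, which is why zero is loaded
// from xzr/wzr instead. A brute-force sketch of that membership test (the
// helper name is invented; the real check inspects the raw bit pattern):
#include <cmath>
#include <cstdio>

static bool IsFmovImmediate(double value) {
  for (int sign = 0; sign < 2; ++sign) {
    for (int exp = -3; exp <= 4; ++exp) {
      for (int n = 16; n <= 31; ++n) {
        const double candidate = (n / 16.0) * std::ldexp(1.0, exp);
        if (value == (sign ? -candidate : candidate)) return true;
      }
    }
  }
  return false;
}

int main() {
  const double tests[] = {1.0, -0.1875, 31.0, 0.1, 129.0, 0.0};
  for (double v : tests) {
    std::printf("%g -> %s\n", v,
                IsFmovImmediate(v) ? "fmov #imm" : "literal pool / xzr / movi");
  }
  return 0;
}
// ---------------------------------------------------------------------------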
+ MacroEmissionCheckScope guard(this); + + if (vd.Is1S() || vd.Is2S() || vd.Is4S()) { + Fmov(vd, static_cast<float>(imm)); + return; + } + + VIXL_ASSERT(vd.Is1D() || vd.Is2D()); + if (IsImmFP64(imm)) { + fmov(vd, imm); + } else { + uint64_t rawbits = double_to_rawbits(imm); + if (vd.IsScalar()) { + if (rawbits == 0) { + fmov(vd, xzr); + } else { + Assembler::fImmPool64(vd, imm); + } + } else { + // TODO: consider NEON support for load literal. + Movi(vd, rawbits); + } + } +} + + +void MacroAssembler::Fmov(VRegister vd, float imm) { + // Floating point immediates are loaded through the literal pool. + MacroEmissionCheckScope guard(this); + + if (vd.Is1D() || vd.Is2D()) { + Fmov(vd, static_cast<double>(imm)); + return; + } + + VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S()); + if (IsImmFP32(imm)) { + fmov(vd, imm); + } else { + uint32_t rawbits = float_to_rawbits(imm); + if (vd.IsScalar()) { + if (rawbits == 0) { + fmov(vd, wzr); + } else { + Assembler::fImmPool32(vd, imm); + } + } else { + // TODO: consider NEON support for load literal. + Movi(vd, rawbits); + } + } +} + + + +void MacroAssembler::Neg(const Register& rd, + const Operand& operand) { + if (operand.IsImmediate()) { + Mov(rd, -operand.immediate()); + } else { + Sub(rd, AppropriateZeroRegFor(rd), operand); + } +} + + +void MacroAssembler::Negs(const Register& rd, + const Operand& operand) { + Subs(rd, AppropriateZeroRegFor(rd), operand); +} + + +bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, + int64_t imm) { + return OneInstrMoveImmediateHelper(this, dst, imm); +} + + +Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, + int64_t imm) { + int reg_size = dst.size(); + + // Encode the immediate in a single move instruction, if possible. + if (TryOneInstrMoveImmediate(dst, imm)) { + // The move was successful; nothing to do here. + } else { + // Pre-shift the immediate to the least-significant bits of the register. + int shift_low = CountTrailingZeros(imm, reg_size); + int64_t imm_low = imm >> shift_low; + + // Pre-shift the immediate to the most-significant bits of the register, + // inserting set bits in the least-significant bits. + int shift_high = CountLeadingZeros(imm, reg_size); + int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1); + + if (TryOneInstrMoveImmediate(dst, imm_low)) { + // The new immediate has been moved into the destination's low bits: + // return a new leftward-shifting operand. + return Operand(dst, LSL, shift_low); + } else if (TryOneInstrMoveImmediate(dst, imm_high)) { + // The new immediate has been moved into the destination's high bits: + // return a new rightward-shifting operand. + return Operand(dst, LSR, shift_high); + } else { + Mov(dst, imm); + } + } + return Operand(dst); +} + + +void MacroAssembler::ComputeAddress(const Register& dst, + const MemOperand& mem_op) { + // We cannot handle pre-indexing or post-indexing. 
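+  // As an illustration, ComputeAddress(x0, MemOperand(x1, x2, LSL, 3))
+  // reduces to a single add x0, x1, x2, lsl #3, while an immediate-offset
+  // MemOperand simply becomes an Add of the base and the offset.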
+ VIXL_ASSERT(mem_op.addrmode() == Offset); + Register base = mem_op.base(); + if (mem_op.IsImmediateOffset()) { + Add(dst, base, mem_op.offset()); + } else { + VIXL_ASSERT(mem_op.IsRegisterOffset()); + Register reg_offset = mem_op.regoffset(); + Shift shift = mem_op.shift(); + Extend extend = mem_op.extend(); + if (shift == NO_SHIFT) { + VIXL_ASSERT(extend != NO_EXTEND); + Add(dst, base, Operand(reg_offset, extend, mem_op.shift_amount())); + } else { + VIXL_ASSERT(extend == NO_EXTEND); + Add(dst, base, Operand(reg_offset, shift, mem_op.shift_amount())); + } + } +} + + +void MacroAssembler::AddSubMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op) { + // Worst case is add/sub immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for add/sub + MacroEmissionCheckScope guard(this); + + if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && + (S == LeaveFlags)) { + // The instruction would be a nop. Avoid generating useless code. + return; + } + + if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || + (rn.IsZero() && !operand.IsShiftedRegister()) || + (operand.IsShiftedRegister() && (operand.shift() == ROR))) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(rn); + + // VIXL can acquire temp registers. Assert that the caller is aware. + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn)); + VIXL_ASSERT(!temp.Is(operand.maybeReg())); + + if (operand.IsImmediate()) { + Operand imm_operand = + MoveImmediateForShiftedOp(temp, operand.immediate()); + AddSub(rd, rn, imm_operand, S, op); + } else { + Mov(temp, operand); + AddSub(rd, rn, temp, S, op); + } + } else { + AddSub(rd, rn, operand, S, op); + } +} + + +void MacroAssembler::Adc(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC); +} + + +void MacroAssembler::Adcs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC); +} + + +void MacroAssembler::Sbc(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC); +} + + +void MacroAssembler::Sbcs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC); +} + + +void MacroAssembler::Ngc(const Register& rd, + const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + Sbc(rd, zr, operand); +} + + +void MacroAssembler::Ngcs(const Register& rd, + const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + Sbcs(rd, zr, operand); +} + + +void MacroAssembler::AddSubWithCarryMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op) { + VIXL_ASSERT(rd.size() == rn.size()); + // Worst case is addc/subc immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for add/sub + MacroEmissionCheckScope guard(this); + UseScratchRegisterScope temps(this); + + if (operand.IsImmediate() || + (operand.IsShiftedRegister() && (operand.shift() == ROR))) { + // Add/sub with carry (immediate or ROR shifted register.) 
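+    // For example, Adc(x0, x1, Operand(0x1234)) has no immediate form in the
+    // instruction set, so it expands to roughly:
+    //   mov <temp>, #0x1234
+    //   adc x0, x1, <temp>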
+ Register temp = temps.AcquireSameSizeAs(rn); + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg())); + Mov(temp, operand); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { + // Add/sub with carry (shifted register). + VIXL_ASSERT(operand.reg().size() == rd.size()); + VIXL_ASSERT(operand.shift() != ROR); + VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2, + operand.shift_amount())); + temps.Exclude(operand.reg()); + Register temp = temps.AcquireSameSizeAs(rn); + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg())); + EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else if (operand.IsExtendedRegister()) { + // Add/sub with carry (extended register). + VIXL_ASSERT(operand.reg().size() <= rd.size()); + // Add/sub extended supports a shift <= 4. We want to support exactly the + // same modes. + VIXL_ASSERT(operand.shift_amount() <= 4); + VIXL_ASSERT(operand.reg().Is64Bits() || + ((operand.extend() != UXTX) && (operand.extend() != SXTX))); + temps.Exclude(operand.reg()); + Register temp = temps.AcquireSameSizeAs(rn); + VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg())); + EmitExtendShift(temp, operand.reg(), operand.extend(), + operand.shift_amount()); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else { + // The addressing mode is directly supported by the instruction. + AddSubWithCarry(rd, rn, operand, S, op); + } +} + + +#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ +void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ + LoadStoreMacro(REG, addr, OP); \ +} +LS_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION + + +void MacroAssembler::LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op) { + // Worst case is ldr/str pre/post index: + // * 1 instruction for ldr/str + // * up to 4 instructions to materialise the constant + // * 1 instruction to update the base + MacroEmissionCheckScope guard(this); + + int64_t offset = addr.offset(); + unsigned access_size = CalcLSDataSize(op); + + // Check if an immediate offset fits in the immediate field of the + // appropriate instruction. If not, emit two instructions to perform + // the operation. + if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) && + !IsImmLSUnscaled(offset)) { + // Immediate offset that can't be encoded using unsigned or unscaled + // addressing modes. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(addr.base()); + VIXL_ASSERT(!temp.Is(rt)); + VIXL_ASSERT(!temp.Is(addr.base()) && !temp.Is(addr.regoffset())); + Mov(temp, addr.offset()); + LoadStore(rt, MemOperand(addr.base(), temp), op); + } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { + // Post-index beyond unscaled addressing range. + LoadStore(rt, MemOperand(addr.base()), op); + Add(addr.base(), addr.base(), Operand(offset)); + } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { + // Pre-index beyond unscaled addressing range. + Add(addr.base(), addr.base(), Operand(offset)); + LoadStore(rt, MemOperand(addr.base()), op); + } else { + // Encodable in one load/store instruction. 
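+    // For example, Ldr(x0, MemOperand(x1, 32)) is encodable here as a single
+    // ldr (32 is an unsigned multiple of the 8-byte access size), whereas an
+    // offset such as 0x12345 would take the scratch-register path above.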
+ LoadStore(rt, addr, op); + } +} + + +#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ +void MacroAssembler::FN(const REGTYPE REG, \ + const REGTYPE REG2, \ + const MemOperand& addr) { \ + LoadStorePairMacro(REG, REG2, addr, OP); \ +} +LSPAIR_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION + +void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + // TODO(all): Should we support register offset for load-store-pair? + VIXL_ASSERT(!addr.IsRegisterOffset()); + // Worst case is ldp/stp immediate: + // * 1 instruction for ldp/stp + // * up to 4 instructions to materialise the constant + // * 1 instruction to update the base + MacroEmissionCheckScope guard(this); + + int64_t offset = addr.offset(); + unsigned access_size = CalcLSPairDataSize(op); + + // Check if the offset fits in the immediate field of the appropriate + // instruction. If not, emit two instructions to perform the operation. + if (IsImmLSPair(offset, access_size)) { + // Encodable in one load/store pair instruction. + LoadStorePair(rt, rt2, addr, op); + } else { + Register base = addr.base(); + if (addr.IsImmediateOffset()) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(base); + Add(temp, base, offset); + LoadStorePair(rt, rt2, MemOperand(temp), op); + } else if (addr.IsPostIndex()) { + LoadStorePair(rt, rt2, MemOperand(base), op); + Add(base, base, offset); + } else { + VIXL_ASSERT(addr.IsPreIndex()); + Add(base, base, offset); + LoadStorePair(rt, rt2, MemOperand(base), op); + } + } +} + + +void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) { + MacroEmissionCheckScope guard(this); + + // There are no pre- or post-index modes for prfm. + VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset()); + + // The access size is implicitly 8 bytes for all prefetch operations. + unsigned size = kXRegSizeInBytesLog2; + + // Check if an immediate offset fits in the immediate field of the + // appropriate instruction. If not, emit two instructions to perform + // the operation. + if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.offset(), size) && + !IsImmLSUnscaled(addr.offset())) { + // Immediate offset that can't be encoded using unsigned or unscaled + // addressing modes. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(addr.base()); + Mov(temp, addr.offset()); + Prefetch(op, MemOperand(addr.base(), temp)); + } else { + // Simple register-offsets are encodable in one instruction. + Prefetch(op, addr); + } +} + + +void MacroAssembler::PushStackPointer() { + PrepareForPush(1, 8); + + // Pushing a stack pointer leads to implementation-defined + // behavior, which may be surprising. In particular, + // str x28, [x28, #-8]! + // pre-decrements the stack pointer, storing the decremented value. + // Additionally, sp is read as xzr in this context, so it cannot be pushed. + // So we must use a scratch register. 
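+  // The code below expands, in effect, to:
+  //   mov <scratch>, <current stack pointer>
+  //   str <scratch>, [<current stack pointer>, #-8]!
+  // so the value stored is the stack pointer as it was before the push.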
+ UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + + Mov(scratch, GetStackPointer64()); + str(scratch, MemOperand(GetStackPointer64(), -8, PreIndex)); +} + + +void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, + const CPURegister& src2, const CPURegister& src3) { + VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + VIXL_ASSERT(src0.IsValid()); + + int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); + int size = src0.SizeInBytes(); + + if (src0.Is(GetStackPointer64())) { + VIXL_ASSERT(count == 1); + VIXL_ASSERT(size == 8); + PushStackPointer(); + return; + } + + PrepareForPush(count, size); + PushHelper(count, size, src0, src1, src2, src3); +} + + +void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, + const CPURegister& dst2, const CPURegister& dst3) { + // It is not valid to pop into the same register more than once in one + // instruction, not even into the zero register. + VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(dst0.IsValid()); + + int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); + int size = dst0.SizeInBytes(); + + PrepareForPop(count, size); + PopHelper(count, size, dst0, dst1, dst2, dst3); +} + + +void MacroAssembler::PushCPURegList(CPURegList registers) { + VIXL_ASSERT(!registers.Overlaps(*TmpList())); + VIXL_ASSERT(!registers.Overlaps(*FPTmpList())); + + int reg_size = registers.RegisterSizeInBytes(); + PrepareForPush(registers.Count(), reg_size); + + // Bump the stack pointer and store two registers at the bottom. + int size = registers.TotalSizeInBytes(); + const CPURegister& bottom_0 = registers.PopLowestIndex(); + const CPURegister& bottom_1 = registers.PopLowestIndex(); + if (bottom_0.IsValid() && bottom_1.IsValid()) { + Stp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), -size, PreIndex)); + } else if (bottom_0.IsValid()) { + Str(bottom_0, MemOperand(GetStackPointer64(), -size, PreIndex)); + } + + int offset = 2 * reg_size; + while (!registers.IsEmpty()) { + const CPURegister& src0 = registers.PopLowestIndex(); + const CPURegister& src1 = registers.PopLowestIndex(); + if (src1.IsValid()) { + Stp(src0, src1, MemOperand(GetStackPointer64(), offset)); + } else { + Str(src0, MemOperand(GetStackPointer64(), offset)); + } + offset += 2 * reg_size; + } +} + + +void MacroAssembler::PopCPURegList(CPURegList registers) { + VIXL_ASSERT(!registers.Overlaps(*TmpList())); + VIXL_ASSERT(!registers.Overlaps(*FPTmpList())); + + int reg_size = registers.RegisterSizeInBytes(); + PrepareForPop(registers.Count(), reg_size); + + + int size = registers.TotalSizeInBytes(); + const CPURegister& bottom_0 = registers.PopLowestIndex(); + const CPURegister& bottom_1 = registers.PopLowestIndex(); + + int offset = 2 * reg_size; + while (!registers.IsEmpty()) { + const CPURegister& dst0 = registers.PopLowestIndex(); + const CPURegister& dst1 = registers.PopLowestIndex(); + if (dst1.IsValid()) { + Ldp(dst0, dst1, MemOperand(GetStackPointer64(), offset)); + } else { + Ldr(dst0, MemOperand(GetStackPointer64(), offset)); + } + offset += 2 * reg_size; + } + + // Load the two registers at the bottom and drop the stack pointer. 
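+  // As an illustrative case (with a stack pointer other than sp, so the total
+  // size need not be 16-byte aligned): popping {x19, x20, x21} loads x21 from
+  // [<sp>, #16] in the loop above, then the ldp below restores x19/x20 and
+  // drops all 24 bytes with a post-index writeback.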
+ if (bottom_0.IsValid() && bottom_1.IsValid()) { + Ldp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), size, PostIndex)); + } else if (bottom_0.IsValid()) { + Ldr(bottom_0, MemOperand(GetStackPointer64(), size, PostIndex)); + } +} + + +void MacroAssembler::PushMultipleTimes(int count, Register src) { + int size = src.SizeInBytes(); + + PrepareForPush(count, size); + // Push up to four registers at a time if possible because if the current + // stack pointer is sp and the register size is 32, registers must be pushed + // in blocks of four in order to maintain the 16-byte alignment for sp. + while (count >= 4) { + PushHelper(4, size, src, src, src, src); + count -= 4; + } + if (count >= 2) { + PushHelper(2, size, src, src, NoReg, NoReg); + count -= 2; + } + if (count == 1) { + PushHelper(1, size, src, NoReg, NoReg, NoReg); + count -= 1; + } + VIXL_ASSERT(count == 0); +} + + +void MacroAssembler::PushHelper(int count, int size, + const CPURegister& src0, + const CPURegister& src1, + const CPURegister& src2, + const CPURegister& src3) { + // Ensure that we don't unintentionally modify scratch or debug registers. + // Worst case for size is 2 stp. + InstructionAccurateScope scope(this, 2, + InstructionAccurateScope::kMaximumSize); + + VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + VIXL_ASSERT(size == src0.SizeInBytes()); + + // Pushing the stack pointer has unexpected behavior. See PushStackPointer(). + VIXL_ASSERT(!src0.Is(GetStackPointer64()) && !src0.Is(sp)); + VIXL_ASSERT(!src1.Is(GetStackPointer64()) && !src1.Is(sp)); + VIXL_ASSERT(!src2.Is(GetStackPointer64()) && !src2.Is(sp)); + VIXL_ASSERT(!src3.Is(GetStackPointer64()) && !src3.Is(sp)); + + // The JS engine should never push 4 bytes. + VIXL_ASSERT(size >= 8); + + // When pushing multiple registers, the store order is chosen such that + // Push(a, b) is equivalent to Push(a) followed by Push(b). + switch (count) { + case 1: + VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); + str(src0, MemOperand(GetStackPointer64(), -1 * size, PreIndex)); + break; + case 2: + VIXL_ASSERT(src2.IsNone() && src3.IsNone()); + stp(src1, src0, MemOperand(GetStackPointer64(), -2 * size, PreIndex)); + break; + case 3: + VIXL_ASSERT(src3.IsNone()); + stp(src2, src1, MemOperand(GetStackPointer64(), -3 * size, PreIndex)); + str(src0, MemOperand(GetStackPointer64(), 2 * size)); + break; + case 4: + // Skip over 4 * size, then fill in the gap. This allows four W registers + // to be pushed using sp, whilst maintaining 16-byte alignment for sp at + // all times. + stp(src3, src2, MemOperand(GetStackPointer64(), -4 * size, PreIndex)); + stp(src1, src0, MemOperand(GetStackPointer64(), 2 * size)); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void MacroAssembler::PopHelper(int count, int size, + const CPURegister& dst0, + const CPURegister& dst1, + const CPURegister& dst2, + const CPURegister& dst3) { + // Ensure that we don't unintentionally modify scratch or debug registers. + // Worst case for size is 2 ldp. + InstructionAccurateScope scope(this, 2, + InstructionAccurateScope::kMaximumSize); + + VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(size == dst0.SizeInBytes()); + + // When popping multiple registers, the load order is chosen such that + // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). 
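+  // For example, Pop(x0, x1) emits ldp x0, x1, [<sp>], #16: x0 receives the
+  // value at the lower address (the value pushed last), which matches Pop(x0)
+  // followed by Pop(x1).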
+ switch (count) { + case 1: + VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); + ldr(dst0, MemOperand(GetStackPointer64(), 1 * size, PostIndex)); + break; + case 2: + VIXL_ASSERT(dst2.IsNone() && dst3.IsNone()); + ldp(dst0, dst1, MemOperand(GetStackPointer64(), 2 * size, PostIndex)); + break; + case 3: + VIXL_ASSERT(dst3.IsNone()); + ldr(dst2, MemOperand(GetStackPointer64(), 2 * size)); + ldp(dst0, dst1, MemOperand(GetStackPointer64(), 3 * size, PostIndex)); + break; + case 4: + // Load the higher addresses first, then load the lower addresses and skip + // the whole block in the second instruction. This allows four W registers + // to be popped using sp, whilst maintaining 16-byte alignment for sp at + // all times. + ldp(dst2, dst3, MemOperand(GetStackPointer64(), 2 * size)); + ldp(dst0, dst1, MemOperand(GetStackPointer64(), 4 * size, PostIndex)); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void MacroAssembler::PrepareForPush(int count, int size) { + if (sp.Is(GetStackPointer64())) { + // If the current stack pointer is sp, then it must be aligned to 16 bytes + // on entry and the total size of the specified registers must also be a + // multiple of 16 bytes. + VIXL_ASSERT((count * size) % 16 == 0); + } else { + // Even if the current stack pointer is not the system stack pointer (sp), + // the system stack pointer will still be modified in order to comply with + // ABI rules about accessing memory below the system stack pointer. + BumpSystemStackPointer(count * size); + } +} + + +void MacroAssembler::PrepareForPop(int count, int size) { + USE(count, size); + if (sp.Is(GetStackPointer64())) { + // If the current stack pointer is sp, then it must be aligned to 16 bytes + // on entry and the total size of the specified registers must also be a + // multiple of 16 bytes. + VIXL_ASSERT((count * size) % 16 == 0); + } +} + +void MacroAssembler::Poke(const Register& src, const Operand& offset) { + if (offset.IsImmediate()) { + VIXL_ASSERT(offset.immediate() >= 0); + } + + Str(src, MemOperand(GetStackPointer64(), offset)); +} + + +void MacroAssembler::Peek(const Register& dst, const Operand& offset) { + if (offset.IsImmediate()) { + VIXL_ASSERT(offset.immediate() >= 0); + } + + Ldr(dst, MemOperand(GetStackPointer64(), offset)); +} + + +void MacroAssembler::Claim(const Operand& size) { + + if (size.IsZero()) { + return; + } + + if (size.IsImmediate()) { + VIXL_ASSERT(size.immediate() > 0); + if (sp.Is(GetStackPointer64())) { + VIXL_ASSERT((size.immediate() % 16) == 0); + } + } + + Sub(GetStackPointer64(), GetStackPointer64(), size); + + // Make sure the real stack pointer reflects the claimed stack space. + // We can't use stack memory below the stack pointer, it could be clobbered by + // interupts and signal handlers. + if (!sp.Is(GetStackPointer64())) { + Mov(sp, GetStackPointer64()); + } +} + + +void MacroAssembler::Drop(const Operand& size) { + + if (size.IsZero()) { + return; + } + + if (size.IsImmediate()) { + VIXL_ASSERT(size.immediate() > 0); + if (sp.Is(GetStackPointer64())) { + VIXL_ASSERT((size.immediate() % 16) == 0); + } + } + + Add(GetStackPointer64(), GetStackPointer64(), size); +} + + +void MacroAssembler::PushCalleeSavedRegisters() { + // Ensure that the macro-assembler doesn't use any scratch registers. + // 10 stp will be emitted. + // TODO(all): Should we use GetCalleeSaved and SavedFP. + InstructionAccurateScope scope(this, 10); + + // This method must not be called unless the current stack pointer is sp. 
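+  // The pre-indexed pair stores below place x29/x30 at the highest addresses
+  // of the 160-byte save area and d8/d9 at the lowest, leaving sp pointing at
+  // the d8/d9 pair.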
+ VIXL_ASSERT(sp.Is(GetStackPointer64())); + + MemOperand tos(sp, -2 * static_cast<int>(kXRegSizeInBytes), PreIndex); + + stp(x29, x30, tos); + stp(x27, x28, tos); + stp(x25, x26, tos); + stp(x23, x24, tos); + stp(x21, x22, tos); + stp(x19, x20, tos); + + stp(d14, d15, tos); + stp(d12, d13, tos); + stp(d10, d11, tos); + stp(d8, d9, tos); +} + + +void MacroAssembler::PopCalleeSavedRegisters() { + // Ensure that the macro-assembler doesn't use any scratch registers. + // 10 ldp will be emitted. + // TODO(all): Should we use GetCalleeSaved and SavedFP. + InstructionAccurateScope scope(this, 10); + + // This method must not be called unless the current stack pointer is sp. + VIXL_ASSERT(sp.Is(GetStackPointer64())); + + MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex); + + ldp(d8, d9, tos); + ldp(d10, d11, tos); + ldp(d12, d13, tos); + ldp(d14, d15, tos); + + ldp(x19, x20, tos); + ldp(x21, x22, tos); + ldp(x23, x24, tos); + ldp(x25, x26, tos); + ldp(x27, x28, tos); + ldp(x29, x30, tos); +} + +void MacroAssembler::LoadCPURegList(CPURegList registers, + const MemOperand& src) { + LoadStoreCPURegListHelper(kLoad, registers, src); +} + +void MacroAssembler::StoreCPURegList(CPURegList registers, + const MemOperand& dst) { + LoadStoreCPURegListHelper(kStore, registers, dst); +} + + +void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op, + CPURegList registers, + const MemOperand& mem) { + // We do not handle pre-indexing or post-indexing. + VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex())); + VIXL_ASSERT(!registers.Overlaps(tmp_list_)); + VIXL_ASSERT(!registers.Overlaps(fptmp_list_)); + VIXL_ASSERT(!registers.IncludesAliasOf(sp)); + + UseScratchRegisterScope temps(this); + + MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers, + mem, + &temps); + + while (registers.Count() >= 2) { + const CPURegister& dst0 = registers.PopLowestIndex(); + const CPURegister& dst1 = registers.PopLowestIndex(); + if (op == kStore) { + Stp(dst0, dst1, loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldp(dst0, dst1, loc); + } + loc.AddOffset(2 * registers.RegisterSizeInBytes()); + } + if (!registers.IsEmpty()) { + if (op == kStore) { + Str(registers.PopLowestIndex(), loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldr(registers.PopLowestIndex(), loc); + } + } +} + +MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList( + const CPURegList& registers, + const MemOperand& mem, + UseScratchRegisterScope* scratch_scope) { + // If necessary, pre-compute the base address for the accesses. + if (mem.IsRegisterOffset()) { + Register reg_base = scratch_scope->AcquireX(); + ComputeAddress(reg_base, mem); + return MemOperand(reg_base); + + } else if (mem.IsImmediateOffset()) { + int reg_size = registers.RegisterSizeInBytes(); + int total_size = registers.TotalSizeInBytes(); + int64_t min_offset = mem.offset(); + int64_t max_offset = mem.offset() + std::max(0, total_size - 2 * reg_size); + if ((registers.Count() >= 2) && + (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) || + !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) { + Register reg_base = scratch_scope->AcquireX(); + ComputeAddress(reg_base, mem); + return MemOperand(reg_base); + } + } + + return mem; +} + +void MacroAssembler::BumpSystemStackPointer(const Operand& space) { + VIXL_ASSERT(!sp.Is(GetStackPointer64())); + // TODO: Several callers rely on this not using scratch registers, so we use + // the assembler directly here. 
However, this means that large immediate + // values of 'space' cannot be handled. + InstructionAccurateScope scope(this, 1); + sub(sp, GetStackPointer64(), space); +} + + +void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) { + +#ifdef JS_SIMULATOR_ARM64 + // The arguments to the trace pseudo instruction need to be contiguous in + // memory, so make sure we don't try to emit a literal pool. + InstructionAccurateScope scope(this, kTraceLength / kInstructionSize); + + Label start; + bind(&start); + + // Refer to simulator-a64.h for a description of the marker and its + // arguments. + hlt(kTraceOpcode); + + // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset); + dc32(parameters); + + // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset); + dc32(command); +#else + // Emit nothing on real hardware. + USE(parameters, command); +#endif +} + + +void MacroAssembler::Log(TraceParameters parameters) { + +#ifdef JS_SIMULATOR_ARM64 + // The arguments to the log pseudo instruction need to be contiguous in + // memory, so make sure we don't try to emit a literal pool. + InstructionAccurateScope scope(this, kLogLength / kInstructionSize); + + Label start; + bind(&start); + + // Refer to simulator-a64.h for a description of the marker and its + // arguments. + hlt(kLogOpcode); + + // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset); + dc32(parameters); +#else + // Emit nothing on real hardware. + USE(parameters); +#endif +} + + +void MacroAssembler::EnableInstrumentation() { + VIXL_ASSERT(!isprint(InstrumentStateEnable)); + InstructionAccurateScope scope(this, 1); + movn(xzr, InstrumentStateEnable); +} + + +void MacroAssembler::DisableInstrumentation() { + VIXL_ASSERT(!isprint(InstrumentStateDisable)); + InstructionAccurateScope scope(this, 1); + movn(xzr, InstrumentStateDisable); +} + + +void MacroAssembler::AnnotateInstrumentation(const char* marker_name) { + VIXL_ASSERT(strlen(marker_name) == 2); + + // We allow only printable characters in the marker names. Unprintable + // characters are reserved for controlling features of the instrumentation. + VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); + + InstructionAccurateScope scope(this, 1); + movn(xzr, (marker_name[1] << 8) | marker_name[0]); +} + + +void UseScratchRegisterScope::Open(MacroAssembler* masm) { + VIXL_ASSERT(!initialised_); + available_ = masm->TmpList(); + availablefp_ = masm->FPTmpList(); + old_available_ = available_->list(); + old_availablefp_ = availablefp_->list(); + VIXL_ASSERT(available_->type() == CPURegister::kRegister); + VIXL_ASSERT(availablefp_->type() == CPURegister::kVRegister); +#ifdef DEBUG + initialised_ = true; +#endif +} + + +void UseScratchRegisterScope::Close() { + if (available_) { + available_->set_list(old_available_); + available_ = NULL; + } + if (availablefp_) { + availablefp_->set_list(old_availablefp_); + availablefp_ = NULL; + } +#ifdef DEBUG + initialised_ = false; +#endif +} + + +UseScratchRegisterScope::UseScratchRegisterScope(MacroAssembler* masm) { +#ifdef DEBUG + initialised_ = false; +#endif + Open(masm); +} + +// This allows deferred (and optional) initialisation of the scope. 
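+// An illustrative sketch of the deferred form (names chosen as an example):
+//   UseScratchRegisterScope temps;   // not yet attached to a masm
+//   ...
+//   temps.Open(&masm);
+//   Register scratch = temps.AcquireX();
+//   temps.Close();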
+UseScratchRegisterScope::UseScratchRegisterScope() + : available_(NULL), availablefp_(NULL), + old_available_(0), old_availablefp_(0) { +#ifdef DEBUG + initialised_ = false; +#endif +} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + Close(); +} + + +bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const { + return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg); +} + + +Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { + int code = AcquireNextAvailable(available_).code(); + return Register(code, reg.size()); +} + + +FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { + int code = AcquireNextAvailable(availablefp_).code(); + return FPRegister(code, reg.size()); +} + + +void UseScratchRegisterScope::Release(const CPURegister& reg) { + VIXL_ASSERT(initialised_); + if (reg.IsRegister()) { + ReleaseByCode(available_, reg.code()); + } else if (reg.IsFPRegister()) { + ReleaseByCode(availablefp_, reg.code()); + } else { + VIXL_ASSERT(reg.IsNone()); + } +} + + +void UseScratchRegisterScope::Include(const CPURegList& list) { + VIXL_ASSERT(initialised_); + if (list.type() == CPURegister::kRegister) { + // Make sure that neither sp nor xzr are included the list. + IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit())); + } else { + VIXL_ASSERT(list.type() == CPURegister::kVRegister); + IncludeByRegList(availablefp_, list.list()); + } +} + + +void UseScratchRegisterScope::Include(const Register& reg1, + const Register& reg2, + const Register& reg3, + const Register& reg4) { + VIXL_ASSERT(initialised_); + RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); + // Make sure that neither sp nor xzr are included the list. + include &= ~(xzr.Bit() | sp.Bit()); + + IncludeByRegList(available_, include); +} + + +void UseScratchRegisterScope::Include(const FPRegister& reg1, + const FPRegister& reg2, + const FPRegister& reg3, + const FPRegister& reg4) { + RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); + IncludeByRegList(availablefp_, include); +} + + +void UseScratchRegisterScope::Exclude(const CPURegList& list) { + if (list.type() == CPURegister::kRegister) { + ExcludeByRegList(available_, list.list()); + } else { + VIXL_ASSERT(list.type() == CPURegister::kVRegister); + ExcludeByRegList(availablefp_, list.list()); + } +} + + +void UseScratchRegisterScope::Exclude(const Register& reg1, + const Register& reg2, + const Register& reg3, + const Register& reg4) { + RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); + ExcludeByRegList(available_, exclude); +} + + +void UseScratchRegisterScope::Exclude(const FPRegister& reg1, + const FPRegister& reg2, + const FPRegister& reg3, + const FPRegister& reg4) { + RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); + ExcludeByRegList(availablefp_, excludefp); +} + + +void UseScratchRegisterScope::Exclude(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4) { + RegList exclude = 0; + RegList excludefp = 0; + + const CPURegister regs[] = {reg1, reg2, reg3, reg4}; + + for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) { + if (regs[i].IsRegister()) { + exclude |= regs[i].Bit(); + } else if (regs[i].IsFPRegister()) { + excludefp |= regs[i].Bit(); + } else { + VIXL_ASSERT(regs[i].IsNone()); + } + } + + ExcludeByRegList(available_, exclude); + ExcludeByRegList(availablefp_, excludefp); +} + + +void 
UseScratchRegisterScope::ExcludeAll() { + ExcludeByRegList(available_, available_->list()); + ExcludeByRegList(availablefp_, availablefp_->list()); +} + + +CPURegister UseScratchRegisterScope::AcquireNextAvailable( + CPURegList* available) { + VIXL_CHECK(!available->IsEmpty()); + CPURegister result = available->PopLowestIndex(); + VIXL_ASSERT(!AreAliased(result, xzr, sp)); + return result; +} + + +void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) { + ReleaseByRegList(available, static_cast<RegList>(1) << code); +} + + +void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available, + RegList regs) { + available->set_list(available->list() | regs); +} + + +void UseScratchRegisterScope::IncludeByRegList(CPURegList* available, + RegList regs) { + available->set_list(available->list() | regs); +} + + +void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available, + RegList exclude) { + available->set_list(available->list() & ~exclude); +} + +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h new file mode 100644 index 000000000..352794432 --- /dev/null +++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h @@ -0,0 +1,2494 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_ +#define VIXL_A64_MACRO_ASSEMBLER_A64_H_ + +#include <algorithm> +#include <limits> + +#include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/vixl/Debugger-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Instrument-vixl.h" +#include "jit/arm64/vixl/Simulator-Constants-vixl.h" + +#define LS_MACRO_LIST(V) \ + V(Ldrb, Register&, rt, LDRB_w) \ + V(Strb, Register&, rt, STRB_w) \ + V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \ + V(Ldrh, Register&, rt, LDRH_w) \ + V(Strh, Register&, rt, STRH_w) \ + V(Ldrsh, Register&, rt, rt.Is64Bits() ? 
LDRSH_x : LDRSH_w) \ + V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \ + V(Str, CPURegister&, rt, StoreOpFor(rt)) \ + V(Ldrsw, Register&, rt, LDRSW_x) + + +#define LSPAIR_MACRO_LIST(V) \ + V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \ + V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ + V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) + +namespace vixl { + +// Forward declaration +class MacroAssembler; +class UseScratchRegisterScope; + +// This scope has the following purposes: +// * Acquire/Release the underlying assembler's code buffer. +// * This is mandatory before emitting. +// * Emit the literal or veneer pools if necessary before emitting the +// macro-instruction. +// * Ensure there is enough space to emit the macro-instruction. +class EmissionCheckScope { + public: + EmissionCheckScope(MacroAssembler* masm, size_t size) + : masm_(masm) + { } + + protected: + MacroAssembler* masm_; +#ifdef DEBUG + Label start_; + size_t size_; +#endif +}; + + +// Helper for common Emission checks. +// The macro-instruction maps to a single instruction. +class SingleEmissionCheckScope : public EmissionCheckScope { + public: + explicit SingleEmissionCheckScope(MacroAssembler* masm) + : EmissionCheckScope(masm, kInstructionSize) {} +}; + + +// The macro instruction is a "typical" macro-instruction. Typical macro- +// instruction only emit a few instructions, a few being defined as 8 here. +class MacroEmissionCheckScope : public EmissionCheckScope { + public: + explicit MacroEmissionCheckScope(MacroAssembler* masm) + : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {} + + private: + static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize; +}; + + +enum BranchType { + // Copies of architectural conditions. + // The associated conditions can be used in place of those, the code will + // take care of reinterpreting them with the correct type. + integer_eq = eq, + integer_ne = ne, + integer_hs = hs, + integer_lo = lo, + integer_mi = mi, + integer_pl = pl, + integer_vs = vs, + integer_vc = vc, + integer_hi = hi, + integer_ls = ls, + integer_ge = ge, + integer_lt = lt, + integer_gt = gt, + integer_le = le, + integer_al = al, + integer_nv = nv, + + // These two are *different* from the architectural codes al and nv. + // 'always' is used to generate unconditional branches. + // 'never' is used to not generate a branch (generally as the inverse + // branch type of 'always). + always, never, + // cbz and cbnz + reg_zero, reg_not_zero, + // tbz and tbnz + reg_bit_clear, reg_bit_set, + + // Aliases. + kBranchTypeFirstCondition = eq, + kBranchTypeLastCondition = nv, + kBranchTypeFirstUsingReg = reg_zero, + kBranchTypeFirstUsingBit = reg_bit_clear +}; + + +enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg }; + + +class MacroAssembler : public js::jit::Assembler { + public: + MacroAssembler(); + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. + void FinalizeCode(); + + + // Constant generation helpers. + // These functions return the number of instructions required to move the + // immediate into the destination register. Also, if the masm pointer is + // non-null, it generates the code to do so. + // The two features are implemented using one function to avoid duplication of + // the logic. + // The function can be used to evaluate the cost of synthesizing an + // instruction using 'mov immediate' instructions. 
A user might prefer loading + // a constant using the literal pool instead of using multiple 'mov immediate' + // instructions. + static int MoveImmediateHelper(MacroAssembler* masm, + const Register &rd, + uint64_t imm); + static bool OneInstrMoveImmediateHelper(MacroAssembler* masm, + const Register& dst, + int64_t imm); + + + // Logical macros. + void And(const Register& rd, + const Register& rn, + const Operand& operand); + void Ands(const Register& rd, + const Register& rn, + const Operand& operand); + void Bic(const Register& rd, + const Register& rn, + const Operand& operand); + void Bics(const Register& rd, + const Register& rn, + const Operand& operand); + void Orr(const Register& rd, + const Register& rn, + const Operand& operand); + void Orn(const Register& rd, + const Register& rn, + const Operand& operand); + void Eor(const Register& rd, + const Register& rn, + const Operand& operand); + void Eon(const Register& rd, + const Register& rn, + const Operand& operand); + void Tst(const Register& rn, const Operand& operand); + void LogicalMacro(const Register& rd, + const Register& rn, + const Operand& operand, + LogicalOp op); + + // Add and sub macros. + void Add(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S = LeaveFlags); + void Adds(const Register& rd, + const Register& rn, + const Operand& operand); + void Sub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S = LeaveFlags); + void Subs(const Register& rd, + const Register& rn, + const Operand& operand); + void Cmn(const Register& rn, const Operand& operand); + void Cmp(const Register& rn, const Operand& operand); + void Neg(const Register& rd, + const Operand& operand); + void Negs(const Register& rd, + const Operand& operand); + + void AddSubMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + // Add/sub with carry macros. + void Adc(const Register& rd, + const Register& rn, + const Operand& operand); + void Adcs(const Register& rd, + const Register& rn, + const Operand& operand); + void Sbc(const Register& rd, + const Register& rn, + const Operand& operand); + void Sbcs(const Register& rd, + const Register& rn, + const Operand& operand); + void Ngc(const Register& rd, + const Operand& operand); + void Ngcs(const Register& rd, + const Operand& operand); + void AddSubWithCarryMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + // Move macros. + void Mov(const Register& rd, uint64_t imm); + void Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg); + void Mvn(const Register& rd, uint64_t imm) { + Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask)); + } + void Mvn(const Register& rd, const Operand& operand); + + // Try to move an immediate into the destination register in a single + // instruction. Returns true for success, and updates the contents of dst. + // Returns false, otherwise. + bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + + // Move an immediate into register dst, and return an Operand object for + // use with a subsequent instruction that accepts a shift. The value moved + // into dst is not necessarily equal to imm; it may have had a shifting + // operation applied to it that will be subsequently undone by the shift + // applied in the Operand. 
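+  // For example (illustrative): with imm == 0xabcd000, the value 0xabcd is
+  // moved into dst and Operand(dst, LSL, 12) is returned, letting the caller
+  // fold the remaining shift into a shifted-register form of the operation.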
+ Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm); + + // Synthesises the address represented by a MemOperand into a register. + void ComputeAddress(const Register& dst, const MemOperand& mem_op); + + // Conditional macros. + void Ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + void Ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + void ConditionalCompareMacro(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + void Csel(const Register& rd, + const Register& rn, + const Operand& operand, + Condition cond); + + // Load/store macros. +#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ + void FN(const REGTYPE REG, const MemOperand& addr); + LS_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op); + +#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr); + LSPAIR_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + + void Prfm(PrefetchOperation op, const MemOperand& addr); + + // Push or pop up to 4 registers of the same width to or from the stack, + // using the current stack pointer as set by SetStackPointer. + // + // If an argument register is 'NoReg', all further arguments are also assumed + // to be 'NoReg', and are thus not pushed or popped. + // + // Arguments are ordered such that "Push(a, b);" is functionally equivalent + // to "Push(a); Push(b);". + // + // It is valid to push the same register more than once, and there is no + // restriction on the order in which registers are specified. + // + // It is not valid to pop into the same register more than once in one + // operation, not even into the zero register. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then it + // must be aligned to 16 bytes on entry and the total size of the specified + // registers must also be a multiple of 16 bytes. + // + // Even if the current stack pointer is not the system stack pointer (sp), + // Push (and derived methods) will still modify the system stack pointer in + // order to comply with ABI rules about accessing memory below the system + // stack pointer. + // + // Other than the registers passed into Pop, the stack pointer and (possibly) + // the system stack pointer, these methods do not modify any other registers. + void Push(const CPURegister& src0, const CPURegister& src1 = NoReg, + const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg); + void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg, + const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg); + void PushStackPointer(); + + // Alternative forms of Push and Pop, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses (as in the A32 push + // and pop instructions). + // + // (Push|Pop)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are + // supported. + // + // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. 
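+  // For example (illustrative):
+  //   masm.PushXRegList(x19.Bit() | x20.Bit() | x21.Bit() | x22.Bit());
+  // pushes the four registers with two stp instructions, x21/x22 ending up at
+  // the higher addresses.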
+ void PushCPURegList(CPURegList registers); + void PopCPURegList(CPURegList registers); + + void PushSizeRegList(RegList registers, unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PushCPURegList(CPURegList(type, reg_size, registers)); + } + void PopSizeRegList(RegList registers, unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PopCPURegList(CPURegList(type, reg_size, registers)); + } + void PushXRegList(RegList regs) { + PushSizeRegList(regs, kXRegSize); + } + void PopXRegList(RegList regs) { + PopSizeRegList(regs, kXRegSize); + } + void PushWRegList(RegList regs) { + PushSizeRegList(regs, kWRegSize); + } + void PopWRegList(RegList regs) { + PopSizeRegList(regs, kWRegSize); + } + void PushDRegList(RegList regs) { + PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister); + } + void PopDRegList(RegList regs) { + PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister); + } + void PushSRegList(RegList regs) { + PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister); + } + void PopSRegList(RegList regs) { + PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister); + } + + // Push the specified register 'count' times. + void PushMultipleTimes(int count, Register src); + + // Poke 'src' onto the stack. The offset is in bytes. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then sp + // must be aligned to 16 bytes. + void Poke(const Register& src, const Operand& offset); + + // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then sp + // must be aligned to 16 bytes. + void Peek(const Register& dst, const Operand& offset); + + // Alternative forms of Peek and Poke, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses. + // + // (Peek|Poke)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are + // supported. + // + // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred. 
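+  // For example (illustrative):
+  //   masm.PokeXRegList(x0.Bit() | x1.Bit(), 16);
+  // stores x0 at [<sp>, #16] and x1 at [<sp>, #24] without adjusting the
+  // stack pointer.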
+ void PeekCPURegList(CPURegList registers, int64_t offset) { + LoadCPURegList(registers, MemOperand(StackPointer(), offset)); + } + void PokeCPURegList(CPURegList registers, int64_t offset) { + StoreCPURegList(registers, MemOperand(StackPointer(), offset)); + } + + void PeekSizeRegList(RegList registers, int64_t offset, unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PeekCPURegList(CPURegList(type, reg_size, registers), offset); + } + void PokeSizeRegList(RegList registers, int64_t offset, unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PokeCPURegList(CPURegList(type, reg_size, registers), offset); + } + void PeekXRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kXRegSize); + } + void PokeXRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kXRegSize); + } + void PeekWRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kWRegSize); + } + void PokeWRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kWRegSize); + } + void PeekDRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); + } + void PokeDRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); + } + void PeekSRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); + } + void PokeSRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); + } + + + // Claim or drop stack space without actually accessing memory. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then it + // must be aligned to 16 bytes and the size claimed or dropped must be a + // multiple of 16 bytes. + void Claim(const Operand& size); + void Drop(const Operand& size); + + // Preserve the callee-saved registers (as defined by AAPCS64). + // + // Higher-numbered registers are pushed before lower-numbered registers, and + // thus get higher addresses. + // Floating-point registers are pushed before general-purpose registers, and + // thus get higher addresses. + // + // This method must not be called unless StackPointer() is sp, and it is + // aligned to 16 bytes. + void PushCalleeSavedRegisters(); + + // Restore the callee-saved registers (as defined by AAPCS64). + // + // Higher-numbered registers are popped after lower-numbered registers, and + // thus come from higher addresses. + // Floating-point registers are popped after general-purpose registers, and + // thus come from higher addresses. + // + // This method must not be called unless StackPointer() is sp, and it is + // aligned to 16 bytes. + void PopCalleeSavedRegisters(); + + void LoadCPURegList(CPURegList registers, const MemOperand& src); + void StoreCPURegList(CPURegList registers, const MemOperand& dst); + + // Remaining instructions are simple pass-through calls to the assembler. 
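+  // Each wrapper below only adds a SingleEmissionCheckScope (plus, where
+  // relevant, zero-register asserts) around the corresponding assembler call;
+  // for example, Asr(x0, x1, 4) simply emits asr x0, x1, #4.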
+ void Adr(const Register& rd, Label* label) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + adr(rd, label); + } + void Adrp(const Register& rd, Label* label) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + adrp(rd, label); + } + void Asr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + asr(rd, rn, shift); + } + void Asr(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + asrv(rd, rn, rm); + } + + // Branch type inversion relies on these relations. + VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && + (reg_bit_clear == (reg_bit_set ^ 1)) && + (always == (never ^ 1))); + + BranchType InvertBranchType(BranchType type) { + if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { + return static_cast<BranchType>( + InvertCondition(static_cast<Condition>(type))); + } else { + return static_cast<BranchType>(type ^ 1); + } + } + + void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); + + void B(Label* label); + void B(Label* label, Condition cond); + void B(Condition cond, Label* label) { + B(label, cond); + } + void Bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfm(rd, rn, immr, imms); + } + void Bfi(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfi(rd, rn, lsb, width); + } + void Bfxil(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfxil(rd, rn, lsb, width); + } + void Bind(Label* label); + // Bind a label to a specified offset from the start of the buffer. 
+ void BindToOffset(Label* label, ptrdiff_t offset); + void Bl(Label* label) { + SingleEmissionCheckScope guard(this); + bl(label); + } + void Blr(const Register& xn) { + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + blr(xn); + } + void Br(const Register& xn) { + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + br(xn); + } + void Brk(int code = 0) { + SingleEmissionCheckScope guard(this); + brk(code); + } + void Cbnz(const Register& rt, Label* label); + void Cbz(const Register& rt, Label* label); + void Cinc(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cinc(rd, rn, cond); + } + void Cinv(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cinv(rd, rn, cond); + } + void Clrex() { + SingleEmissionCheckScope guard(this); + clrex(); + } + void Cls(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cls(rd, rn); + } + void Clz(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + clz(rd, rn); + } + void Cneg(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cneg(rd, rn, cond); + } + void Cset(const Register& rd, Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + cset(rd, cond); + } + void Csetm(const Register& rd, Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + csetm(rd, cond); + } + void Csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csinc(rd, rn, rm, cond); + } + void Csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csinv(rd, rn, rm, cond); + } + void Csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csneg(rd, rn, rm, cond); + } + void Dmb(BarrierDomain domain, BarrierType type) { + SingleEmissionCheckScope guard(this); + dmb(domain, type); + } + void Dsb(BarrierDomain domain, BarrierType type) { + SingleEmissionCheckScope guard(this); + dsb(domain, type); + } + void Extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + extr(rd, rn, rm, lsb); + } + void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fadd(vd, vn, vm); + } + void Fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap = DisableTrap) { + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope 
guard(this); + FPCCompareMacro(vn, vm, nzcv, cond, trap); + } + void Fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + Fccmp(vn, vm, nzcv, cond, EnableTrap); + } + void Fcmp(const VRegister& vn, const VRegister& vm, + FPTrapFlags trap = DisableTrap) { + SingleEmissionCheckScope guard(this); + FPCompareMacro(vn, vm, trap); + } + void Fcmp(const VRegister& vn, double value, + FPTrapFlags trap = DisableTrap); + void Fcmpe(const VRegister& vn, double value); + void Fcmpe(const VRegister& vn, const VRegister& vm) { + Fcmp(vn, vm, EnableTrap); + } + void Fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + fcsel(vd, vn, vm, cond); + } + void Fcvt(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvt(vd, vn); + } + void Fcvtl(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvtl(vd, vn); + } + void Fcvtl2(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvtl2(vd, vn); + } + void Fcvtn(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvtn(vd, vn); + } + void Fcvtn2(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvtn2(vd, vn); + } + void Fcvtxn(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvtxn(vd, vn); + } + void Fcvtxn2(const VRegister& vd, const VRegister& vn) { + SingleEmissionCheckScope guard(this); + fcvtxn2(vd, vn); + } + void Fcvtas(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtas(rd, vn); + } + void Fcvtau(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtau(rd, vn); + } + void Fcvtms(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtms(rd, vn); + } + void Fcvtmu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtmu(rd, vn); + } + void Fcvtns(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtns(rd, vn); + } + void Fcvtnu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtnu(rd, vn); + } + void Fcvtps(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtps(rd, vn); + } + void Fcvtpu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtpu(rd, vn); + } + void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtzs(rd, vn, fbits); + } + void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtzu(rd, vn, fbits); + } + void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fdiv(vd, vn, vm); + } + void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fmax(vd, vn, vm); + } + void Fmaxnm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + 
SingleEmissionCheckScope guard(this); + fmaxnm(vd, vn, vm); + } + void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fmin(vd, vn, vm); + } + void Fminnm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fminnm(vd, vn, vm); + } + void Fmov(VRegister vd, VRegister vn) { + SingleEmissionCheckScope guard(this); + // Only emit an instruction if vd and vn are different, and they are both D + // registers. fmov(s0, s0) is not a no-op because it clears the top word of + // d0. Technically, fmov(d0, d0) is not a no-op either because it clears + // the top of q0, but VRegister does not currently support Q registers. + if (!vd.Is(vn) || !vd.Is64Bits()) { + fmov(vd, vn); + } + } + void Fmov(VRegister vd, Register rn) { + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + fmov(vd, rn); + } + void Fmov(const VRegister& vd, int index, const Register& rn) { + SingleEmissionCheckScope guard(this); + fmov(vd, index, rn); + } + void Fmov(const Register& rd, const VRegister& vn, int index) { + SingleEmissionCheckScope guard(this); + fmov(rd, vn, index); + } + + // Provide explicit double and float interfaces for FP immediate moves, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of vd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. + void Fmov(VRegister vd, double imm); + void Fmov(VRegister vd, float imm); + // Provide a template to allow other types to be converted automatically. + template<typename T> + void Fmov(VRegister vd, T imm) { + Fmov(vd, static_cast<double>(imm)); + } + void Fmov(Register rd, VRegister vn) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fmov(rd, vn); + } + void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fmul(vd, vn, vm); + } + void Fnmul(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fnmul(vd, vn, vm); + } + void Fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + SingleEmissionCheckScope guard(this); + fmadd(vd, vn, vm, va); + } + void Fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + SingleEmissionCheckScope guard(this); + fmsub(vd, vn, vm, va); + } + void Fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + SingleEmissionCheckScope guard(this); + fnmadd(vd, vn, vm, va); + } + void Fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + SingleEmissionCheckScope guard(this); + fnmsub(vd, vn, vm, va); + } + void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + SingleEmissionCheckScope guard(this); + fsub(vd, vn, vm); + } + void Hint(SystemHint code) { + SingleEmissionCheckScope guard(this); + hint(code); + } + void Hlt(int code) { + SingleEmissionCheckScope guard(this); + hlt(code); + } + void Isb() { + SingleEmissionCheckScope guard(this); + isb(); + } + void Ldar(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldar(rt, src); + } + void Ldarb(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldarb(rt, src); + } + void Ldarh(const Register& rt, const MemOperand& 
src) { + SingleEmissionCheckScope guard(this); + ldarh(rt, src); + } + void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) { + VIXL_ASSERT(!rt.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + ldaxp(rt, rt2, src); + } + void Ldaxr(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldaxr(rt, src); + } + void Ldaxrb(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldaxrb(rt, src); + } + void Ldaxrh(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldaxrh(rt, src); + } + void Ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldnp(rt, rt2, src); + } + // Provide both double and float interfaces for FP immediate loads, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of fd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. + void Ldr(const VRegister& vt, double imm) { + SingleEmissionCheckScope guard(this); + if (vt.Is64Bits()) { + ldr(vt, imm); + } else { + ldr(vt, static_cast<float>(imm)); + } + } + void Ldr(const VRegister& vt, float imm) { + SingleEmissionCheckScope guard(this); + if (vt.Is32Bits()) { + ldr(vt, imm); + } else { + ldr(vt, static_cast<double>(imm)); + } + } + /* + void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) { + VIXL_ASSERT(vt.IsQ()); + SingleEmissionCheckScope guard(this); + ldr(vt, new Literal<uint64_t>(high64, low64, + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool)); + } + */ + void Ldr(const Register& rt, uint64_t imm) { + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + ldr(rt, imm); + } + void Ldrsw(const Register& rt, uint32_t imm) { + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + ldrsw(rt, imm); + } + void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) { + VIXL_ASSERT(!rt.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + ldxp(rt, rt2, src); + } + void Ldxr(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldxr(rt, src); + } + void Ldxrb(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldxrb(rt, src); + } + void Ldxrh(const Register& rt, const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ldxrh(rt, src); + } + void Lsl(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + lsl(rd, rn, shift); + } + void Lsl(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + lslv(rd, rn, rm); + } + void Lsr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + lsr(rd, rn, shift); + } + void Lsr(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + lsrv(rd, rn, rm); + } + void Madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + 
SingleEmissionCheckScope guard(this); + madd(rd, rn, rm, ra); + } + void Mneg(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + mneg(rd, rn, rm); + } + void Mov(const Register& rd, const Register& rn) { + SingleEmissionCheckScope guard(this); + mov(rd, rn); + } + void Movk(const Register& rd, uint64_t imm, int shift = -1) { + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + movk(rd, imm, shift); + } + void Mrs(const Register& rt, SystemRegister sysreg) { + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + mrs(rt, sysreg); + } + void Msr(SystemRegister sysreg, const Register& rt) { + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + msr(sysreg, rt); + } + void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) { + SingleEmissionCheckScope guard(this); + sys(op1, crn, crm, op2, rt); + } + void Dc(DataCacheOp op, const Register& rt) { + SingleEmissionCheckScope guard(this); + dc(op, rt); + } + void Ic(InstructionCacheOp op, const Register& rt) { + SingleEmissionCheckScope guard(this); + ic(op, rt); + } + void Msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + msub(rd, rn, rm, ra); + } + void Mul(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + mul(rd, rn, rm); + } + void Nop() { + SingleEmissionCheckScope guard(this); + nop(); + } + void Rbit(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rbit(rd, rn); + } + void Ret(const Register& xn = lr) { + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + ret(xn); + } + void Rev(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev(rd, rn); + } + void Rev16(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev16(rd, rn); + } + void Rev32(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev32(rd, rn); + } + void Ror(const Register& rd, const Register& rs, unsigned shift) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rs.IsZero()); + SingleEmissionCheckScope guard(this); + ror(rd, rs, shift); + } + void Ror(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + rorv(rd, rn, rm); + } + void Sbfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfiz(rd, rn, lsb, width); + } + void Sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfm(rd, rn, immr, imms); + } + void Sbfx(const Register& rd, + const Register& rn, + unsigned lsb, + 
unsigned width) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfx(rd, rn, lsb, width); + } + void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) { + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + scvtf(vd, rn, fbits); + } + void Sdiv(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + sdiv(rd, rn, rm); + } + void Smaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + smaddl(rd, rn, rm, ra); + } + void Smsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + smsubl(rd, rn, rm, ra); + } + void Smull(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + smull(rd, rn, rm); + } + void Smulh(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(!xd.IsZero()); + VIXL_ASSERT(!xn.IsZero()); + VIXL_ASSERT(!xm.IsZero()); + SingleEmissionCheckScope guard(this); + smulh(xd, xn, xm); + } + void Stlr(const Register& rt, const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + stlr(rt, dst); + } + void Stlrb(const Register& rt, const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + stlrb(rt, dst); + } + void Stlrh(const Register& rt, const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + stlrh(rt, dst); + } + void Stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + VIXL_ASSERT(!rs.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + stlxp(rs, rt, rt2, dst); + } + void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxr(rs, rt, dst); + } + void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxrb(rs, rt, dst); + } + void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxrh(rs, rt, dst); + } + void Stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + stnp(rt, rt2, dst); + } + void Stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + VIXL_ASSERT(!rs.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + stxp(rs, rt, rt2, dst); + } + void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxr(rs, rt, dst); + } + void Stxrb(const Register& rs, const 
Register& rt, const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxrb(rs, rt, dst); + } + void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(!rs.Aliases(dst.base())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxrh(rs, rt, dst); + } + void Svc(int code) { + SingleEmissionCheckScope guard(this); + svc(code); + } + void Sxtb(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxtb(rd, rn); + } + void Sxth(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxth(rd, rn); + } + void Sxtw(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxtw(rd, rn); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vn3, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vn3, vn4, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vn3, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vn3, vn4, vm); + } + void Tbnz(const Register& rt, unsigned bit_pos, Label* label); + void Tbz(const Register& rt, unsigned bit_pos, Label* label); + void Ubfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfiz(rd, rn, lsb, width); + } + void Ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfm(rd, rn, immr, imms); + } + void Ubfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfx(rd, rn, lsb, width); + } + void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) { + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ucvtf(vd, rn, fbits); + } + void Udiv(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + 
VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + udiv(rd, rn, rm); + } + void Umaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + umaddl(rd, rn, rm, ra); + } + void Umull(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + umull(rd, rn, rm); + } + void Umulh(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(!xd.IsZero()); + VIXL_ASSERT(!xn.IsZero()); + VIXL_ASSERT(!xm.IsZero()); + SingleEmissionCheckScope guard(this); + umulh(xd, xn, xm); + } + void Umsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + umsubl(rd, rn, rm, ra); + } + void Unreachable() { + SingleEmissionCheckScope guard(this); +#ifdef JS_SIMULATOR_ARM64 + hlt(kUnreachableOpcode); +#else + // Branch to 0 to generate a segfault. + // lr - kInstructionSize is the address of the offending instruction. + blr(xzr); +#endif + } + void Uxtb(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxtb(rd, rn); + } + void Uxth(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxth(rd, rn); + } + void Uxtw(const Register& rd, const Register& rn) { + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxtw(rd, rn); + } + + // NEON 3 vector register instructions. 
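+  // Each V(asm, Masm) pair in the list below is expanded by DEFINE_MACRO_ASM_FUNC
+  // (defined after the list) into a thin wrapper around the raw assembler call.
+  // As a rough illustration, the V(add, Add) entry produces the equivalent of:
+  //
+  //   void Add(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+  //     SingleEmissionCheckScope guard(this);
+  //     add(vd, vn, vm);
+  //   }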
+ #define NEON_3VREG_MACRO_LIST(V) \ + V(add, Add) \ + V(addhn, Addhn) \ + V(addhn2, Addhn2) \ + V(addp, Addp) \ + V(and_, And) \ + V(bic, Bic) \ + V(bif, Bif) \ + V(bit, Bit) \ + V(bsl, Bsl) \ + V(cmeq, Cmeq) \ + V(cmge, Cmge) \ + V(cmgt, Cmgt) \ + V(cmhi, Cmhi) \ + V(cmhs, Cmhs) \ + V(cmtst, Cmtst) \ + V(eor, Eor) \ + V(fabd, Fabd) \ + V(facge, Facge) \ + V(facgt, Facgt) \ + V(faddp, Faddp) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxp, Fmaxp) \ + V(fminnmp, Fminnmp) \ + V(fminp, Fminp) \ + V(fmla, Fmla) \ + V(fmls, Fmls) \ + V(fmulx, Fmulx) \ + V(frecps, Frecps) \ + V(frsqrts, Frsqrts) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(mul, Mul) \ + V(orn, Orn) \ + V(orr, Orr) \ + V(pmul, Pmul) \ + V(pmull, Pmull) \ + V(pmull2, Pmull2) \ + V(raddhn, Raddhn) \ + V(raddhn2, Raddhn2) \ + V(rsubhn, Rsubhn) \ + V(rsubhn2, Rsubhn2) \ + V(saba, Saba) \ + V(sabal, Sabal) \ + V(sabal2, Sabal2) \ + V(sabd, Sabd) \ + V(sabdl, Sabdl) \ + V(sabdl2, Sabdl2) \ + V(saddl, Saddl) \ + V(saddl2, Saddl2) \ + V(saddw, Saddw) \ + V(saddw2, Saddw2) \ + V(shadd, Shadd) \ + V(shsub, Shsub) \ + V(smax, Smax) \ + V(smaxp, Smaxp) \ + V(smin, Smin) \ + V(sminp, Sminp) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(sqadd, Sqadd) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(sqdmulh, Sqdmulh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqrdmulh, Sqrdmulh) \ + V(sqrshl, Sqrshl) \ + V(sqshl, Sqshl) \ + V(sqsub, Sqsub) \ + V(srhadd, Srhadd) \ + V(srshl, Srshl) \ + V(sshl, Sshl) \ + V(ssubl, Ssubl) \ + V(ssubl2, Ssubl2) \ + V(ssubw, Ssubw) \ + V(ssubw2, Ssubw2) \ + V(sub, Sub) \ + V(subhn, Subhn) \ + V(subhn2, Subhn2) \ + V(trn1, Trn1) \ + V(trn2, Trn2) \ + V(uaba, Uaba) \ + V(uabal, Uabal) \ + V(uabal2, Uabal2) \ + V(uabd, Uabd) \ + V(uabdl, Uabdl) \ + V(uabdl2, Uabdl2) \ + V(uaddl, Uaddl) \ + V(uaddl2, Uaddl2) \ + V(uaddw, Uaddw) \ + V(uaddw2, Uaddw2) \ + V(uhadd, Uhadd) \ + V(uhsub, Uhsub) \ + V(umax, Umax) \ + V(umaxp, Umaxp) \ + V(umin, Umin) \ + V(uminp, Uminp) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(uqadd, Uqadd) \ + V(uqrshl, Uqrshl) \ + V(uqshl, Uqshl) \ + V(uqsub, Uqsub) \ + V(urhadd, Urhadd) \ + V(urshl, Urshl) \ + V(ushl, Ushl) \ + V(usubl, Usubl) \ + V(usubl2, Usubl2) \ + V(usubw, Usubw) \ + V(usubw2, Usubw2) \ + V(uzp1, Uzp1) \ + V(uzp2, Uzp2) \ + V(zip1, Zip1) \ + V(zip2, Zip2) + + #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, vm); \ + } + NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) + #undef DEFINE_MACRO_ASM_FUNC + + // NEON 2 vector register instructions. 
+ #define NEON_2VREG_MACRO_LIST(V) \ + V(abs, Abs) \ + V(addp, Addp) \ + V(addv, Addv) \ + V(cls, Cls) \ + V(clz, Clz) \ + V(cnt, Cnt) \ + V(fabs, Fabs) \ + V(faddp, Faddp) \ + V(fcvtas, Fcvtas) \ + V(fcvtau, Fcvtau) \ + V(fcvtms, Fcvtms) \ + V(fcvtmu, Fcvtmu) \ + V(fcvtns, Fcvtns) \ + V(fcvtnu, Fcvtnu) \ + V(fcvtps, Fcvtps) \ + V(fcvtpu, Fcvtpu) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxnmv, Fmaxnmv) \ + V(fmaxp, Fmaxp) \ + V(fmaxv, Fmaxv) \ + V(fminnmp, Fminnmp) \ + V(fminnmv, Fminnmv) \ + V(fminp, Fminp) \ + V(fminv, Fminv) \ + V(fneg, Fneg) \ + V(frecpe, Frecpe) \ + V(frecpx, Frecpx) \ + V(frinta, Frinta) \ + V(frinti, Frinti) \ + V(frintm, Frintm) \ + V(frintn, Frintn) \ + V(frintp, Frintp) \ + V(frintx, Frintx) \ + V(frintz, Frintz) \ + V(frsqrte, Frsqrte) \ + V(fsqrt, Fsqrt) \ + V(mov, Mov) \ + V(mvn, Mvn) \ + V(neg, Neg) \ + V(not_, Not) \ + V(rbit, Rbit) \ + V(rev16, Rev16) \ + V(rev32, Rev32) \ + V(rev64, Rev64) \ + V(sadalp, Sadalp) \ + V(saddlp, Saddlp) \ + V(saddlv, Saddlv) \ + V(smaxv, Smaxv) \ + V(sminv, Sminv) \ + V(sqabs, Sqabs) \ + V(sqneg, Sqneg) \ + V(sqxtn, Sqxtn) \ + V(sqxtn2, Sqxtn2) \ + V(sqxtun, Sqxtun) \ + V(sqxtun2, Sqxtun2) \ + V(suqadd, Suqadd) \ + V(sxtl, Sxtl) \ + V(sxtl2, Sxtl2) \ + V(uadalp, Uadalp) \ + V(uaddlp, Uaddlp) \ + V(uaddlv, Uaddlv) \ + V(umaxv, Umaxv) \ + V(uminv, Uminv) \ + V(uqxtn, Uqxtn) \ + V(uqxtn2, Uqxtn2) \ + V(urecpe, Urecpe) \ + V(ursqrte, Ursqrte) \ + V(usqadd, Usqadd) \ + V(uxtl, Uxtl) \ + V(uxtl2, Uxtl2) \ + V(xtn, Xtn) \ + V(xtn2, Xtn2) + + #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn) { \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn); \ + } + NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) + #undef DEFINE_MACRO_ASM_FUNC + + // NEON 2 vector register with immediate instructions. + #define NEON_2VREG_FPIMM_MACRO_LIST(V) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fcmle, Fcmle) \ + V(fcmlt, Fcmlt) + + #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn, \ + double imm) { \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, imm); \ + } + NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) + #undef DEFINE_MACRO_ASM_FUNC + + // NEON by element instructions. 
+ #define NEON_BYELEMENT_MACRO_LIST(V) \ + V(fmul, Fmul) \ + V(fmla, Fmla) \ + V(fmls, Fmls) \ + V(fmulx, Fmulx) \ + V(mul, Mul) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(sqdmulh, Sqdmulh) \ + V(sqrdmulh, Sqrdmulh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) + + #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index \ + ) { \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, vm, vm_index); \ + } + NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) + #undef DEFINE_MACRO_ASM_FUNC + + #define NEON_2VREG_SHIFT_MACRO_LIST(V) \ + V(rshrn, Rshrn) \ + V(rshrn2, Rshrn2) \ + V(shl, Shl) \ + V(shll, Shll) \ + V(shll2, Shll2) \ + V(shrn, Shrn) \ + V(shrn2, Shrn2) \ + V(sli, Sli) \ + V(sqrshrn, Sqrshrn) \ + V(sqrshrn2, Sqrshrn2) \ + V(sqrshrun, Sqrshrun) \ + V(sqrshrun2, Sqrshrun2) \ + V(sqshl, Sqshl) \ + V(sqshlu, Sqshlu) \ + V(sqshrn, Sqshrn) \ + V(sqshrn2, Sqshrn2) \ + V(sqshrun, Sqshrun) \ + V(sqshrun2, Sqshrun2) \ + V(sri, Sri) \ + V(srshr, Srshr) \ + V(srsra, Srsra) \ + V(sshll, Sshll) \ + V(sshll2, Sshll2) \ + V(sshr, Sshr) \ + V(ssra, Ssra) \ + V(uqrshrn, Uqrshrn) \ + V(uqrshrn2, Uqrshrn2) \ + V(uqshl, Uqshl) \ + V(uqshrn, Uqshrn) \ + V(uqshrn2, Uqshrn2) \ + V(urshr, Urshr) \ + V(ursra, Ursra) \ + V(ushll, Ushll) \ + V(ushll2, Ushll2) \ + V(ushr, Ushr) \ + V(usra, Usra) \ + + #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn, \ + int shift) { \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, shift); \ + } + NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) + #undef DEFINE_MACRO_ASM_FUNC + + void Bic(const VRegister& vd, + const int imm8, + const int left_shift = 0) { + SingleEmissionCheckScope guard(this); + bic(vd, imm8, left_shift); + } + void Cmeq(const VRegister& vd, + const VRegister& vn, + int imm) { + SingleEmissionCheckScope guard(this); + cmeq(vd, vn, imm); + } + void Cmge(const VRegister& vd, + const VRegister& vn, + int imm) { + SingleEmissionCheckScope guard(this); + cmge(vd, vn, imm); + } + void Cmgt(const VRegister& vd, + const VRegister& vn, + int imm) { + SingleEmissionCheckScope guard(this); + cmgt(vd, vn, imm); + } + void Cmle(const VRegister& vd, + const VRegister& vn, + int imm) { + SingleEmissionCheckScope guard(this); + cmle(vd, vn, imm); + } + void Cmlt(const VRegister& vd, + const VRegister& vn, + int imm) { + SingleEmissionCheckScope guard(this); + cmlt(vd, vn, imm); + } + void Dup(const VRegister& vd, + const VRegister& vn, + int index) { + SingleEmissionCheckScope guard(this); + dup(vd, vn, index); + } + void Dup(const VRegister& vd, + const Register& rn) { + SingleEmissionCheckScope guard(this); + dup(vd, rn); + } + void Ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index) { + SingleEmissionCheckScope guard(this); + ext(vd, vn, vm, index); + } + void Ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + SingleEmissionCheckScope guard(this); + ins(vd, vd_index, vn, vn_index); + } + void Ins(const VRegister& vd, + int vd_index, + const Register& rn) { + SingleEmissionCheckScope guard(this); + ins(vd, vd_index, 
rn); + } + void Ld1(const VRegister& vt, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld1(vt, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, vt3, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, vt3, vt4, src); + } + void Ld1(const VRegister& vt, + int lane, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld1(vt, lane, src); + } + void Ld1r(const VRegister& vt, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld1r(vt, src); + } + void Ld2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld2(vt, vt2, src); + } + void Ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld2(vt, vt2, lane, src); + } + void Ld2r(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld2r(vt, vt2, src); + } + void Ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld3(vt, vt2, vt3, src); + } + void Ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld3(vt, vt2, vt3, lane, src); + } + void Ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld3r(vt, vt2, vt3, src); + } + void Ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld4(vt, vt2, vt3, vt4, src); + } + void Ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld4(vt, vt2, vt3, vt4, lane, src); + } + void Ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + SingleEmissionCheckScope guard(this); + ld4r(vt, vt2, vt3, vt4, src); + } + void Mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + SingleEmissionCheckScope guard(this); + mov(vd, vd_index, vn, vn_index); + } + void Mov(const VRegister& vd, + const VRegister& vn, + int index) { + SingleEmissionCheckScope guard(this); + mov(vd, vn, index); + } + void Mov(const VRegister& vd, + int vd_index, + const Register& rn) { + SingleEmissionCheckScope guard(this); + mov(vd, vd_index, rn); + } + void Mov(const Register& rd, + const VRegister& vn, + int vn_index) { + SingleEmissionCheckScope guard(this); + mov(rd, vn, vn_index); + } + void Movi(const VRegister& vd, + uint64_t imm, + Shift shift = LSL, + int shift_amount = 0); + void Movi(const VRegister& vd, uint64_t hi, uint64_t lo); + void Mvni(const VRegister& vd, + const int imm8, + Shift shift = LSL, + const int shift_amount = 0) { + SingleEmissionCheckScope guard(this); + mvni(vd, imm8, shift, shift_amount); + } + void Orr(const VRegister& vd, + const int imm8, + 
const int left_shift = 0) { + SingleEmissionCheckScope guard(this); + orr(vd, imm8, left_shift); + } + void Scvtf(const VRegister& vd, + const VRegister& vn, + int fbits = 0) { + SingleEmissionCheckScope guard(this); + scvtf(vd, vn, fbits); + } + void Ucvtf(const VRegister& vd, + const VRegister& vn, + int fbits = 0) { + SingleEmissionCheckScope guard(this); + ucvtf(vd, vn, fbits); + } + void Fcvtzs(const VRegister& vd, + const VRegister& vn, + int fbits = 0) { + SingleEmissionCheckScope guard(this); + fcvtzs(vd, vn, fbits); + } + void Fcvtzu(const VRegister& vd, + const VRegister& vn, + int fbits = 0) { + SingleEmissionCheckScope guard(this); + fcvtzu(vd, vn, fbits); + } + void St1(const VRegister& vt, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st1(vt, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st1(vt, vt2, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st1(vt, vt2, vt3, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st1(vt, vt2, vt3, vt4, dst); + } + void St1(const VRegister& vt, + int lane, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st1(vt, lane, dst); + } + void St2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st2(vt, vt2, dst); + } + void St3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st3(vt, vt2, vt3, dst); + } + void St4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st4(vt, vt2, vt3, vt4, dst); + } + void St2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st2(vt, vt2, lane, dst); + } + void St3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st3(vt, vt2, vt3, lane, dst); + } + void St4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& dst) { + SingleEmissionCheckScope guard(this); + st4(vt, vt2, vt3, vt4, lane, dst); + } + void Smov(const Register& rd, + const VRegister& vn, + int vn_index) { + SingleEmissionCheckScope guard(this); + smov(rd, vn, vn_index); + } + void Umov(const Register& rd, + const VRegister& vn, + int vn_index) { + SingleEmissionCheckScope guard(this); + umov(rd, vn, vn_index); + } + void Crc32b(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32b(rd, rn, rm); + } + void Crc32h(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32h(rd, rn, rm); + } + void Crc32w(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32w(rd, rn, rm); + } + void Crc32x(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32x(rd, rn, rm); + } + void Crc32cb(const Register& rd, + const Register& rn, + const Register& rm) { + 
SingleEmissionCheckScope guard(this); + crc32cb(rd, rn, rm); + } + void Crc32ch(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32ch(rd, rn, rm); + } + void Crc32cw(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32cw(rd, rn, rm); + } + void Crc32cx(const Register& rd, + const Register& rn, + const Register& rm) { + SingleEmissionCheckScope guard(this); + crc32cx(rd, rn, rm); + } + + // Push the system stack pointer (sp) down to allow the same to be done to + // the current stack pointer (according to StackPointer()). This must be + // called _before_ accessing the memory. + // + // This is necessary when pushing or otherwise adding things to the stack, to + // satisfy the AAPCS64 constraint that the memory below the system stack + // pointer is not accessed. + // + // This method asserts that StackPointer() is not sp, since the call does + // not make sense in that context. + // + // TODO: This method can only accept values of 'space' that can be encoded in + // one instruction. Refer to the implementation for details. + void BumpSystemStackPointer(const Operand& space); + + // Set the current stack pointer, but don't generate any code. + void SetStackPointer64(const Register& stack_pointer) { + VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer)); + sp_ = stack_pointer; + } + + // Return the current stack pointer, as set by SetStackPointer. + const Register& StackPointer() const { + return sp_; + } + + const Register& GetStackPointer64() const { + return sp_; + } + + const js::jit::Register getStackPointer() const { + int code = sp_.code(); + if (code == kSPRegInternalCode) { + code = 31; + } + return js::jit::Register::FromCode(code); + } + + CPURegList* TmpList() { return &tmp_list_; } + CPURegList* FPTmpList() { return &fptmp_list_; } + + // Trace control when running the debug simulator. + // + // For example: + // + // __ Trace(LOG_REGS, TRACE_ENABLE); + // Will add registers to the trace if it wasn't already the case. + // + // __ Trace(LOG_DISASM, TRACE_DISABLE); + // Will stop logging disassembly. It has no effect if the disassembly wasn't + // already being logged. + void Trace(TraceParameters parameters, TraceCommand command); + + // Log the requested data independently of what is being traced. + // + // For example: + // + // __ Log(LOG_FLAGS) + // Will output the flags. + void Log(TraceParameters parameters); + + // Enable or disable instrumentation when an Instrument visitor is attached to + // the simulator. + void EnableInstrumentation(); + void DisableInstrumentation(); + + // Add a marker to the instrumentation data produced by an Instrument visitor. + // The name is a two character string that will be attached to the marker in + // the output data. + void AnnotateInstrumentation(const char* marker_name); + + private: + // The actual Push and Pop implementations. These don't generate any code + // other than that required for the push or pop. This allows + // (Push|Pop)CPURegList to bundle together setup code for a large block of + // registers. + // + // Note that size is per register, and is specified in bytes. 
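+  // As a rough illustration, pushing two X registers corresponds to count = 2 and
+  // size = 8 (the per-register size in bytes), i.e. 16 bytes of stack in total.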
+ void PushHelper(int count, int size, + const CPURegister& src0, const CPURegister& src1, + const CPURegister& src2, const CPURegister& src3); + void PopHelper(int count, int size, + const CPURegister& dst0, const CPURegister& dst1, + const CPURegister& dst2, const CPURegister& dst3); + + void Movi16bitHelper(const VRegister& vd, uint64_t imm); + void Movi32bitHelper(const VRegister& vd, uint64_t imm); + void Movi64bitHelper(const VRegister& vd, uint64_t imm); + + // Perform necessary maintenance operations before a push or pop. + // + // Note that size is per register, and is specified in bytes. + void PrepareForPush(int count, int size); + void PrepareForPop(int count, int size); + + // The actual implementation of load and store operations for CPURegList. + enum LoadStoreCPURegListAction { + kLoad, + kStore + }; + void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation, + CPURegList registers, + const MemOperand& mem); + // Returns a MemOperand suitable for loading or storing a CPURegList at `dst`. + // This helper may allocate registers from `scratch_scope` and generate code + // to compute an intermediate address. The resulting MemOperand is only valid + // as long as `scratch_scope` remains valid. + MemOperand BaseMemOperandForLoadStoreCPURegList( + const CPURegList& registers, + const MemOperand& mem, + UseScratchRegisterScope* scratch_scope); + + bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) { + return !Instruction::IsValidImmPCOffset(branch_type, nextOffset().getOffset() - label->offset()); + } + + // The register to use as a stack pointer for stack operations. + Register sp_; + + // Scratch registers available for use by the MacroAssembler. + CPURegList tmp_list_; + CPURegList fptmp_list_; + + ptrdiff_t checkpoint_; + ptrdiff_t recommended_checkpoint_; +}; + + +// All Assembler emits MUST acquire/release the underlying code buffer. The +// helper scope below will do so and optionally ensure the buffer is big enough +// to receive the emit. It is possible to request the scope not to perform any +// checks (kNoCheck) if for example it is known in advance the buffer size is +// adequate or there is some other size checking mechanism in place. +class CodeBufferCheckScope { + public: + // Tell whether or not the scope needs to ensure the associated CodeBuffer + // has enough space for the requested size. + enum CheckPolicy { + kNoCheck, + kCheck + }; + + // Tell whether or not the scope should assert the amount of code emitted + // within the scope is consistent with the requested amount. + enum AssertPolicy { + kNoAssert, // No assert required. + kExactSize, // The code emitted must be exactly size bytes. + kMaximumSize // The code emitted must be at most size bytes. + }; + + CodeBufferCheckScope(Assembler* assm, + size_t size, + CheckPolicy check_policy = kCheck, + AssertPolicy assert_policy = kMaximumSize) + { } + + // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert). + explicit CodeBufferCheckScope(Assembler* assm) {} +}; + + +// Use this scope when you need a one-to-one mapping between methods and +// instructions. This scope prevents the MacroAssembler from being called and +// literal pools from being emitted. It also asserts the number of instructions +// emitted is what you specified when creating the scope. +// FIXME: Because of the disabled calls below, this class asserts nothing. 
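+//
+// A rough usage sketch (masm and label being a MacroAssembler and a Label already
+// in scope; the scope is told exactly how many raw assembler instructions to expect):
+//
+//   {
+//     InstructionAccurateScope scope(&masm, 2);
+//     masm.adr(x0, &label);  // one instruction
+//     masm.nop();            // one instruction
+//   }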
+class InstructionAccurateScope : public CodeBufferCheckScope { + public: + InstructionAccurateScope(MacroAssembler* masm, + int64_t count, + AssertPolicy policy = kExactSize) + : CodeBufferCheckScope(masm, + (count * kInstructionSize), + kCheck, + policy) { + } +}; + + +// This scope utility allows scratch registers to be managed safely. The +// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch +// registers. These registers can be allocated on demand, and will be returned +// at the end of the scope. +// +// When the scope ends, the MacroAssembler's lists will be restored to their +// original state, even if the lists were modified by some other means. +class UseScratchRegisterScope { + public: + // This constructor implicitly calls the `Open` function to initialise the + // scope, so it is ready to use immediately after it has been constructed. + explicit UseScratchRegisterScope(MacroAssembler* masm); + // This constructor allows deferred and optional initialisation of the scope. + // The user is required to explicitly call the `Open` function before using + // the scope. + UseScratchRegisterScope(); + // This function performs the actual initialisation work. + void Open(MacroAssembler* masm); + + // The destructor always implicitly calls the `Close` function. + ~UseScratchRegisterScope(); + // This function performs the cleaning-up work. It must succeed even if the + // scope has not been opened. It is safe to call multiple times. + void Close(); + + + bool IsAvailable(const CPURegister& reg) const; + + + // Take a register from the appropriate temps list. It will be returned + // automatically when the scope ends. + Register AcquireW() { return AcquireNextAvailable(available_).W(); } + Register AcquireX() { return AcquireNextAvailable(available_).X(); } + VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); } + VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); } + + + Register AcquireSameSizeAs(const Register& reg); + VRegister AcquireSameSizeAs(const VRegister& reg); + + + // Explicitly release an acquired (or excluded) register, putting it back in + // the appropriate temps list. + void Release(const CPURegister& reg); + + + // Make the specified registers available as scratch registers for the + // duration of this scope. + void Include(const CPURegList& list); + void Include(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg); + void Include(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + + // Make sure that the specified registers are not available in this scope. + // This can be used to prevent helper functions from using sensitive + // registers, for example. + void Exclude(const CPURegList& list); + void Exclude(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg); + void Exclude(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + void Exclude(const CPURegister& reg1, + const CPURegister& reg2 = NoCPUReg, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); + + + // Prevent any scratch registers from being used in this scope. 
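+  // As a rough usage sketch:
+  //
+  //   UseScratchRegisterScope temps(&masm);
+  //   const Register scratch = temps.AcquireX();   // take a temporary from TmpList()
+  //   // ... use scratch ...
+  //   temps.ExcludeAll();                          // no further temporaries below this point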
+ void ExcludeAll(); + + + private: + static CPURegister AcquireNextAvailable(CPURegList* available); + + static void ReleaseByCode(CPURegList* available, int code); + + static void ReleaseByRegList(CPURegList* available, + RegList regs); + + static void IncludeByRegList(CPURegList* available, + RegList exclude); + + static void ExcludeByRegList(CPURegList* available, + RegList exclude); + + // Available scratch registers. + CPURegList* available_; // kRegister + CPURegList* availablefp_; // kVRegister + + // The state of the available lists at the start of this scope. + RegList old_available_; // kRegister + RegList old_availablefp_; // kVRegister +#ifdef DEBUG + bool initialised_; +#endif + + // Disallow copy constructor and operator=. + UseScratchRegisterScope(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } + void operator=(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } +}; + + +} // namespace vixl + +#endif // VIXL_A64_MACRO_ASSEMBLER_A64_H_ diff --git a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp new file mode 100644 index 000000000..3b2e0a8bc --- /dev/null +++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp @@ -0,0 +1,712 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jsutil.h" + +#include "jit/arm64/vixl/Assembler-vixl.h" +#include "jit/Label.h" + +namespace vixl { + + +// Assembler +void Assembler::FinalizeCode() { +#ifdef DEBUG + finalized_ = true; +#endif +} + +// Unbound Label Representation. +// +// We can have multiple branches using the same label before it is bound. +// Assembler::bind() must then be able to enumerate all the branches and patch +// them to target the final label location. +// +// When a Label is unbound with uses, its offset is pointing to the tip of a +// linked list of uses. The uses can be branches or adr/adrp instructions. In +// the case of branches, the next member in the linked list is simply encoded +// as the branch target. 
For adr/adrp, the relative pc offset is encoded in the +// immediate field as a signed instruction offset. +// +// In both cases, the end of the list is encoded as a 0 pc offset, i.e. the +// tail is pointing to itself. + +static const ptrdiff_t kEndOfLabelUseList = 0; + +BufferOffset +MozBaseAssembler::NextLink(BufferOffset cur) +{ + Instruction* link = getInstructionAt(cur); + // Raw encoded offset. + ptrdiff_t offset = link->ImmPCRawOffset(); + // End of the list is encoded as 0. + if (offset == kEndOfLabelUseList) + return BufferOffset(); + // The encoded offset is the number of instructions to move. + return BufferOffset(cur.getOffset() + offset * kInstructionSize); +} + +static ptrdiff_t +EncodeOffset(BufferOffset cur, BufferOffset next) +{ + MOZ_ASSERT(next.assigned() && cur.assigned()); + ptrdiff_t offset = next.getOffset() - cur.getOffset(); + MOZ_ASSERT(offset % kInstructionSize == 0); + return offset / kInstructionSize; +} + +void +MozBaseAssembler::SetNextLink(BufferOffset cur, BufferOffset next) +{ + Instruction* link = getInstructionAt(cur); + link->SetImmPCRawOffset(EncodeOffset(cur, next)); +} + +// A common implementation for the LinkAndGet<Type>OffsetTo helpers. +// +// If the label is bound, returns the offset as a multiple of 1 << elementShift. +// Otherwise, links the instruction to the label and returns the raw offset to +// encode. (This will be an instruction count.) +// +// The offset is calculated by aligning the PC and label addresses down to a +// multiple of 1 << elementShift, then calculating the (scaled) offset between +// them. This matches the semantics of adrp, for example. (Assuming that the +// assembler buffer is page-aligned, which it probably isn't.) +// +// For an unbound label, the returned offset will be encodable in the provided +// branch range. If the label is already bound, the caller is expected to make +// sure that it is in range, and emit the necessary branch instrutions if it +// isn't. +// +ptrdiff_t +MozBaseAssembler::LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange, + unsigned elementShift, Label* label) +{ + if (armbuffer_.oom()) + return kEndOfLabelUseList; + + if (label->bound()) { + // The label is bound: all uses are already linked. + ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() >> elementShift); + ptrdiff_t label_offset = ptrdiff_t(label->offset() >> elementShift); + return label_offset - branch_offset; + } + + // Keep track of short-range branches targeting unbound labels. We may need + // to insert veneers in PatchShortRangeBranchToVeneer() below. + if (branchRange < NumShortBranchRangeTypes) { + // This is the last possible branch target. + BufferOffset deadline(branch.getOffset() + + Instruction::ImmBranchMaxForwardOffset(branchRange)); + armbuffer_.registerBranchDeadline(branchRange, deadline); + } + + // The label is unbound and previously unused: Store the offset in the label + // itself for patching by bind(). + if (!label->used()) { + label->use(branch.getOffset()); + return kEndOfLabelUseList; + } + + // The label is unbound and has multiple users. Create a linked list between + // the branches, and update the linked list head in the label struct. This is + // not always trivial since the branches in the linked list have limited + // ranges. + + // What is the earliest buffer offset that would be reachable by the branch + // we're about to add? 
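+  // (As a rough illustration: a test branch such as tbz/tbnz spans roughly
+  // +/-32 KB, so a new branch at buffer offset 0x10000 could reach back no
+  // further than offset 0x8000.)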
+ ptrdiff_t earliestReachable = + branch.getOffset() + Instruction::ImmBranchMinBackwardOffset(branchRange); + + // If the existing instruction at the head of the list is within reach of the + // new branch, we can simply insert the new branch at the front of the list. + if (label->offset() >= earliestReachable) { + ptrdiff_t offset = EncodeOffset(branch, BufferOffset(label)); + label->use(branch.getOffset()); + MOZ_ASSERT(offset != kEndOfLabelUseList); + return offset; + } + + // The label already has a linked list of uses, but we can't reach the head + // of the list with the allowed branch range. Insert this branch at a + // different position in the list. + // + // Find an existing branch, exbr, such that: + // + // 1. The new branch can be reached by exbr, and either + // 2a. The new branch can reach exbr's target, or + // 2b. The exbr branch is at the end of the list. + // + // Then the new branch can be inserted after exbr in the linked list. + // + // We know that it is always possible to find an exbr branch satisfying these + // conditions because of the PatchShortRangeBranchToVeneer() mechanism. All + // branches are guaranteed to either be able to reach the end of the + // assembler buffer, or they will be pointing to an unconditional branch that + // can. + // + // In particular, the end of the list is always a viable candidate, so we'll + // just get that. + BufferOffset next(label); + BufferOffset exbr; + do { + exbr = next; + next = NextLink(next); + } while (next.assigned()); + SetNextLink(exbr, branch); + + // This branch becomes the new end of the list. + return kEndOfLabelUseList; +} + +ptrdiff_t MozBaseAssembler::LinkAndGetByteOffsetTo(BufferOffset branch, Label* label) { + return LinkAndGetOffsetTo(branch, UncondBranchRangeType, 0, label); +} + +ptrdiff_t MozBaseAssembler::LinkAndGetInstructionOffsetTo(BufferOffset branch, + ImmBranchRangeType branchRange, + Label* label) { + return LinkAndGetOffsetTo(branch, branchRange, kInstructionSizeLog2, label); +} + +ptrdiff_t MozBaseAssembler::LinkAndGetPageOffsetTo(BufferOffset branch, Label* label) { + return LinkAndGetOffsetTo(branch, UncondBranchRangeType, kPageSizeLog2, label); +} + +BufferOffset Assembler::b(int imm26) { + return EmitBranch(B | ImmUncondBranch(imm26)); +} + + +void Assembler::b(Instruction* at, int imm26) { + return EmitBranch(at, B | ImmUncondBranch(imm26)); +} + + +BufferOffset Assembler::b(int imm19, Condition cond) { + return EmitBranch(B_cond | ImmCondBranch(imm19) | cond); +} + + +void Assembler::b(Instruction* at, int imm19, Condition cond) { + EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond); +} + + +BufferOffset Assembler::b(Label* label) { + // Encode the relative offset from the inserted branch to the label. + return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label)); +} + + +BufferOffset Assembler::b(Label* label, Condition cond) { + // Encode the relative offset from the inserted branch to the label. + return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), cond); +} + +void Assembler::br(Instruction* at, const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + // No need for EmitBranch(): no immediate offset needs fixing. + Emit(at, BR | Rn(xn)); +} + + +void Assembler::blr(Instruction* at, const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + // No need for EmitBranch(): no immediate offset needs fixing. 
+ Emit(at, BLR | Rn(xn)); +} + + +void Assembler::bl(int imm26) { + EmitBranch(BL | ImmUncondBranch(imm26)); +} + + +void Assembler::bl(Instruction* at, int imm26) { + EmitBranch(at, BL | ImmUncondBranch(imm26)); +} + + +void Assembler::bl(Label* label) { + // Encode the relative offset from the inserted branch to the label. + return bl(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label)); +} + + +void Assembler::cbz(const Register& rt, int imm19) { + EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt)); +} + + +void Assembler::cbz(Instruction* at, const Register& rt, int imm19) { + EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt)); +} + + +void Assembler::cbz(const Register& rt, Label* label) { + // Encode the relative offset from the inserted branch to the label. + return cbz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label)); +} + + +void Assembler::cbnz(const Register& rt, int imm19) { + EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt)); +} + + +void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) { + EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt)); +} + + +void Assembler::cbnz(const Register& rt, Label* label) { + // Encode the relative offset from the inserted branch to the label. + return cbnz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label)); +} + + +void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) { + VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); + EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); +} + + +void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) { + VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); + EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); +} + + +void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) { + // Encode the relative offset from the inserted branch to the label. + return tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label)); +} + + +void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) { + VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); + EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); +} + + +void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) { + VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); + EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); +} + + +void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) { + // Encode the relative offset from the inserted branch to the label. + return tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label)); +} + + +void Assembler::adr(const Register& rd, int imm21) { + VIXL_ASSERT(rd.Is64Bits()); + EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd)); +} + + +void Assembler::adr(Instruction* at, const Register& rd, int imm21) { + VIXL_ASSERT(rd.Is64Bits()); + EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd)); +} + + +void Assembler::adr(const Register& rd, Label* label) { + // Encode the relative offset from the inserted adr to the label. 
+ return adr(rd, LinkAndGetByteOffsetTo(nextInstrOffset(), label)); +} + + +void Assembler::adrp(const Register& rd, int imm21) { + VIXL_ASSERT(rd.Is64Bits()); + EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd)); +} + + +void Assembler::adrp(Instruction* at, const Register& rd, int imm21) { + VIXL_ASSERT(rd.Is64Bits()); + EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd)); +} + + +void Assembler::adrp(const Register& rd, Label* label) { + VIXL_ASSERT(AllowPageOffsetDependentCode()); + // Encode the relative offset from the inserted adr to the label. + return adrp(rd, LinkAndGetPageOffsetTo(nextInstrOffset(), label)); +} + + +BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) { + return Logical(rd, rn, operand, ANDS); +} + + +BufferOffset Assembler::tst(const Register& rn, const Operand& operand) { + return ands(AppropriateZeroRegFor(rn), rn, operand); +} + + +void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) { + LoadLiteralOp op = LoadLiteralOpFor(rt); + Emit(at, op | ImmLLiteral(imm19) | Rt(rt)); +} + + +BufferOffset Assembler::hint(SystemHint code) { + return Emit(HINT | ImmHint(code) | Rt(xzr)); +} + + +void Assembler::hint(Instruction* at, SystemHint code) { + Emit(at, HINT | ImmHint(code) | Rt(xzr)); +} + + +void Assembler::svc(Instruction* at, int code) { + VIXL_ASSERT(is_uint16(code)); + Emit(at, SVC | ImmException(code)); +} + + +void Assembler::nop(Instruction* at) { + hint(at, NOP); +} + + +BufferOffset Assembler::Logical(const Register& rd, const Register& rn, + const Operand operand, LogicalOp op) +{ + VIXL_ASSERT(rd.size() == rn.size()); + if (operand.IsImmediate()) { + int64_t immediate = operand.immediate(); + unsigned reg_size = rd.size(); + + VIXL_ASSERT(immediate != 0); + VIXL_ASSERT(immediate != -1); + VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate)); + + // If the operation is NOT, invert the operation and immediate. + if ((op & NOT) == NOT) { + op = static_cast<LogicalOp>(op & ~NOT); + immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); + } + + unsigned n, imm_s, imm_r; + if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be encoded in the instruction. + return LogicalImmediate(rd, rn, n, imm_s, imm_r, op); + } else { + // This case is handled in the macro assembler. + VIXL_UNREACHABLE(); + } + } else { + VIXL_ASSERT(operand.IsShiftedRegister()); + VIXL_ASSERT(operand.reg().size() == rd.size()); + Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed); + return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); + } +} + + +BufferOffset Assembler::LogicalImmediate(const Register& rd, const Register& rn, + unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op) +{ + unsigned reg_size = rd.size(); + Instr dest_reg = (op == ANDS) ? 
Rd(rd) : RdSP(rd); + return Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | + ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn)); +} + + +BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, Instr op) +{ + VIXL_ASSERT(operand.IsShiftedRegister()); + VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount()))); + return Emit(SF(rd) | op | Flags(S) | + ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) | + Rm(operand.reg()) | Rn(rn) | Rd(rd)); +} + + +void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) { + // Store the js::jit::PoolEntry index into the instruction. + // finishPool() will walk over all literal load instructions + // and use PatchConstantPoolLoad() to patch to the final relative offset. + *((uint32_t*)load) |= Assembler::ImmLLiteral(index); +} + + +bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) { + Instruction* load = reinterpret_cast<Instruction*>(loadAddr); + + // The load currently contains the js::jit::PoolEntry's index, + // as written by InsertIndexIntoTag(). + uint32_t index = load->ImmLLiteral(); + + // Each entry in the literal pool is uint32_t-sized, + // but literals may use multiple entries. + uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr); + Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]); + + load->SetImmLLiteral(source); + return false; // Nothing uses the return value. +} + +void +MozBaseAssembler::PatchShortRangeBranchToVeneer(ARMBuffer* buffer, unsigned rangeIdx, + BufferOffset deadline, BufferOffset veneer) +{ + // Reconstruct the position of the branch from (rangeIdx, deadline). + vixl::ImmBranchRangeType branchRange = static_cast<vixl::ImmBranchRangeType>(rangeIdx); + BufferOffset branch(deadline.getOffset() - Instruction::ImmBranchMaxForwardOffset(branchRange)); + Instruction *branchInst = buffer->getInst(branch); + Instruction *veneerInst = buffer->getInst(veneer); + + // Verify that the branch range matches what's encoded. + MOZ_ASSERT(Instruction::ImmBranchTypeToRange(branchInst->BranchType()) == branchRange); + + // We want to insert veneer after branch in the linked list of instructions + // that use the same unbound label. + // The veneer should be an unconditional branch. + ptrdiff_t nextElemOffset = branchInst->ImmPCRawOffset(); + + // If offset is 0, this is the end of the linked list. + if (nextElemOffset != kEndOfLabelUseList) { + // Make the offset relative to veneer so it targets the same instruction + // as branchInst. + nextElemOffset *= kInstructionSize; + nextElemOffset += branch.getOffset() - veneer.getOffset(); + nextElemOffset /= kInstructionSize; + } + Assembler::b(veneerInst, nextElemOffset); + + // Now point branchInst at veneer. See also SetNextLink() above. + branchInst->SetImmPCRawOffset(EncodeOffset(branch, veneer)); +} + +struct PoolHeader { + uint32_t data; + + struct Header { + // The size should take into account the pool header. + // The size is in units of Instruction (4bytes), not byte. + union { + struct { + uint32_t size : 15; + + // "Natural" guards are part of the normal instruction stream, + // while "non-natural" guards are inserted for the sole purpose + // of skipping around a pool. 
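        // For reference, a sketch of the packed 32-bit header word (assuming
        // the LSB-first bit-field layout used by the compilers this code
        // targets):
        //
        //   [14:0]  size       pool size in Instruction units, header included
        //   [15]    isNatural  set when the guard is part of the normal stream
        //   [31:16] ONES       always 0xffff
        //
        // A header behind an artificial guard is therefore what
        // Instruction::skipPool() matches via Mask(0xffff8000) == 0xffff0000,
        // and decoding such a word by hand would look like:
        //
        //   bool looksLikeHeader = (word >> 16) == 0xffff;
        //   bool natural         = (word >> 15) & 1;
        //   uint32_t sizeInInst  = word & 0x7fff;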
+ bool isNatural : 1; + uint32_t ONES : 16; + }; + uint32_t data; + }; + + Header(int size_, bool isNatural_) + : size(size_), + isNatural(isNatural_), + ONES(0xffff) + { } + + Header(uint32_t data) + : data(data) + { + JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t)); + VIXL_ASSERT(ONES == 0xffff); + } + + uint32_t raw() const { + JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t)); + return data; + } + }; + + PoolHeader(int size_, bool isNatural_) + : data(Header(size_, isNatural_).raw()) + { } + + uint32_t size() const { + Header tmp(data); + return tmp.size; + } + + uint32_t isNatural() const { + Header tmp(data); + return tmp.isNatural; + } +}; + + +void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) { + JS_STATIC_ASSERT(sizeof(PoolHeader) == 4); + + // Get the total size of the pool. + const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize(); + const uintptr_t totalPoolInstructions = totalPoolSize / sizeof(Instruction); + + VIXL_ASSERT((totalPoolSize & 0x3) == 0); + VIXL_ASSERT(totalPoolInstructions < (1 << 15)); + + PoolHeader header(totalPoolInstructions, isNatural); + *(PoolHeader*)start = header; +} + + +void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) { + return; +} + + +void MozBaseAssembler::WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest) { + int byteOffset = dest.getOffset() - branch.getOffset(); + VIXL_ASSERT(byteOffset % kInstructionSize == 0); + + int instOffset = byteOffset >> kInstructionSizeLog2; + Assembler::b(inst, instOffset); +} + + +ptrdiff_t MozBaseAssembler::GetBranchOffset(const Instruction* ins) { + // Branch instructions use an instruction offset. + if (ins->BranchType() != UnknownBranchType) + return ins->ImmPCRawOffset() * kInstructionSize; + + // ADR and ADRP encode relative offsets and therefore require patching as if they were branches. + // ADR uses a byte offset. + if (ins->IsADR()) + return ins->ImmPCRawOffset(); + + // ADRP uses a page offset. + if (ins->IsADRP()) + return ins->ImmPCRawOffset() * kPageSize; + + MOZ_CRASH("Unsupported branch type"); +} + + +void MozBaseAssembler::RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final) { + if (i->IsCondBranchImm()) { + VIXL_ASSERT(i->IsCondB()); + Assembler::b(i, offset, cond); + return; + } + MOZ_CRASH("Unsupported branch type"); +} + + +void MozBaseAssembler::RetargetNearBranch(Instruction* i, int byteOffset, bool final) { + const int instOffset = byteOffset >> kInstructionSizeLog2; + + // The only valid conditional instruction is B. + if (i->IsCondBranchImm()) { + VIXL_ASSERT(byteOffset % kInstructionSize == 0); + VIXL_ASSERT(i->IsCondB()); + Condition cond = static_cast<Condition>(i->ConditionBranch()); + Assembler::b(i, instOffset, cond); + return; + } + + // Valid unconditional branches are B and BL. + if (i->IsUncondBranchImm()) { + VIXL_ASSERT(byteOffset % kInstructionSize == 0); + if (i->IsUncondB()) { + Assembler::b(i, instOffset); + } else { + VIXL_ASSERT(i->IsBL()); + Assembler::bl(i, instOffset); + } + + VIXL_ASSERT(i->ImmUncondBranch() == instOffset); + return; + } + + // Valid compare branches are CBZ and CBNZ. + if (i->IsCompareBranch()) { + VIXL_ASSERT(byteOffset % kInstructionSize == 0); + Register rt = i->SixtyFourBits() ? 
Register::XRegFromCode(i->Rt()) + : Register::WRegFromCode(i->Rt()); + + if (i->IsCBZ()) { + Assembler::cbz(i, rt, instOffset); + } else { + VIXL_ASSERT(i->IsCBNZ()); + Assembler::cbnz(i, rt, instOffset); + } + + VIXL_ASSERT(i->ImmCmpBranch() == instOffset); + return; + } + + // Valid test branches are TBZ and TBNZ. + if (i->IsTestBranch()) { + VIXL_ASSERT(byteOffset % kInstructionSize == 0); + // Opposite of ImmTestBranchBit(): MSB in bit 5, 0:5 at bit 40. + unsigned bit_pos = (i->ImmTestBranchBit5() << 5) | (i->ImmTestBranchBit40()); + VIXL_ASSERT(is_uint6(bit_pos)); + + // Register size doesn't matter for the encoding. + Register rt = Register::XRegFromCode(i->Rt()); + + if (i->IsTBZ()) { + Assembler::tbz(i, rt, bit_pos, instOffset); + } else { + VIXL_ASSERT(i->IsTBNZ()); + Assembler::tbnz(i, rt, bit_pos, instOffset); + } + + VIXL_ASSERT(i->ImmTestBranch() == instOffset); + return; + } + + if (i->IsADR()) { + Register rd = Register::XRegFromCode(i->Rd()); + Assembler::adr(i, rd, byteOffset); + return; + } + + if (i->IsADRP()) { + const int pageOffset = byteOffset >> kPageSizeLog2; + Register rd = Register::XRegFromCode(i->Rd()); + Assembler::adrp(i, rd, pageOffset); + return; + } + + MOZ_CRASH("Unsupported branch type"); +} + + +void MozBaseAssembler::RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond) { + MOZ_CRASH("RetargetFarBranch()"); +} + + +} // namespace vixl + diff --git a/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h new file mode 100644 index 000000000..d079340fc --- /dev/null +++ b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h @@ -0,0 +1,216 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef jit_arm64_vixl_MozBaseAssembler_vixl_h +#define jit_arm64_vixl_MozBaseAssembler_vixl_h + +#include "jit/arm64/vixl/Constants-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" + +#include "jit/shared/Assembler-shared.h" +#include "jit/shared/IonAssemblerBufferWithConstantPools.h" + +namespace vixl { + + +using js::jit::BufferOffset; + + +class MozBaseAssembler; +typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, MozBaseAssembler, + NumShortBranchRangeTypes> ARMBuffer; + +// Base class for vixl::Assembler, for isolating Moz-specific changes to VIXL. +class MozBaseAssembler : public js::jit::AssemblerShared { + // Buffer initialization constants. + static const unsigned BufferGuardSize = 1; + static const unsigned BufferHeaderSize = 1; + static const size_t BufferCodeAlignment = 8; + static const size_t BufferMaxPoolOffset = 1024; + static const unsigned BufferPCBias = 0; + static const uint32_t BufferAlignmentFillInstruction = BRK | (0xdead << ImmException_offset); + static const uint32_t BufferNopFillInstruction = HINT | (31 << Rt_offset); + static const unsigned BufferNumDebugNopsToInsert = 0; + + public: + MozBaseAssembler() + : armbuffer_(BufferGuardSize, + BufferHeaderSize, + BufferCodeAlignment, + BufferMaxPoolOffset, + BufferPCBias, + BufferAlignmentFillInstruction, + BufferNopFillInstruction, + BufferNumDebugNopsToInsert) + { } + + public: + // Helper function for use with the ARMBuffer. + // The MacroAssembler must create an AutoJitContextAlloc before initializing the buffer. + void initWithAllocator() { + armbuffer_.initWithAllocator(); + } + + // Return the Instruction at a given byte offset. + Instruction* getInstructionAt(BufferOffset offset) { + return armbuffer_.getInst(offset); + } + + // Return the byte offset of a bound label. + template <typename T> + inline T GetLabelByteOffset(const js::jit::Label* label) { + VIXL_ASSERT(label->bound()); + JS_STATIC_ASSERT(sizeof(T) >= sizeof(uint32_t)); + return reinterpret_cast<T>(label->offset()); + } + + protected: + // Get the buffer offset of the next inserted instruction. This may flush + // constant pools. + BufferOffset nextInstrOffset() { + return armbuffer_.nextInstrOffset(); + } + + // Get the next usable buffer offset. Note that a constant pool may be placed + // here before the next instruction is emitted. + BufferOffset nextOffset() const { + return armbuffer_.nextOffset(); + } + + // Allocate memory in the buffer by forwarding to armbuffer_. + // Propagate OOM errors. + BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries, + uint8_t* inst, uint8_t* data, + ARMBuffer::PoolEntry* pe = nullptr, + bool markAsBranch = false) + { + BufferOffset offset = armbuffer_.allocEntry(numInst, numPoolEntries, inst, + data, pe, markAsBranch); + propagateOOM(offset.assigned()); + return offset; + } + + // Emit the instruction, returning its offset. + BufferOffset Emit(Instr instruction, bool isBranch = false) { + JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize); + return armbuffer_.putInt(*(uint32_t*)(&instruction), isBranch); + } + + BufferOffset EmitBranch(Instr instruction) { + return Emit(instruction, true); + } + + public: + // Emit the instruction at |at|. + static void Emit(Instruction* at, Instr instruction) { + JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize); + memcpy(at, &instruction, sizeof(instruction)); + } + + static void EmitBranch(Instruction* at, Instr instruction) { + // TODO: Assert that the buffer already has the instruction marked as a branch. 
+ Emit(at, instruction); + } + + // Emit data inline in the instruction stream. + BufferOffset EmitData(void const * data, unsigned size) { + VIXL_ASSERT(size % 4 == 0); + return armbuffer_.allocEntry(size / sizeof(uint32_t), 0, (uint8_t*)(data), nullptr); + } + + public: + // Size of the code generated in bytes, including pools. + size_t SizeOfCodeGenerated() const { + return armbuffer_.size(); + } + + // Move the pool into the instruction stream. + void flushBuffer() { + armbuffer_.flushPool(); + } + + // Inhibit pool flushing for the given number of instructions. + // Generating more than |maxInst| instructions in a no-pool region + // triggers an assertion within the ARMBuffer. + // Does not nest. + void enterNoPool(size_t maxInst) { + armbuffer_.enterNoPool(maxInst); + } + + // Marks the end of a no-pool region. + void leaveNoPool() { + armbuffer_.leaveNoPool(); + } + + public: + // Static interface used by IonAssemblerBufferWithConstantPools. + static void InsertIndexIntoTag(uint8_t* load, uint32_t index); + static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr); + static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx, BufferOffset deadline, + BufferOffset veneer); + static uint32_t PlaceConstantPoolBarrier(int offset); + + static void WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural); + static void WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural); + static void WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest); + + static ptrdiff_t GetBranchOffset(const Instruction* i); + static void RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final = true); + static void RetargetNearBranch(Instruction* i, int offset, bool final = true); + static void RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond); + + protected: + // Functions for managing Labels and linked lists of Label uses. + + // Get the next Label user in the linked list of Label uses. + // Return an unassigned BufferOffset when the end of the list is reached. + BufferOffset NextLink(BufferOffset cur); + + // Patch the instruction at cur to link to the instruction at next. + void SetNextLink(BufferOffset cur, BufferOffset next); + + // Link the current (not-yet-emitted) instruction to the specified label, + // then return a raw offset to be encoded in the instruction. + ptrdiff_t LinkAndGetByteOffsetTo(BufferOffset branch, js::jit::Label* label); + ptrdiff_t LinkAndGetInstructionOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange, + js::jit::Label* label); + ptrdiff_t LinkAndGetPageOffsetTo(BufferOffset branch, js::jit::Label* label); + + // A common implementation for the LinkAndGet<Type>OffsetTo helpers. + ptrdiff_t LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange, + unsigned elementSizeBits, js::jit::Label* label); + + protected: + // The buffer into which code and relocation info are generated. + ARMBuffer armbuffer_; +}; + + +} // namespace vixl + + +#endif // jit_arm64_vixl_MozBaseAssembler_vixl_h + diff --git a/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp new file mode 100644 index 000000000..fcae1ba47 --- /dev/null +++ b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp @@ -0,0 +1,195 @@ +// Copyright 2013, ARM Limited +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/Architecture-arm64.h" +#include "jit/arm64/vixl/Assembler-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" + +namespace vixl { + +bool Instruction::IsUncondB() const { + return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | B); +} + + +bool Instruction::IsCondB() const { + return Mask(ConditionalBranchMask) == (ConditionalBranchFixed | B_cond); +} + + +bool Instruction::IsBL() const { + return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | BL); +} + + +bool Instruction::IsBR() const { + return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BR); +} + + +bool Instruction::IsBLR() const { + return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BLR); +} + + +bool Instruction::IsTBZ() const { + return Mask(TestBranchMask) == TBZ; +} + + +bool Instruction::IsTBNZ() const { + return Mask(TestBranchMask) == TBNZ; +} + + +bool Instruction::IsCBZ() const { + return Mask(CompareBranchMask) == CBZ_w || Mask(CompareBranchMask) == CBZ_x; +} + + +bool Instruction::IsCBNZ() const { + return Mask(CompareBranchMask) == CBNZ_w || Mask(CompareBranchMask) == CBNZ_x; +} + + +bool Instruction::IsLDR() const { + return Mask(LoadLiteralMask) == LDR_x_lit; +} + + +bool Instruction::IsNOP() const { + return Mask(SystemHintMask) == HINT && ImmHint() == NOP; +} + + +bool Instruction::IsADR() const { + return Mask(PCRelAddressingMask) == ADR; +} + + +bool Instruction::IsADRP() const { + return Mask(PCRelAddressingMask) == ADRP; +} + + +bool Instruction::IsBranchLinkImm() const { + return Mask(UnconditionalBranchFMask) == (UnconditionalBranchFixed | BL); +} + + +bool Instruction::IsTargetReachable(Instruction* target) const { + VIXL_ASSERT(((target - this) & 3) == 0); + int offset = (target - this) >> kInstructionSizeLog2; + switch (BranchType()) { + case CondBranchType: + return is_int19(offset); + case UncondBranchType: + return is_int26(offset); + case CompareBranchType: + return is_int19(offset); + case TestBranchType: + 
return is_int14(offset); + default: + VIXL_UNREACHABLE(); + } +} + + +ptrdiff_t Instruction::ImmPCRawOffset() const { + ptrdiff_t offset; + if (IsPCRelAddressing()) { + // ADR and ADRP. + offset = ImmPCRel(); + } else if (BranchType() == UnknownBranchType) { + offset = ImmLLiteral(); + } else { + offset = ImmBranch(); + } + return offset; +} + +void +Instruction::SetImmPCRawOffset(ptrdiff_t offset) +{ + if (IsPCRelAddressing()) { + // ADR and ADRP. We're encoding a raw offset here. + // See also SetPCRelImmTarget(). + Instr imm = vixl::Assembler::ImmPCRelAddress(offset); + SetInstructionBits(Mask(~ImmPCRel_mask) | imm); + } else { + SetBranchImmTarget(this + (offset << kInstructionSizeLog2)); + } +} + +// Is this a stack pointer synchronization instruction as inserted by +// MacroAssembler::syncStackPtr()? +bool +Instruction::IsStackPtrSync() const +{ + // The stack pointer sync is a move to the stack pointer. + // This is encoded as 'add sp, Rs, #0'. + return IsAddSubImmediate() && Rd() == js::jit::Registers::sp && ImmAddSub() == 0; +} + +// Skip over a constant pool at |this| if there is one. +// +// If |this| is pointing to the artifical guard branch around a constant pool, +// return the instruction after the pool. Otherwise return |this| itself. +// +// This function does not skip constant pools with a natural guard branch. It +// is assumed that anyone inspecting the instruction stream understands about +// branches that were inserted naturally. +const Instruction* +Instruction::skipPool() const +{ + // Artificial pool guards can only be B (rather than BR), and they must be + // forward branches. + if (!IsUncondB() || ImmUncondBranch() <= 0) + return this; + + // Check for a constant pool header which has the high 16 bits set. See + // struct PoolHeader. Bit 15 indicates a natural pool guard when set. It + // must be clear which indicates an artificial pool guard. + const Instruction *header = InstructionAtOffset(kInstructionSize); + if (header->Mask(0xffff8000) != 0xffff0000) + return this; + + // OK, this is an artificial jump around a constant pool. + return ImmPCOffsetTarget(); +} + + +void Instruction::SetBits32(int msb, int lsb, unsigned value) { + uint32_t me; + memcpy(&me, this, sizeof(me)); + uint32_t new_mask = (1 << (msb+1)) - (1 << lsb); + uint32_t keep_mask = ~new_mask; + me = (me & keep_mask) | ((value << lsb) & new_mask); + memcpy(this, &me, sizeof(me)); +} + + +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp new file mode 100644 index 000000000..7447b4d2a --- /dev/null +++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp @@ -0,0 +1,708 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "mozilla/DebugOnly.h" + +#include "jit/arm64/vixl/Debugger-vixl.h" +#include "jit/arm64/vixl/Simulator-vixl.h" +#include "jit/IonTypes.h" +#include "threading/LockGuard.h" +#include "vm/Runtime.h" + +namespace vixl { + + +using mozilla::DebugOnly; +using js::jit::ABIFunctionType; + +Simulator::Simulator(Decoder* decoder, FILE* stream) + : stream_(nullptr) + , print_disasm_(nullptr) + , instrumentation_(nullptr) + , stack_(nullptr) + , stack_limit_(nullptr) + , decoder_(nullptr) + , oom_(false) + , lock_(js::mutexid::Arm64SimulatorLock) +{ + this->init(decoder, stream); +} + + +Simulator::~Simulator() { + js_free(stack_); + stack_ = nullptr; + + // The decoder may outlive the simulator. + if (print_disasm_) { + decoder_->RemoveVisitor(print_disasm_); + js_delete(print_disasm_); + print_disasm_ = nullptr; + } + + if (instrumentation_) { + decoder_->RemoveVisitor(instrumentation_); + js_delete(instrumentation_); + instrumentation_ = nullptr; + } +} + + +void Simulator::ResetState() { + // Reset the system registers. + nzcv_ = SimSystemRegister::DefaultValueFor(NZCV); + fpcr_ = SimSystemRegister::DefaultValueFor(FPCR); + + // Reset registers to 0. + pc_ = nullptr; + pc_modified_ = false; + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + set_xreg(i, 0xbadbeef); + } + // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP. + uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1); + VIXL_ASSERT(IsSignallingNaN(rawbits_to_double(nan_bits & kDRegMask))); + VIXL_ASSERT(IsSignallingNaN(rawbits_to_float(nan_bits & kSRegMask))); + for (unsigned i = 0; i < kNumberOfFPRegisters; i++) { + set_dreg_bits(i, nan_bits); + } + // Returning to address 0 exits the Simulator. + set_lr(kEndOfSimAddress); + set_resume_pc(nullptr); +} + + +void Simulator::init(Decoder* decoder, FILE* stream) { + // Ensure that shift operations act as the simulator expects. + VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1); + VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF); + + instruction_stats_ = false; + + // Set up the decoder. + decoder_ = decoder; + decoder_->AppendVisitor(this); + + stream_ = stream; + print_disasm_ = js_new<PrintDisassembler>(stream_); + if (!print_disasm_) { + oom_ = true; + return; + } + set_coloured_trace(false); + trace_parameters_ = LOG_NONE; + + ResetState(); + + // Allocate and set up the simulator stack. + stack_ = (byte*)js_malloc(stack_size_); + if (!stack_) { + oom_ = true; + return; + } + stack_limit_ = stack_ + stack_protection_size_; + // Configure the starting stack pointer. + // - Find the top of the stack. + byte * tos = stack_ + stack_size_; + // - There's a protection region at both ends of the stack. 
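  //   (A sketch of the resulting layout, low addresses on the left; both
  //    guard regions are stack_protection_size_ bytes:
  //
  //      stack_       stack_limit_                tos = initial sp    stack_ + stack_size_
  //        |-- guard --|-- usable simulator stack --|------ guard ------|
  //
  //    with tos additionally aligned down to 16 bytes.)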
+ tos -= stack_protection_size_; + // - The stack pointer must be 16-byte aligned. + tos = AlignDown(tos, 16); + set_sp(tos); + + // Set the sample period to 10, as the VIXL examples and tests are short. + instrumentation_ = js_new<Instrument>("vixl_stats.csv", 10); + if (!instrumentation_) { + oom_ = true; + return; + } + + // Print a warning about exclusive-access instructions, but only the first + // time they are encountered. This warning can be silenced using + // SilenceExclusiveAccessWarning(). + print_exclusive_access_warning_ = true; + + redirection_ = nullptr; +} + + +Simulator* Simulator::Current() { + return js::TlsPerThreadData.get()->simulator(); +} + + +Simulator* Simulator::Create(JSContext* cx) { + Decoder *decoder = js_new<vixl::Decoder>(); + if (!decoder) + return nullptr; + + // FIXME: This just leaks the Decoder object for now, which is probably OK. + // FIXME: We should free it at some point. + // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts. + Simulator *sim; + if (getenv("USE_DEBUGGER") != nullptr) + sim = js_new<Debugger>(decoder, stdout); + else + sim = js_new<Simulator>(decoder, stdout); + + // Check if Simulator:init ran out of memory. + if (sim && sim->oom()) { + js_delete(sim); + return nullptr; + } + + return sim; +} + + +void Simulator::Destroy(Simulator* sim) { + js_delete(sim); +} + + +void Simulator::ExecuteInstruction() { + // The program counter should always be aligned. + VIXL_ASSERT(IsWordAligned(pc_)); + decoder_->Decode(pc_); + const Instruction* rpc = resume_pc_; + increment_pc(); + + if (MOZ_UNLIKELY(rpc)) { + JSRuntime::innermostWasmActivation()->setResumePC((void*)pc()); + set_pc(rpc); + // Just calling set_pc turns the pc_modified_ flag on, which means it doesn't + // auto-step after executing the next instruction. Force that to off so it + // will auto-step after executing the first instruction of the handler. + pc_modified_ = false; + resume_pc_ = nullptr; + } +} + + +uintptr_t Simulator::stackLimit() const { + return reinterpret_cast<uintptr_t>(stack_limit_); +} + + +uintptr_t* Simulator::addressOfStackLimit() { + return (uintptr_t*)&stack_limit_; +} + + +bool Simulator::overRecursed(uintptr_t newsp) const { + if (newsp) + newsp = xreg(31, Reg31IsStackPointer); + return newsp <= stackLimit(); +} + + +bool Simulator::overRecursedWithExtra(uint32_t extra) const { + uintptr_t newsp = xreg(31, Reg31IsStackPointer) - extra; + return newsp <= stackLimit(); +} + + +void Simulator::set_resume_pc(void* new_resume_pc) { + resume_pc_ = AddressUntag(reinterpret_cast<Instruction*>(new_resume_pc)); +} + + +int64_t Simulator::call(uint8_t* entry, int argument_count, ...) { + va_list parameters; + va_start(parameters, argument_count); + + // First eight arguments passed in registers. + VIXL_ASSERT(argument_count <= 8); + // This code should use the type of the called function + // (with templates, like the callVM machinery), but since the + // number of called functions is miniscule, their types have been + // divined from the number of arguments. + if (argument_count == 8) { + // EnterJitData::jitcode. + set_xreg(0, va_arg(parameters, int64_t)); + // EnterJitData::maxArgc. + set_xreg(1, va_arg(parameters, unsigned)); + // EnterJitData::maxArgv. + set_xreg(2, va_arg(parameters, int64_t)); + // EnterJitData::osrFrame. + set_xreg(3, va_arg(parameters, int64_t)); + // EnterJitData::calleeToken. + set_xreg(4, va_arg(parameters, int64_t)); + // EnterJitData::scopeChain. 
+ set_xreg(5, va_arg(parameters, int64_t)); + // EnterJitData::osrNumStackValues. + set_xreg(6, va_arg(parameters, unsigned)); + // Address of EnterJitData::result. + set_xreg(7, va_arg(parameters, int64_t)); + } else if (argument_count == 2) { + // EntryArg* args + set_xreg(0, va_arg(parameters, int64_t)); + // uint8_t* GlobalData + set_xreg(1, va_arg(parameters, int64_t)); + } else if (argument_count == 1) { // irregexp + // InputOutputData& data + set_xreg(0, va_arg(parameters, int64_t)); + } else { + MOZ_CRASH("Unknown number of arguments"); + } + + va_end(parameters); + + // Call must transition back to native code on exit. + VIXL_ASSERT(xreg(30) == int64_t(kEndOfSimAddress)); + + // Execute the simulation. + DebugOnly<int64_t> entryStack = xreg(31, Reg31IsStackPointer); + RunFrom((Instruction*)entry); + DebugOnly<int64_t> exitStack = xreg(31, Reg31IsStackPointer); + VIXL_ASSERT(entryStack == exitStack); + + int64_t result = xreg(0); + if (getenv("USE_DEBUGGER")) + printf("LEAVE\n"); + return result; +} + + +// Protects the icache and redirection properties of the simulator. +class AutoLockSimulatorCache : public js::LockGuard<js::Mutex> +{ + friend class Simulator; + using Base = js::LockGuard<js::Mutex>; + + public: + explicit AutoLockSimulatorCache(Simulator* sim) + : Base(sim->lock_) + { + } +}; + + +// When the generated code calls a VM function (masm.callWithABI) we need to +// call that function instead of trying to execute it with the simulator +// (because it's x64 code instead of AArch64 code). We do that by redirecting the VM +// call to a svc (Supervisor Call) instruction that is handled by the +// simulator. We write the original destination of the jump just at a known +// offset from the svc instruction so the simulator knows what to call. +class Redirection +{ + friend class Simulator; + + Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim) + : nativeFunction_(nativeFunction), + type_(type), + next_(nullptr) + { + next_ = sim->redirection(); + // TODO: Flush ICache? + sim->setRedirection(this); + + Instruction* instr = (Instruction*)(&svcInstruction_); + vixl::Assembler::svc(instr, kCallRtRedirected); + } + + public: + void* addressOfSvcInstruction() { return &svcInstruction_; } + void* nativeFunction() const { return nativeFunction_; } + ABIFunctionType type() const { return type_; } + + static Redirection* Get(void* nativeFunction, ABIFunctionType type) { + Simulator* sim = Simulator::Current(); + AutoLockSimulatorCache alsr(sim); + + // TODO: Store srt_ in the simulator for this assertion. 
+ // VIXL_ASSERT_IF(pt->simulator(), pt->simulator()->srt_ == srt); + + Redirection* current = sim->redirection(); + for (; current != nullptr; current = current->next_) { + if (current->nativeFunction_ == nativeFunction) { + VIXL_ASSERT(current->type() == type); + return current; + } + } + + js::AutoEnterOOMUnsafeRegion oomUnsafe; + Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection)); + if (!redir) + oomUnsafe.crash("Simulator redirection"); + new(redir) Redirection(nativeFunction, type, sim); + return redir; + } + + static const Redirection* FromSvcInstruction(const Instruction* svcInstruction) { + const uint8_t* addrOfSvc = reinterpret_cast<const uint8_t*>(svcInstruction); + const uint8_t* addrOfRedirection = addrOfSvc - offsetof(Redirection, svcInstruction_); + return reinterpret_cast<const Redirection*>(addrOfRedirection); + } + + private: + void* nativeFunction_; + uint32_t svcInstruction_; + ABIFunctionType type_; + Redirection* next_; +}; + + +void Simulator::setRedirection(Redirection* redirection) { + redirection_ = redirection; +} + + +Redirection* Simulator::redirection() const { + return redirection_; +} + + +void* Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type) { + Redirection* redirection = Redirection::Get(nativeFunction, type); + return redirection->addressOfSvcInstruction(); +} + + +void Simulator::VisitException(const Instruction* instr) { + switch (instr->Mask(ExceptionMask)) { + case BRK: { + int lowbit = ImmException_offset; + int highbit = ImmException_offset + ImmException_width - 1; + HostBreakpoint(instr->Bits(highbit, lowbit)); + break; + } + case HLT: + switch (instr->ImmException()) { + case kUnreachableOpcode: + DoUnreachable(instr); + return; + case kTraceOpcode: + DoTrace(instr); + return; + case kLogOpcode: + DoLog(instr); + return; + case kPrintfOpcode: + DoPrintf(instr); + return; + default: + HostBreakpoint(); + return; + } + case SVC: + // The SVC instruction is hijacked by the JIT as a pseudo-instruction + // causing the Simulator to execute host-native code for callWithABI. 
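      // For context, a minimal sketch of how a call ends up here (the callee
      // and its signature are hypothetical; RedirectNativeFunction() and the
      // ABIFunctionType tag are the pieces defined in this file):
      //
      //   double MyHostHelper(double x);   // hypothetical native function
      //   void* redirected = Simulator::RedirectNativeFunction(
      //       reinterpret_cast<void*>(&MyHostHelper), js::jit::Args_Double_Double);
      //   // The JIT emits its callWithABI() to |redirected|; executing the
      //   // embedded SVC lands in this case, and VisitCallRedirection() then
      //   // invokes MyHostHelper directly on the host.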
+ switch (instr->ImmException()) { + case kCallRtRedirected: + VisitCallRedirection(instr); + return; + case kMarkStackPointer: + spStack_.append(xreg(31, Reg31IsStackPointer)); + return; + case kCheckStackPointer: { + int64_t current = xreg(31, Reg31IsStackPointer); + int64_t expected = spStack_.popCopy(); + VIXL_ASSERT(current == expected); + return; + } + default: + VIXL_UNIMPLEMENTED(); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::setGPR32Result(int32_t result) { + set_wreg(0, result); +} + + +void Simulator::setGPR64Result(int64_t result) { + set_xreg(0, result); +} + + +void Simulator::setFP32Result(float result) { + set_sreg(0, result); +} + + +void Simulator::setFP64Result(double result) { + set_dreg(0, result); +} + + +typedef int64_t (*Prototype_General0)(); +typedef int64_t (*Prototype_General1)(int64_t arg0); +typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1); +typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2); +typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3); +typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4); +typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5); +typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5, int64_t arg6); +typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7); + +typedef int64_t (*Prototype_Int_Double)(double arg0); +typedef int64_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1); +typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, uint64_t arg1, uint64_t arg2); +typedef int64_t (*Prototype_Int_IntDoubleIntInt)(uint64_t arg0, double arg1, + uint64_t arg2, uint64_t arg3); + +typedef float (*Prototype_Float32_Float32)(float arg0); + +typedef double (*Prototype_Double_None)(); +typedef double (*Prototype_Double_Double)(double arg0); +typedef double (*Prototype_Double_Int)(int32_t arg0); +typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1); +typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1); +typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1); +typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2); +typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1, + double arg2, double arg3); + + +// Simulator support for callWithABI(). +void +Simulator::VisitCallRedirection(const Instruction* instr) +{ + VIXL_ASSERT(instr->Mask(ExceptionMask) == SVC); + VIXL_ASSERT(instr->ImmException() == kCallRtRedirected); + + const Redirection* redir = Redirection::FromSvcInstruction(instr); + uintptr_t nativeFn = reinterpret_cast<uintptr_t>(redir->nativeFunction()); + + // Stack must be aligned prior to the call. + // FIXME: It's actually our job to perform the alignment... + //VIXL_ASSERT((xreg(31, Reg31IsStackPointer) & (StackAlignment - 1)) == 0); + + // Used to assert that callee-saved registers are preserved. 
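  // (Under AAPCS64, x19-x28 plus the frame pointer x29 are the callee-saved
  //  general-purpose registers; the stack pointer is checked separately via
  //  |savedSP| below.)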
+ DebugOnly<int64_t> x19 = xreg(19); + DebugOnly<int64_t> x20 = xreg(20); + DebugOnly<int64_t> x21 = xreg(21); + DebugOnly<int64_t> x22 = xreg(22); + DebugOnly<int64_t> x23 = xreg(23); + DebugOnly<int64_t> x24 = xreg(24); + DebugOnly<int64_t> x25 = xreg(25); + DebugOnly<int64_t> x26 = xreg(26); + DebugOnly<int64_t> x27 = xreg(27); + DebugOnly<int64_t> x28 = xreg(28); + DebugOnly<int64_t> x29 = xreg(29); + DebugOnly<int64_t> savedSP = xreg(31, Reg31IsStackPointer); + + // Remember LR for returning from the "call". + int64_t savedLR = xreg(30); + + // Allow recursive Simulator calls: returning from the call must stop + // the simulation and transition back to native Simulator code. + set_xreg(30, int64_t(kEndOfSimAddress)); + + // Store argument register values in local variables for ease of use below. + int64_t x0 = xreg(0); + int64_t x1 = xreg(1); + int64_t x2 = xreg(2); + int64_t x3 = xreg(3); + int64_t x4 = xreg(4); + int64_t x5 = xreg(5); + int64_t x6 = xreg(6); + int64_t x7 = xreg(7); + double d0 = dreg(0); + double d1 = dreg(1); + double d2 = dreg(2); + double d3 = dreg(3); + float s0 = sreg(0); + + // Dispatch the call and set the return value. + switch (redir->type()) { + // Cases with int64_t return type. + case js::jit::Args_General0: { + int64_t ret = reinterpret_cast<Prototype_General0>(nativeFn)(); + setGPR64Result(ret); + break; + } + case js::jit::Args_General1: { + int64_t ret = reinterpret_cast<Prototype_General1>(nativeFn)(x0); + setGPR64Result(ret); + break; + } + case js::jit::Args_General2: { + int64_t ret = reinterpret_cast<Prototype_General2>(nativeFn)(x0, x1); + setGPR64Result(ret); + break; + } + case js::jit::Args_General3: { + int64_t ret = reinterpret_cast<Prototype_General3>(nativeFn)(x0, x1, x2); + setGPR64Result(ret); + break; + } + case js::jit::Args_General4: { + int64_t ret = reinterpret_cast<Prototype_General4>(nativeFn)(x0, x1, x2, x3); + setGPR64Result(ret); + break; + } + case js::jit::Args_General5: { + int64_t ret = reinterpret_cast<Prototype_General5>(nativeFn)(x0, x1, x2, x3, x4); + setGPR64Result(ret); + break; + } + case js::jit::Args_General6: { + int64_t ret = reinterpret_cast<Prototype_General6>(nativeFn)(x0, x1, x2, x3, x4, x5); + setGPR64Result(ret); + break; + } + case js::jit::Args_General7: { + int64_t ret = reinterpret_cast<Prototype_General7>(nativeFn)(x0, x1, x2, x3, x4, x5, x6); + setGPR64Result(ret); + break; + } + case js::jit::Args_General8: { + int64_t ret = reinterpret_cast<Prototype_General8>(nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7); + setGPR64Result(ret); + break; + } + + // Cases with GPR return type. This can be int32 or int64, but int64 is a safer assumption. + case js::jit::Args_Int_Double: { + int64_t ret = reinterpret_cast<Prototype_Int_Double>(nativeFn)(d0); + setGPR64Result(ret); + break; + } + case js::jit::Args_Int_IntDouble: { + int64_t ret = reinterpret_cast<Prototype_Int_IntDouble>(nativeFn)(x0, d0); + setGPR64Result(ret); + break; + } + + case js::jit::Args_Int_IntDoubleIntInt: { + int64_t ret = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(nativeFn)(x0, d0, x1, x2); + setGPR64Result(ret); + break; + } + + case js::jit::Args_Int_DoubleIntInt: { + int64_t ret = reinterpret_cast<Prototype_Int_DoubleIntInt>(nativeFn)(d0, x0, x1); + setGPR64Result(ret); + break; + } + + // Cases with float return type. + case js::jit::Args_Float32_Float32: { + float ret = reinterpret_cast<Prototype_Float32_Float32>(nativeFn)(s0); + setFP32Result(ret); + break; + } + + // Cases with double return type. 
+ case js::jit::Args_Double_None: { + double ret = reinterpret_cast<Prototype_Double_None>(nativeFn)(); + setFP64Result(ret); + break; + } + case js::jit::Args_Double_Double: { + double ret = reinterpret_cast<Prototype_Double_Double>(nativeFn)(d0); + setFP64Result(ret); + break; + } + case js::jit::Args_Double_Int: { + double ret = reinterpret_cast<Prototype_Double_Int>(nativeFn)(x0); + setFP64Result(ret); + break; + } + case js::jit::Args_Double_DoubleInt: { + double ret = reinterpret_cast<Prototype_Double_DoubleInt>(nativeFn)(d0, x0); + setFP64Result(ret); + break; + } + case js::jit::Args_Double_DoubleDouble: { + double ret = reinterpret_cast<Prototype_Double_DoubleDouble>(nativeFn)(d0, d1); + setFP64Result(ret); + break; + } + case js::jit::Args_Double_DoubleDoubleDouble: { + double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(nativeFn)(d0, d1, d2); + setFP64Result(ret); + break; + } + case js::jit::Args_Double_DoubleDoubleDoubleDouble: { + double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(nativeFn)(d0, d1, d2, d3); + setFP64Result(ret); + break; + } + + case js::jit::Args_Double_IntDouble: { + double ret = reinterpret_cast<Prototype_Double_IntDouble>(nativeFn)(x0, d0); + setFP64Result(ret); + break; + } + + default: + MOZ_CRASH("Unknown function type."); + } + + // TODO: Nuke the volatile registers. + + // Assert that callee-saved registers are unchanged. + VIXL_ASSERT(xreg(19) == x19); + VIXL_ASSERT(xreg(20) == x20); + VIXL_ASSERT(xreg(21) == x21); + VIXL_ASSERT(xreg(22) == x22); + VIXL_ASSERT(xreg(23) == x23); + VIXL_ASSERT(xreg(24) == x24); + VIXL_ASSERT(xreg(25) == x25); + VIXL_ASSERT(xreg(26) == x26); + VIXL_ASSERT(xreg(27) == x27); + VIXL_ASSERT(xreg(28) == x28); + VIXL_ASSERT(xreg(29) == x29); + + // Assert that the stack is unchanged. + VIXL_ASSERT(savedSP == xreg(31, Reg31IsStackPointer)); + + // Simulate a return. + set_lr(savedLR); + set_pc((Instruction*)savedLR); + if (getenv("USE_DEBUGGER")) + printf("SVCRET\n"); +} + + +} // namespace vixl + + +vixl::Simulator* js::PerThreadData::simulator() const { + return runtime_->simulator(); +} + + +vixl::Simulator* JSRuntime::simulator() const { + return simulator_; +} + + +uintptr_t* JSRuntime::addressOfSimulatorStackLimit() { + return simulator_->addressOfStackLimit(); +} diff --git a/js/src/jit/arm64/vixl/Platform-vixl.h b/js/src/jit/arm64/vixl/Platform-vixl.h new file mode 100644 index 000000000..df0420867 --- /dev/null +++ b/js/src/jit/arm64/vixl/Platform-vixl.h @@ -0,0 +1,39 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_PLATFORM_H +#define VIXL_PLATFORM_H + +// Define platform specific functionalities. +#include <signal.h> + +#include "js-config.h" + +namespace vixl { +inline void HostBreakpoint(int64_t code = 0) { raise(SIGINT); } +} // namespace vixl + +#endif diff --git a/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h new file mode 100644 index 000000000..9a66ab023 --- /dev/null +++ b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h @@ -0,0 +1,141 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_SIMULATOR_CONSTANTS_A64_H_ +#define VIXL_A64_SIMULATOR_CONSTANTS_A64_H_ + +namespace vixl { + +// Debug instructions. +// +// VIXL's macro-assembler and simulator support a few pseudo instructions to +// make debugging easier. These pseudo instructions do not exist on real +// hardware. +// +// TODO: Also consider allowing these pseudo-instructions to be disabled in the +// simulator, so that users can check that the input is a valid native code. +// (This isn't possible in all cases. Printf won't work, for example.) +// +// Each debug pseudo instruction is represented by a HLT instruction. The HLT +// immediate field is used to identify the type of debug pseudo instruction. + +enum DebugHltOpcodes { + kUnreachableOpcode = 0xdeb0, + kPrintfOpcode, + kTraceOpcode, + kLogOpcode, + // Aliases. 
+ kDebugHltFirstOpcode = kUnreachableOpcode, + kDebugHltLastOpcode = kLogOpcode +}; + +// Each pseudo instruction uses a custom encoding for additional arguments, as +// described below. + +// Unreachable - kUnreachableOpcode +// +// Instruction which should never be executed. This is used as a guard in parts +// of the code that should not be reachable, such as in data encoded inline in +// the instructions. + +// Printf - kPrintfOpcode +// - arg_count: The number of arguments. +// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields. +// +// Simulate a call to printf. +// +// Floating-point and integer arguments are passed in separate sets of registers +// in AAPCS64 (even for varargs functions), so it is not possible to determine +// the type of each argument without some information about the values that were +// passed in. This information could be retrieved from the printf format string, +// but the format string is not trivial to parse so we encode the relevant +// information with the HLT instruction. +// +// Also, the following registers are populated (as if for a native A64 call): +// x0: The format string +// x1-x7: Optional arguments, if type == CPURegister::kRegister +// d0-d7: Optional arguments, if type == CPURegister::kFPRegister +const unsigned kPrintfArgCountOffset = 1 * kInstructionSize; +const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize; +const unsigned kPrintfLength = 3 * kInstructionSize; + +const unsigned kPrintfMaxArgCount = 4; + +// The argument pattern is a set of two-bit-fields, each with one of the +// following values: +enum PrintfArgPattern { + kPrintfArgW = 1, + kPrintfArgX = 2, + // There is no kPrintfArgS because floats are always converted to doubles in C + // varargs calls. + kPrintfArgD = 3 +}; +static const unsigned kPrintfArgPatternBits = 2; + +// Trace - kTraceOpcode +// - parameter: TraceParameter stored as a uint32_t +// - command: TraceCommand stored as a uint32_t +// +// Allow for trace management in the generated code. This enables or disables +// automatic tracing of the specified information for every simulated +// instruction. +const unsigned kTraceParamsOffset = 1 * kInstructionSize; +const unsigned kTraceCommandOffset = 2 * kInstructionSize; +const unsigned kTraceLength = 3 * kInstructionSize; + +// Trace parameters. +enum TraceParameters { + LOG_DISASM = 1 << 0, // Log disassembly. + LOG_REGS = 1 << 1, // Log general purpose registers. + LOG_VREGS = 1 << 2, // Log NEON and floating-point registers. + LOG_SYSREGS = 1 << 3, // Log the flags and system registers. + LOG_WRITE = 1 << 4, // Log writes to memory. + + LOG_NONE = 0, + LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS, + LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE +}; + +// Trace commands. +enum TraceCommand { + TRACE_ENABLE = 1, + TRACE_DISABLE = 2 +}; + +// Log - kLogOpcode +// - parameter: TraceParameter stored as a uint32_t +// +// Print the specified information once. This mechanism is separate from Trace. +// In particular, _all_ of the specified registers are printed, rather than just +// the registers that the instruction writes. +// +// Any combination of the TraceParameters values can be used, except that +// LOG_DISASM is not supported for Log. 
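// As an illustration of how these pseudo-instructions carry their metadata in
// the words that follow the HLT (a sketch, not part of the VIXL interface),
// the Printf argument patterns stored at kPrintfArgPatternListOffset can be
// unpacked with:
//
//   for (unsigned i = 0; i < arg_count; i++) {
//     unsigned shift = i * kPrintfArgPatternBits;
//     PrintfArgPattern p = static_cast<PrintfArgPattern>(
//         (pattern_list >> shift) & ((1 << kPrintfArgPatternBits) - 1));
//     // p is kPrintfArgW, kPrintfArgX or kPrintfArgD.
//   }
//
// which is essentially what the simulator's printf handler does after reading
// the metadata words from the instruction stream.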
+const unsigned kLogParamsOffset = 1 * kInstructionSize; +const unsigned kLogLength = 2 * kInstructionSize; +} // namespace vixl + +#endif // VIXL_A64_SIMULATOR_CONSTANTS_A64_H_ diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.cpp b/js/src/jit/arm64/vixl/Simulator-vixl.cpp new file mode 100644 index 000000000..9d95a2bb4 --- /dev/null +++ b/js/src/jit/arm64/vixl/Simulator-vixl.cpp @@ -0,0 +1,3949 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "js-config.h" + +#ifdef JS_SIMULATOR_ARM64 + +#include "jit/arm64/vixl/Simulator-vixl.h" + +#include <cmath> +#include <string.h> + +namespace vixl { + +const Instruction* Simulator::kEndOfSimAddress = NULL; + +void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) { + int width = msb - lsb + 1; + VIXL_ASSERT(is_uintn(width, bits) || is_intn(width, bits)); + + bits <<= lsb; + uint32_t mask = ((1 << width) - 1) << lsb; + VIXL_ASSERT((mask & write_ignore_mask_) == 0); + + value_ = (value_ & ~mask) | (bits & mask); +} + + +SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) { + switch (id) { + case NZCV: + return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask); + case FPCR: + return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask); + default: + VIXL_UNREACHABLE(); + return SimSystemRegister(); + } +} + + +void Simulator::Run() { + pc_modified_ = false; + while (pc_ != kEndOfSimAddress) { + ExecuteInstruction(); + LogAllWrittenRegisters(); + } +} + + +void Simulator::RunFrom(const Instruction* first) { + set_pc(first); + Run(); +} + + +const char* Simulator::xreg_names[] = { +"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", +"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", +"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", +"x24", "x25", "x26", "x27", "x28", "x29", "lr", "xzr", "sp"}; + +const char* Simulator::wreg_names[] = { +"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", +"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15", +"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23", +"w24", "w25", "w26", "w27", "w28", "w29", "w30", "wzr", "wsp"}; + +const char* Simulator::sreg_names[] = { +"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", +"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", +"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", +"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"}; + +const char* Simulator::dreg_names[] = { +"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", +"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", +"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", +"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; + +const char* Simulator::vreg_names[] = { +"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", +"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", +"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", +"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"}; + + + +const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { + VIXL_ASSERT(code < kNumberOfRegisters); + // If the code represents the stack pointer, index the name after zr. + if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { + code = kZeroRegCode + 1; + } + return wreg_names[code]; +} + + +const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { + VIXL_ASSERT(code < kNumberOfRegisters); + // If the code represents the stack pointer, index the name after zr. 
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { + code = kZeroRegCode + 1; + } + return xreg_names[code]; +} + + +const char* Simulator::SRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return sreg_names[code]; +} + + +const char* Simulator::DRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return dreg_names[code]; +} + + +const char* Simulator::VRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return vreg_names[code]; +} + + +#define COLOUR(colour_code) "\033[0;" colour_code "m" +#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m" +#define NORMAL "" +#define GREY "30" +#define RED "31" +#define GREEN "32" +#define YELLOW "33" +#define BLUE "34" +#define MAGENTA "35" +#define CYAN "36" +#define WHITE "37" +void Simulator::set_coloured_trace(bool value) { + coloured_trace_ = value; + + clr_normal = value ? COLOUR(NORMAL) : ""; + clr_flag_name = value ? COLOUR_BOLD(WHITE) : ""; + clr_flag_value = value ? COLOUR(NORMAL) : ""; + clr_reg_name = value ? COLOUR_BOLD(CYAN) : ""; + clr_reg_value = value ? COLOUR(CYAN) : ""; + clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : ""; + clr_vreg_value = value ? COLOUR(MAGENTA) : ""; + clr_memory_address = value ? COLOUR_BOLD(BLUE) : ""; + clr_warning = value ? COLOUR_BOLD(YELLOW) : ""; + clr_warning_message = value ? COLOUR(YELLOW) : ""; + clr_printf = value ? COLOUR(GREEN) : ""; +} +#undef COLOUR +#undef COLOUR_BOLD +#undef NORMAL +#undef GREY +#undef RED +#undef GREEN +#undef YELLOW +#undef BLUE +#undef MAGENTA +#undef CYAN +#undef WHITE + + +void Simulator::set_trace_parameters(int parameters) { + bool disasm_before = trace_parameters_ & LOG_DISASM; + trace_parameters_ = parameters; + bool disasm_after = trace_parameters_ & LOG_DISASM; + + if (disasm_before != disasm_after) { + if (disasm_after) { + decoder_->InsertVisitorBefore(print_disasm_, this); + } else { + decoder_->RemoveVisitor(print_disasm_); + } + } +} + + +void Simulator::set_instruction_stats(bool value) { + if (value != instruction_stats_) { + if (value) { + decoder_->AppendVisitor(instrumentation_); + } else { + decoder_->RemoveVisitor(instrumentation_); + } + instruction_stats_ = value; + } +} + +// Helpers --------------------------------------------------------------------- +uint64_t Simulator::AddWithCarry(unsigned reg_size, + bool set_flags, + uint64_t left, + uint64_t right, + int carry_in) { + VIXL_ASSERT((carry_in == 0) || (carry_in == 1)); + VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); + + uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt; + uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask; + uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask; + + left &= reg_mask; + right &= reg_mask; + uint64_t result = (left + right + carry_in) & reg_mask; + + if (set_flags) { + nzcv().SetN(CalcNFlag(result, reg_size)); + nzcv().SetZ(CalcZFlag(result)); + + // Compute the C flag by comparing the result to the max unsigned integer. + uint64_t max_uint_2op = max_uint - carry_in; + bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right); + nzcv().SetC(C ? 1 : 0); + + // Overflow iff the sign bit is the same for the two inputs and different + // for the result. + uint64_t left_sign = left & sign_mask; + uint64_t right_sign = right & sign_mask; + uint64_t result_sign = result & sign_mask; + bool V = (left_sign == right_sign) && (left_sign != result_sign); + nzcv().SetV(V ? 
1 : 0); + + LogSystemRegister(NZCV); + } + return result; +} + + +int64_t Simulator::ShiftOperand(unsigned reg_size, + int64_t value, + Shift shift_type, + unsigned amount) { + if (amount == 0) { + return value; + } + int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask; + switch (shift_type) { + case LSL: + return (value << amount) & mask; + case LSR: + return static_cast<uint64_t>(value) >> amount; + case ASR: { + // Shift used to restore the sign. + unsigned s_shift = kXRegSize - reg_size; + // Value with its sign restored. + int64_t s_value = (value << s_shift) >> s_shift; + return (s_value >> amount) & mask; + } + case ROR: { + if (reg_size == kWRegSize) { + value &= kWRegMask; + } + return (static_cast<uint64_t>(value) >> amount) | + ((value & ((INT64_C(1) << amount) - 1)) << + (reg_size - amount)); + } + default: + VIXL_UNIMPLEMENTED(); + return 0; + } +} + + +int64_t Simulator::ExtendValue(unsigned reg_size, + int64_t value, + Extend extend_type, + unsigned left_shift) { + switch (extend_type) { + case UXTB: + value &= kByteMask; + break; + case UXTH: + value &= kHalfWordMask; + break; + case UXTW: + value &= kWordMask; + break; + case SXTB: + value = (value << 56) >> 56; + break; + case SXTH: + value = (value << 48) >> 48; + break; + case SXTW: + value = (value << 32) >> 32; + break; + case UXTX: + case SXTX: + break; + default: + VIXL_UNREACHABLE(); + } + int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask; + return (value << left_shift) & mask; +} + + +void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) { + AssertSupportedFPCR(); + + // TODO: This assumes that the C++ implementation handles comparisons in the + // way that we expect (as per AssertSupportedFPCR()). + bool process_exception = false; + if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) { + nzcv().SetRawValue(FPUnorderedFlag); + if (IsSignallingNaN(val0) || IsSignallingNaN(val1) || + (trap == EnableTrap)) { + process_exception = true; + } + } else if (val0 < val1) { + nzcv().SetRawValue(FPLessThanFlag); + } else if (val0 > val1) { + nzcv().SetRawValue(FPGreaterThanFlag); + } else if (val0 == val1) { + nzcv().SetRawValue(FPEqualFlag); + } else { + VIXL_UNREACHABLE(); + } + LogSystemRegister(NZCV); + if (process_exception) FPProcessException(); +} + + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize( + unsigned reg_size, unsigned lane_size) { + VIXL_ASSERT(reg_size >= lane_size); + + uint32_t format = 0; + if (reg_size != lane_size) { + switch (reg_size) { + default: VIXL_UNREACHABLE(); break; + case kQRegSizeInBytes: format = kPrintRegAsQVector; break; + case kDRegSizeInBytes: format = kPrintRegAsDVector; break; + } + } + + switch (lane_size) { + default: VIXL_UNREACHABLE(); break; + case kQRegSizeInBytes: format |= kPrintReg1Q; break; + case kDRegSizeInBytes: format |= kPrintReg1D; break; + case kSRegSizeInBytes: format |= kPrintReg1S; break; + case kHRegSizeInBytes: format |= kPrintReg1H; break; + case kBRegSizeInBytes: format |= kPrintReg1B; break; + } + // These sizes would be duplicate case labels. 
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); + VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); + VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D); + VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S); + + return static_cast<PrintRegisterFormat>(format); +} + + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat( + VectorFormat vform) { + switch (vform) { + default: VIXL_UNREACHABLE(); return kPrintReg16B; + case kFormat16B: return kPrintReg16B; + case kFormat8B: return kPrintReg8B; + case kFormat8H: return kPrintReg8H; + case kFormat4H: return kPrintReg4H; + case kFormat4S: return kPrintReg4S; + case kFormat2S: return kPrintReg2S; + case kFormat2D: return kPrintReg2D; + case kFormat1D: return kPrintReg1D; + } +} + + +void Simulator::PrintWrittenRegisters() { + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + if (registers_[i].WrittenSinceLastLog()) PrintRegister(i); + } +} + + +void Simulator::PrintWrittenVRegisters() { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + // At this point there is no type information, so print as a raw 1Q. + if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q); + } +} + + +void Simulator::PrintSystemRegisters() { + PrintSystemRegister(NZCV); + PrintSystemRegister(FPCR); +} + + +void Simulator::PrintRegisters() { + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + PrintRegister(i); + } +} + + +void Simulator::PrintVRegisters() { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + // At this point there is no type information, so print as a raw 1Q. + PrintVRegister(i, kPrintReg1Q); + } +} + + +// Print a register's name and raw value. +// +// Only the least-significant `size_in_bytes` bytes of the register are printed, +// but the value is aligned as if the whole register had been printed. +// +// For typical register updates, size_in_bytes should be set to kXRegSizeInBytes +// -- the default -- so that the whole register is printed. Other values of +// size_in_bytes are intended for use when the register hasn't actually been +// updated (such as in PrintWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). +void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode, + int size_in_bytes) { + // The template for all supported sizes. + // "# x{code}: 0xffeeddccbbaa9988" + // "# w{code}: 0xbbaa9988" + // "# w{code}<15:0>: 0x9988" + // "# w{code}<7:0>: 0x88" + unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2; + + const char * name = ""; + const char * suffix = ""; + switch (size_in_bytes) { + case kXRegSizeInBytes: name = XRegNameForCode(code, r31mode); break; + case kWRegSizeInBytes: name = WRegNameForCode(code, r31mode); break; + case 2: + name = WRegNameForCode(code, r31mode); + suffix = "<15:0>"; + padding_chars -= strlen(suffix); + break; + case 1: + name = WRegNameForCode(code, r31mode); + suffix = "<7:0>"; + padding_chars -= strlen(suffix); + break; + default: + VIXL_UNREACHABLE(); + } + fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix); + + // Print leading padding spaces. + VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2)); + for (unsigned i = 0; i < padding_chars; i++) { + putc(' ', stream_); + } + + // Print the specified bits in hexadecimal format. 
+ uint64_t bits = reg<uint64_t>(code, r31mode); + bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8); + VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes); + + int chars = size_in_bytes * 2; + fprintf(stream_, "%s0x%0*" PRIx64 "%s", + clr_reg_value, chars, bits, clr_normal); +} + + +void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) { + registers_[code].NotifyRegisterLogged(); + + // Don't print writes into xzr. + if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) { + return; + } + + // The template for all x and w registers: + // "# x{code}: 0x{value}" + // "# w{code}: 0x{value}" + + PrintRegisterRawHelper(code, r31mode); + fprintf(stream_, "\n"); +} + + +// Print a register's name and raw value. +// +// The `bytes` and `lsb` arguments can be used to limit the bytes that are +// printed. These arguments are intended for use in cases where register hasn't +// actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a floating-point interpretation or a memory access annotation). +void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) { + // The template for vector types: + // "# v{code}: 0xffeeddccbbaa99887766554433221100". + // An example with bytes=4 and lsb=8: + // "# v{code}: 0xbbaa9988 ". + fprintf(stream_, "# %s%5s: %s", + clr_vreg_name, VRegNameForCode(code), clr_vreg_value); + + int msb = lsb + bytes - 1; + int byte = kQRegSizeInBytes - 1; + + // Print leading padding spaces. (Two spaces per byte.) + while (byte > msb) { + fprintf(stream_, " "); + byte--; + } + + // Print the specified part of the value, byte by byte. + qreg_t rawbits = qreg(code); + fprintf(stream_, "0x"); + while (byte >= lsb) { + fprintf(stream_, "%02x", rawbits.val[byte]); + byte--; + } + + // Print trailing padding spaces. + while (byte >= 0) { + fprintf(stream_, " "); + byte--; + } + fprintf(stream_, "%s", clr_normal); +} + + +// Print each of the specified lanes of a register as a float or double value. +// +// The `lane_count` and `lslane` arguments can be used to limit the lanes that +// are printed. These arguments are intended for use in cases where register +// hasn't actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). +void Simulator::PrintVRegisterFPHelper(unsigned code, + unsigned lane_size_in_bytes, + int lane_count, + int rightmost_lane) { + VIXL_ASSERT((lane_size_in_bytes == kSRegSizeInBytes) || + (lane_size_in_bytes == kDRegSizeInBytes)); + + unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes); + VIXL_ASSERT(msb <= kQRegSizeInBytes); + + // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register + // name is used: + // " (s{code}: {value})" + // " (d{code}: {value})" + // For vector types, "..." is used to represent one or more omitted lanes. + // " (..., {value}, {value}, ...)" + if ((lane_count == 1) && (rightmost_lane == 0)) { + const char * name = + (lane_size_in_bytes == kSRegSizeInBytes) ? SRegNameForCode(code) + : DRegNameForCode(code); + fprintf(stream_, " (%s%s: ", clr_vreg_name, name); + } else { + if (msb < (kQRegSizeInBytes - 1)) { + fprintf(stream_, " (..., "); + } else { + fprintf(stream_, " ("); + } + } + + // Print the list of values. 
+ const char * separator = ""; + int leftmost_lane = rightmost_lane + lane_count - 1; + for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) { + double value = + (lane_size_in_bytes == kSRegSizeInBytes) ? vreg(code).Get<float>(lane) + : vreg(code).Get<double>(lane); + fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal); + separator = ", "; + } + + if (rightmost_lane > 0) { + fprintf(stream_, ", ..."); + } + fprintf(stream_, ")"); +} + + +void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) { + vregisters_[code].NotifyRegisterLogged(); + + int lane_size_log2 = format & kPrintRegLaneSizeMask; + + int reg_size_log2; + if (format & kPrintRegAsQVector) { + reg_size_log2 = kQRegSizeInBytesLog2; + } else if (format & kPrintRegAsDVector) { + reg_size_log2 = kDRegSizeInBytesLog2; + } else { + // Scalar types. + reg_size_log2 = lane_size_log2; + } + + int lane_count = 1 << (reg_size_log2 - lane_size_log2); + int lane_size = 1 << lane_size_log2; + + // The template for vector types: + // "# v{code}: 0x{rawbits} (..., {value}, ...)". + // The template for scalar types: + // "# v{code}: 0x{rawbits} ({reg}:{value})". + // The values in parentheses after the bit representations are floating-point + // interpretations. They are displayed only if the kPrintVRegAsFP bit is set. + + PrintVRegisterRawHelper(code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(code, lane_size, lane_count); + } + + fprintf(stream_, "\n"); +} + + +void Simulator::PrintSystemRegister(SystemRegister id) { + switch (id) { + case NZCV: + fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", + clr_flag_name, clr_flag_value, + nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(), + clr_normal); + break; + case FPCR: { + static const char * rmode[] = { + "0b00 (Round to Nearest)", + "0b01 (Round towards Plus Infinity)", + "0b10 (Round towards Minus Infinity)", + "0b11 (Round towards Zero)" + }; + VIXL_ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0]))); + fprintf(stream_, + "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", + clr_flag_name, clr_flag_value, + fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()], + clr_normal); + break; + } + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::PrintRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + registers_[reg_code].NotifyRegisterLogged(); + + USE(format); + + // The template is "# {reg}: 0x{value} <- {address}". + PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister); + fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", + clr_memory_address, address, clr_normal); +} + + +void Simulator::PrintVRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane) { + vregisters_[reg_code].NotifyRegisterLogged(); + + // The template is "# v{code}: 0x{rawbits} <- address". + PrintVRegisterRawHelper(reg_code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format), + GetPrintRegLaneCount(format), lane); + } + fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", + clr_memory_address, address, clr_normal); +} + + +void Simulator::PrintWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + VIXL_ASSERT(GetPrintRegLaneCount(format) == 1); + + // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy + // and readable, the value is aligned with the values in the register trace. 
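+ // An illustrative trace line for a 64-bit store (the address is made up):
+ //   "# x0: 0x0000000000000010 -> 0x00007fffffffe000"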
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister, + GetPrintRegSizeInBytes(format)); + fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", + clr_memory_address, address, clr_normal); +} + + +void Simulator::PrintVWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane) { + // The templates: + // "# v{code}: 0x{rawbits} -> {address}" + // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}". + // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}" + // Because this trace doesn't represent a change to the source register's + // value, only the relevant part of the value is printed. To keep the trace + // tidy and readable, the raw value is aligned with the other values in the + // register trace. + int lane_count = GetPrintRegLaneCount(format); + int lane_size = GetPrintRegLaneSizeInBytes(format); + int reg_size = GetPrintRegSizeInBytes(format); + PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane); + } + fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", + clr_memory_address, address, clr_normal); +} + + +// Visitors--------------------------------------------------------------------- + +void Simulator::VisitUnimplemented(const Instruction* instr) { + printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n", + reinterpret_cast<const void*>(instr), instr->InstructionBits()); + VIXL_UNIMPLEMENTED(); +} + + +void Simulator::VisitUnallocated(const Instruction* instr) { + printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n", + reinterpret_cast<const void*>(instr), instr->InstructionBits()); + VIXL_UNIMPLEMENTED(); +} + + +void Simulator::VisitPCRelAddressing(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) || + (instr->Mask(PCRelAddressingMask) == ADRP)); + + set_reg(instr->Rd(), instr->ImmPCOffsetTarget()); +} + + +void Simulator::VisitUnconditionalBranch(const Instruction* instr) { + switch (instr->Mask(UnconditionalBranchMask)) { + case BL: + set_lr(instr->NextInstruction()); + VIXL_FALLTHROUGH(); + case B: + set_pc(instr->ImmPCOffsetTarget()); + break; + default: VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitConditionalBranch(const Instruction* instr) { + VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond); + if (ConditionPassed(instr->ConditionBranch())) { + set_pc(instr->ImmPCOffsetTarget()); + } +} + + +void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) { + const Instruction* target = Instruction::Cast(xreg(instr->Rn())); + + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BLR: + set_lr(instr->NextInstruction()); + VIXL_FALLTHROUGH(); + case BR: + case RET: set_pc(target); break; + default: VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitTestBranch(const Instruction* instr) { + unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) | + instr->ImmTestBranchBit40(); + bool bit_zero = ((xreg(instr->Rt()) >> bit_pos) & 1) == 0; + bool take_branch = false; + switch (instr->Mask(TestBranchMask)) { + case TBZ: take_branch = bit_zero; break; + case TBNZ: take_branch = !bit_zero; break; + default: VIXL_UNIMPLEMENTED(); + } + if (take_branch) { + set_pc(instr->ImmPCOffsetTarget()); + } +} + + +void Simulator::VisitCompareBranch(const Instruction* instr) { + unsigned rt = instr->Rt(); + bool take_branch = false; + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: take_branch = (wreg(rt) == 0); break; + case CBZ_x: take_branch = (xreg(rt) == 0); 
break; + case CBNZ_w: take_branch = (wreg(rt) != 0); break; + case CBNZ_x: take_branch = (xreg(rt) != 0); break; + default: VIXL_UNIMPLEMENTED(); + } + if (take_branch) { + set_pc(instr->ImmPCOffsetTarget()); + } +} + + +void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + bool set_flags = instr->FlagsUpdate(); + int64_t new_val = 0; + Instr operation = instr->Mask(AddSubOpMask); + + switch (operation) { + case ADD: + case ADDS: { + new_val = AddWithCarry(reg_size, + set_flags, + reg(reg_size, instr->Rn(), instr->RnMode()), + op2); + break; + } + case SUB: + case SUBS: { + new_val = AddWithCarry(reg_size, + set_flags, + reg(reg_size, instr->Rn(), instr->RnMode()), + ~op2, + 1); + break; + } + default: VIXL_UNREACHABLE(); + } + + set_reg(reg_size, instr->Rd(), new_val, LogRegWrites, instr->RdMode()); +} + + +void Simulator::VisitAddSubShifted(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = ShiftOperand(reg_size, + reg(reg_size, instr->Rm()), + static_cast<Shift>(instr->ShiftDP()), + instr->ImmDPShift()); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubImmediate(const Instruction* instr) { + int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubExtended(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = ExtendValue(reg_size, + reg(reg_size, instr->Rm()), + static_cast<Extend>(instr->ExtendMode()), + instr->ImmExtendShift()); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubWithCarry(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = reg(reg_size, instr->Rm()); + int64_t new_val; + + if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) { + op2 = ~op2; + } + + new_val = AddWithCarry(reg_size, + instr->FlagsUpdate(), + reg(reg_size, instr->Rn()), + op2, + C()); + + set_reg(reg_size, instr->Rd(), new_val); +} + + +void Simulator::VisitLogicalShifted(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + Shift shift_type = static_cast<Shift>(instr->ShiftDP()); + unsigned shift_amount = instr->ImmDPShift(); + int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type, + shift_amount); + if (instr->Mask(NOT) == NOT) { + op2 = ~op2; + } + LogicalHelper(instr, op2); +} + + +void Simulator::VisitLogicalImmediate(const Instruction* instr) { + LogicalHelper(instr, instr->ImmLogical()); +} + + +void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op1 = reg(reg_size, instr->Rn()); + int64_t result = 0; + bool update_flags = false; + + // Switch on the logical operation, stripping out the NOT bit, as it has a + // different meaning for logical immediate instructions. 
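+ // For example, `bic x0, x1, x2` arrives here through VisitLogicalShifted,
+ // which has already inverted op2, so only AND/ANDS/ORR/EOR need handling.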
+ switch (instr->Mask(LogicalOpMask & ~NOT)) { + case ANDS: update_flags = true; VIXL_FALLTHROUGH(); + case AND: result = op1 & op2; break; + case ORR: result = op1 | op2; break; + case EOR: result = op1 ^ op2; break; + default: + VIXL_UNIMPLEMENTED(); + } + + if (update_flags) { + nzcv().SetN(CalcNFlag(result, reg_size)); + nzcv().SetZ(CalcZFlag(result)); + nzcv().SetC(0); + nzcv().SetV(0); + LogSystemRegister(NZCV); + } + + set_reg(reg_size, instr->Rd(), result, LogRegWrites, instr->RdMode()); +} + + +void Simulator::VisitConditionalCompareRegister(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + ConditionalCompareHelper(instr, reg(reg_size, instr->Rm())); +} + + +void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) { + ConditionalCompareHelper(instr, instr->ImmCondCmp()); +} + + +void Simulator::ConditionalCompareHelper(const Instruction* instr, + int64_t op2) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op1 = reg(reg_size, instr->Rn()); + + if (ConditionPassed(instr->Condition())) { + // If the condition passes, set the status flags to the result of comparing + // the operands. + if (instr->Mask(ConditionalCompareMask) == CCMP) { + AddWithCarry(reg_size, true, op1, ~op2, 1); + } else { + VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); + AddWithCarry(reg_size, true, op1, op2, 0); + } + } else { + // If the condition fails, set the status flags to the nzcv immediate. + nzcv().SetFlags(instr->Nzcv()); + LogSystemRegister(NZCV); + } +} + + +void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + int offset = instr->ImmLSUnsigned() << instr->SizeLS(); + LoadStoreHelper(instr, offset, Offset); +} + + +void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + LoadStoreHelper(instr, instr->ImmLS(), Offset); +} + + +void Simulator::VisitLoadStorePreIndex(const Instruction* instr) { + LoadStoreHelper(instr, instr->ImmLS(), PreIndex); +} + + +void Simulator::VisitLoadStorePostIndex(const Instruction* instr) { + LoadStoreHelper(instr, instr->ImmLS(), PostIndex); +} + + +void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) { + Extend ext = static_cast<Extend>(instr->ExtendMode()); + VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); + unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS(); + + int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext, + shift_amount); + LoadStoreHelper(instr, offset, Offset); +} + + + +void Simulator::LoadStoreHelper(const Instruction* instr, + int64_t offset, + AddrMode addrmode) { + unsigned srcdst = instr->Rt(); + uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode); + + LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask)); + switch (op) { + case LDRB_w: + set_wreg(srcdst, Memory::Read<uint8_t>(address), NoRegLog); break; + case LDRH_w: + set_wreg(srcdst, Memory::Read<uint16_t>(address), NoRegLog); break; + case LDR_w: + set_wreg(srcdst, Memory::Read<uint32_t>(address), NoRegLog); break; + case LDR_x: + set_xreg(srcdst, Memory::Read<uint64_t>(address), NoRegLog); break; + case LDRSB_w: + set_wreg(srcdst, Memory::Read<int8_t>(address), NoRegLog); break; + case LDRSH_w: + set_wreg(srcdst, Memory::Read<int16_t>(address), NoRegLog); break; + case LDRSB_x: + set_xreg(srcdst, Memory::Read<int8_t>(address), NoRegLog); break; + case LDRSH_x: + set_xreg(srcdst, Memory::Read<int16_t>(address), NoRegLog); break; + case LDRSW_x: + 
set_xreg(srcdst, Memory::Read<int32_t>(address), NoRegLog); break; + case LDR_b: + set_breg(srcdst, Memory::Read<uint8_t>(address), NoRegLog); break; + case LDR_h: + set_hreg(srcdst, Memory::Read<uint16_t>(address), NoRegLog); break; + case LDR_s: + set_sreg(srcdst, Memory::Read<float>(address), NoRegLog); break; + case LDR_d: + set_dreg(srcdst, Memory::Read<double>(address), NoRegLog); break; + case LDR_q: + set_qreg(srcdst, Memory::Read<qreg_t>(address), NoRegLog); break; + + case STRB_w: Memory::Write<uint8_t>(address, wreg(srcdst)); break; + case STRH_w: Memory::Write<uint16_t>(address, wreg(srcdst)); break; + case STR_w: Memory::Write<uint32_t>(address, wreg(srcdst)); break; + case STR_x: Memory::Write<uint64_t>(address, xreg(srcdst)); break; + case STR_b: Memory::Write<uint8_t>(address, breg(srcdst)); break; + case STR_h: Memory::Write<uint16_t>(address, hreg(srcdst)); break; + case STR_s: Memory::Write<float>(address, sreg(srcdst)); break; + case STR_d: Memory::Write<double>(address, dreg(srcdst)); break; + case STR_q: Memory::Write<qreg_t>(address, qreg(srcdst)); break; + + // Ignore prfm hint instructions. + case PRFM: break; + + default: VIXL_UNIMPLEMENTED(); + } + + unsigned access_size = 1 << instr->SizeLS(); + if (instr->IsLoad()) { + if ((op == LDR_s) || (op == LDR_d)) { + LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); + } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) { + LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } else { + LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } + } else { + if ((op == STR_s) || (op == STR_d)) { + LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); + } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) { + LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } else { + LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } + } + + local_monitor_.MaybeClear(); +} + + +void Simulator::VisitLoadStorePairOffset(const Instruction* instr) { + LoadStorePairHelper(instr, Offset); +} + + +void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) { + LoadStorePairHelper(instr, PreIndex); +} + + +void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) { + LoadStorePairHelper(instr, PostIndex); +} + + +void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) { + LoadStorePairHelper(instr, Offset); +} + + +void Simulator::LoadStorePairHelper(const Instruction* instr, + AddrMode addrmode) { + unsigned rt = instr->Rt(); + unsigned rt2 = instr->Rt2(); + int element_size = 1 << instr->SizeLSPair(); + int64_t offset = instr->ImmLSPair() * element_size; + uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode); + uintptr_t address2 = address + element_size; + + LoadStorePairOp op = + static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); + + // 'rt' and 'rt2' can only be aliased for stores. + VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2)); + + switch (op) { + // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We + // will print a more detailed log. 
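+ // As an illustration, `ldp x0, x1, [sp]` has an element size of 8 bytes, so
+ // x0 is read from `address` (sp) and x1 from `address2` (sp + 8).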
+ case LDP_w: { + set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog); + set_wreg(rt2, Memory::Read<uint32_t>(address2), NoRegLog); + break; + } + case LDP_s: { + set_sreg(rt, Memory::Read<float>(address), NoRegLog); + set_sreg(rt2, Memory::Read<float>(address2), NoRegLog); + break; + } + case LDP_x: { + set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog); + set_xreg(rt2, Memory::Read<uint64_t>(address2), NoRegLog); + break; + } + case LDP_d: { + set_dreg(rt, Memory::Read<double>(address), NoRegLog); + set_dreg(rt2, Memory::Read<double>(address2), NoRegLog); + break; + } + case LDP_q: { + set_qreg(rt, Memory::Read<qreg_t>(address), NoRegLog); + set_qreg(rt2, Memory::Read<qreg_t>(address2), NoRegLog); + break; + } + case LDPSW_x: { + set_xreg(rt, Memory::Read<int32_t>(address), NoRegLog); + set_xreg(rt2, Memory::Read<int32_t>(address2), NoRegLog); + break; + } + case STP_w: { + Memory::Write<uint32_t>(address, wreg(rt)); + Memory::Write<uint32_t>(address2, wreg(rt2)); + break; + } + case STP_s: { + Memory::Write<float>(address, sreg(rt)); + Memory::Write<float>(address2, sreg(rt2)); + break; + } + case STP_x: { + Memory::Write<uint64_t>(address, xreg(rt)); + Memory::Write<uint64_t>(address2, xreg(rt2)); + break; + } + case STP_d: { + Memory::Write<double>(address, dreg(rt)); + Memory::Write<double>(address2, dreg(rt2)); + break; + } + case STP_q: { + Memory::Write<qreg_t>(address, qreg(rt)); + Memory::Write<qreg_t>(address2, qreg(rt2)); + break; + } + default: VIXL_UNREACHABLE(); + } + + // Print a detailed trace (including the memory address) instead of the basic + // register:value trace generated by set_*reg(). + if (instr->IsLoad()) { + if ((op == LDP_s) || (op == LDP_d)) { + LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size)); + LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size)); + } else if (op == LDP_q) { + LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } else { + LogRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } + } else { + if ((op == STP_s) || (op == STP_d)) { + LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size)); + LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size)); + } else if (op == STP_q) { + LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } else { + LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } + } + + local_monitor_.MaybeClear(); +} + + +void Simulator::PrintExclusiveAccessWarning() { + if (print_exclusive_access_warning_) { + fprintf( + stderr, + "%sWARNING:%s VIXL simulator support for load-/store-/clear-exclusive " + "instructions is limited. 
Refer to the README for details.%s\n", + clr_warning, clr_warning_message, clr_normal); + print_exclusive_access_warning_ = false; + } +} + + +void Simulator::VisitLoadStoreExclusive(const Instruction* instr) { + PrintExclusiveAccessWarning(); + + unsigned rs = instr->Rs(); + unsigned rt = instr->Rt(); + unsigned rt2 = instr->Rt2(); + unsigned rn = instr->Rn(); + + LoadStoreExclusive op = + static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask)); + + bool is_acquire_release = instr->LdStXAcquireRelease(); + bool is_exclusive = !instr->LdStXNotExclusive(); + bool is_load = instr->LdStXLoad(); + bool is_pair = instr->LdStXPair(); + + unsigned element_size = 1 << instr->LdStXSizeLog2(); + unsigned access_size = is_pair ? element_size * 2 : element_size; + uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer); + + // Verify that the address is available to the host. + VIXL_ASSERT(address == static_cast<uintptr_t>(address)); + + // Check the alignment of `address`. + if (AlignDown(address, access_size) != address) { + VIXL_ALIGNMENT_EXCEPTION(); + } + + // The sp must be aligned to 16 bytes when it is accessed. + if ((rn == 31) && (AlignDown(address, 16) != address)) { + VIXL_ALIGNMENT_EXCEPTION(); + } + + if (is_load) { + if (is_exclusive) { + local_monitor_.MarkExclusive(address, access_size); + } else { + // Any non-exclusive load can clear the local monitor as a side effect. We + // don't need to do this, but it is useful to stress the simulated code. + local_monitor_.Clear(); + } + + // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We + // will print a more detailed log. + switch (op) { + case LDXRB_w: + case LDAXRB_w: + case LDARB_w: + set_wreg(rt, Memory::Read<uint8_t>(address), NoRegLog); + break; + case LDXRH_w: + case LDAXRH_w: + case LDARH_w: + set_wreg(rt, Memory::Read<uint16_t>(address), NoRegLog); + break; + case LDXR_w: + case LDAXR_w: + case LDAR_w: + set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog); + break; + case LDXR_x: + case LDAXR_x: + case LDAR_x: + set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog); + break; + case LDXP_w: + case LDAXP_w: + set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog); + set_wreg(rt2, Memory::Read<uint32_t>(address + element_size), NoRegLog); + break; + case LDXP_x: + case LDAXP_x: + set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog); + set_xreg(rt2, Memory::Read<uint64_t>(address + element_size), NoRegLog); + break; + default: + VIXL_UNREACHABLE(); + } + + if (is_acquire_release) { + // Approximate load-acquire by issuing a full barrier after the load. + __sync_synchronize(); + } + + LogRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + if (is_pair) { + LogRead(address + element_size, rt2, + GetPrintRegisterFormatForSize(element_size)); + } + } else { + if (is_acquire_release) { + // Approximate store-release by issuing a full barrier before the store. + __sync_synchronize(); + } + + bool do_store = true; + if (is_exclusive) { + do_store = local_monitor_.IsExclusive(address, access_size) && + global_monitor_.IsExclusive(address, access_size); + set_wreg(rs, do_store ? 0 : 1); + + // - All exclusive stores explicitly clear the local monitor. + local_monitor_.Clear(); + } else { + // - Any other store can clear the local monitor as a side effect. 
+ local_monitor_.MaybeClear(); + } + + if (do_store) { + switch (op) { + case STXRB_w: + case STLXRB_w: + case STLRB_w: + Memory::Write<uint8_t>(address, wreg(rt)); + break; + case STXRH_w: + case STLXRH_w: + case STLRH_w: + Memory::Write<uint16_t>(address, wreg(rt)); + break; + case STXR_w: + case STLXR_w: + case STLR_w: + Memory::Write<uint32_t>(address, wreg(rt)); + break; + case STXR_x: + case STLXR_x: + case STLR_x: + Memory::Write<uint64_t>(address, xreg(rt)); + break; + case STXP_w: + case STLXP_w: + Memory::Write<uint32_t>(address, wreg(rt)); + Memory::Write<uint32_t>(address + element_size, wreg(rt2)); + break; + case STXP_x: + case STLXP_x: + Memory::Write<uint64_t>(address, xreg(rt)); + Memory::Write<uint64_t>(address + element_size, xreg(rt2)); + break; + default: + VIXL_UNREACHABLE(); + } + + LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + if (is_pair) { + LogWrite(address + element_size, rt2, + GetPrintRegisterFormatForSize(element_size)); + } + } + } +} + + +void Simulator::VisitLoadLiteral(const Instruction* instr) { + unsigned rt = instr->Rt(); + uint64_t address = instr->LiteralAddress<uint64_t>(); + + // Verify that the calculated address is available to the host. + VIXL_ASSERT(address == static_cast<uintptr_t>(address)); + + switch (instr->Mask(LoadLiteralMask)) { + // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then + // print a more detailed log. + case LDR_w_lit: + set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog); + LogRead(address, rt, kPrintWReg); + break; + case LDR_x_lit: + set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog); + LogRead(address, rt, kPrintXReg); + break; + case LDR_s_lit: + set_sreg(rt, Memory::Read<float>(address), NoRegLog); + LogVRead(address, rt, kPrintSReg); + break; + case LDR_d_lit: + set_dreg(rt, Memory::Read<double>(address), NoRegLog); + LogVRead(address, rt, kPrintDReg); + break; + case LDR_q_lit: + set_qreg(rt, Memory::Read<qreg_t>(address), NoRegLog); + LogVRead(address, rt, kPrintReg1Q); + break; + case LDRSW_x_lit: + set_xreg(rt, Memory::Read<int32_t>(address), NoRegLog); + LogRead(address, rt, kPrintWReg); + break; + + // Ignore prfm hint instructions. + case PRFM_lit: break; + + default: VIXL_UNREACHABLE(); + } + + local_monitor_.MaybeClear(); +} + + +uintptr_t Simulator::AddressModeHelper(unsigned addr_reg, + int64_t offset, + AddrMode addrmode) { + uint64_t address = xreg(addr_reg, Reg31IsStackPointer); + + if ((addr_reg == 31) && ((address % 16) != 0)) { + // When the base register is SP the stack pointer is required to be + // quadword aligned prior to the address calculation and write-backs. + // Misalignment will cause a stack alignment fault. + VIXL_ALIGNMENT_EXCEPTION(); + } + + if ((addrmode == PreIndex) || (addrmode == PostIndex)) { + VIXL_ASSERT(offset != 0); + // Only preindex should log the register update here. For Postindex, the + // update will be printed automatically by LogWrittenRegisters _after_ the + // memory access itself is logged. + RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog; + set_xreg(addr_reg, address + offset, log_mode, Reg31IsStackPointer); + } + + if ((addrmode == Offset) || (addrmode == PreIndex)) { + address += offset; + } + + // Verify that the calculated address is available to the host. 
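+ // To recap the write-back handling above: `ldr x0, [x1, #8]!` (pre-index)
+ // accesses x1 + 8 and updates x1, whereas `ldr x0, [x1], #8` (post-index)
+ // accesses the original x1 and only then writes back x1 + 8.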
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address)); + + return static_cast<uintptr_t>(address); +} + + +void Simulator::VisitMoveWideImmediate(const Instruction* instr) { + MoveWideImmediateOp mov_op = + static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask)); + int64_t new_xn_val = 0; + + bool is_64_bits = instr->SixtyFourBits() == 1; + // Shift is limited for W operations. + VIXL_ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2)); + + // Get the shifted immediate. + int64_t shift = instr->ShiftMoveWide() * 16; + int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift; + + // Compute the new value. + switch (mov_op) { + case MOVN_w: + case MOVN_x: { + new_xn_val = ~shifted_imm16; + if (!is_64_bits) new_xn_val &= kWRegMask; + break; + } + case MOVK_w: + case MOVK_x: { + unsigned reg_code = instr->Rd(); + int64_t prev_xn_val = is_64_bits ? xreg(reg_code) + : wreg(reg_code); + new_xn_val = + (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16; + break; + } + case MOVZ_w: + case MOVZ_x: { + new_xn_val = shifted_imm16; + break; + } + default: + VIXL_UNREACHABLE(); + } + + // Update the destination register. + set_xreg(instr->Rd(), new_xn_val); +} + + +void Simulator::VisitConditionalSelect(const Instruction* instr) { + uint64_t new_val = xreg(instr->Rn()); + + if (ConditionFailed(static_cast<Condition>(instr->Condition()))) { + new_val = xreg(instr->Rm()); + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: break; + case CSINC_w: + case CSINC_x: new_val++; break; + case CSINV_w: + case CSINV_x: new_val = ~new_val; break; + case CSNEG_w: + case CSNEG_x: new_val = -new_val; break; + default: VIXL_UNIMPLEMENTED(); + } + } + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + set_reg(reg_size, instr->Rd(), new_val); +} + + +void Simulator::VisitDataProcessing1Source(const Instruction* instr) { + unsigned dst = instr->Rd(); + unsigned src = instr->Rn(); + + switch (instr->Mask(DataProcessing1SourceMask)) { + case RBIT_w: set_wreg(dst, ReverseBits(wreg(src))); break; + case RBIT_x: set_xreg(dst, ReverseBits(xreg(src))); break; + case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), 1)); break; + case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), 1)); break; + case REV_w: set_wreg(dst, ReverseBytes(wreg(src), 2)); break; + case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), 2)); break; + case REV_x: set_xreg(dst, ReverseBytes(xreg(src), 3)); break; + case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src))); break; + case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src))); break; + case CLS_w: { + set_wreg(dst, CountLeadingSignBits(wreg(src))); + break; + } + case CLS_x: { + set_xreg(dst, CountLeadingSignBits(xreg(src))); + break; + } + default: VIXL_UNIMPLEMENTED(); + } +} + + +uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) { + VIXL_ASSERT((n > 32) && (n <= 64)); + for (unsigned i = (n - 1); i >= 32; i--) { + if (((data >> i) & 1) != 0) { + uint64_t polysh32 = (uint64_t)poly << (i - 32); + uint64_t mask = (UINT64_C(1) << i) - 1; + data = ((data & mask) ^ polysh32); + } + } + return data & 0xffffffff; +} + + +template <typename T> +uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) { + unsigned size = sizeof(val) * 8; // Number of bits in type T. 
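+ // Illustrative use: for CRC32B, T is uint8_t, so size is 8 and Poly32Mod2
+ // reduces a (32 + 8)-bit value; the wider CRC32X variant is handled by the
+ // uint64_t overload below, which splits its input into two 32-bit words.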
+ VIXL_ASSERT((size == 8) || (size == 16) || (size == 32)); + uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size; + uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32; + return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly)); +} + + +uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) { + // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute + // the CRC of each 32-bit word sequentially. + acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly); + return Crc32Checksum(acc, (uint32_t)(val >> 32), poly); +} + + +void Simulator::VisitDataProcessing2Source(const Instruction* instr) { + Shift shift_op = NO_SHIFT; + int64_t result = 0; + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + + switch (instr->Mask(DataProcessing2SourceMask)) { + case SDIV_w: { + int32_t rn = wreg(instr->Rn()); + int32_t rm = wreg(instr->Rm()); + if ((rn == kWMinInt) && (rm == -1)) { + result = kWMinInt; + } else if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case SDIV_x: { + int64_t rn = xreg(instr->Rn()); + int64_t rm = xreg(instr->Rm()); + if ((rn == kXMinInt) && (rm == -1)) { + result = kXMinInt; + } else if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case UDIV_w: { + uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn())); + uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm())); + if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case UDIV_x: { + uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn())); + uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm())); + if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. 
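+ // For example, `udiv x0, x1, xzr` simply writes zero to x0 rather than
+ // raising an exception.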
+ result = 0; + } else { + result = rn / rm; + } + break; + } + case LSLV_w: + case LSLV_x: shift_op = LSL; break; + case LSRV_w: + case LSRV_x: shift_op = LSR; break; + case ASRV_w: + case ASRV_x: shift_op = ASR; break; + case RORV_w: + case RORV_x: shift_op = ROR; break; + case CRC32B: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint8_t val = reg<uint8_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32H: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint16_t val = reg<uint16_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32W: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint32_t val = reg<uint32_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32X: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint64_t val = reg<uint64_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + reg_size = kWRegSize; + break; + } + case CRC32CB: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint8_t val = reg<uint8_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CH: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint16_t val = reg<uint16_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CW: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint32_t val = reg<uint32_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CX: { + uint32_t acc = reg<uint32_t>(instr->Rn()); + uint64_t val = reg<uint64_t>(instr->Rm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + reg_size = kWRegSize; + break; + } + default: VIXL_UNIMPLEMENTED(); + } + + if (shift_op != NO_SHIFT) { + // Shift distance encoded in the least-significant five/six bits of the + // register. + int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f; + unsigned shift = wreg(instr->Rm()) & mask; + result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op, + shift); + } + set_reg(reg_size, instr->Rd(), result); +} + + +// The algorithm used is adapted from the one described in section 8.2 of +// Hacker's Delight, by Henry S. Warren, Jr. +// It assumes that a right shift on a signed integer is an arithmetic shift. +// Type T must be either uint64_t or int64_t. +template <typename T> +static T MultiplyHigh(T u, T v) { + uint64_t u0, v0, w0; + T u1, v1, w1, w2, t; + + VIXL_ASSERT(sizeof(u) == sizeof(u0)); + + u0 = u & 0xffffffff; + u1 = u >> 32; + v0 = v & 0xffffffff; + v1 = v >> 32; + + w0 = u0 * v0; + t = u1 * v0 + (w0 >> 32); + w1 = t & 0xffffffff; + w2 = t >> 32; + w1 = u0 * v1 + w1; + + return u1 * v1 + w2 + (w1 >> 32); +} + + +void Simulator::VisitDataProcessing3Source(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + + int64_t result = 0; + // Extract and sign- or zero-extend 32-bit arguments for widening operations. 
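+ // For example, SMADDL sign-extends its W operands: with Rn holding 0xffffffff
+ // (-1) and Rm holding 2, rn_s32 * rm_s32 evaluates to -2, not 0x1fffffffe.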
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn()); + uint64_t rm_u32 = reg<uint32_t>(instr->Rm()); + int64_t rn_s32 = reg<int32_t>(instr->Rn()); + int64_t rm_s32 = reg<int32_t>(instr->Rm()); + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: + result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm())); + break; + case MSUB_w: + case MSUB_x: + result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm())); + break; + case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break; + case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break; + case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break; + case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break; + case UMULH_x: + result = MultiplyHigh(reg<uint64_t>(instr->Rn()), + reg<uint64_t>(instr->Rm())); + break; + case SMULH_x: + result = MultiplyHigh(xreg(instr->Rn()), xreg(instr->Rm())); + break; + default: VIXL_UNIMPLEMENTED(); + } + set_reg(reg_size, instr->Rd(), result); +} + + +void Simulator::VisitBitfield(const Instruction* instr) { + unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask; + int64_t R = instr->ImmR(); + int64_t S = instr->ImmS(); + int64_t diff = S - R; + int64_t mask; + if (diff >= 0) { + mask = (diff < (reg_size - 1)) ? (INT64_C(1) << (diff + 1)) - 1 + : reg_mask; + } else { + mask = (INT64_C(1) << (S + 1)) - 1; + mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R)); + diff += reg_size; + } + + // inzero indicates if the extracted bitfield is inserted into the + // destination register value or in zero. + // If extend is true, extend the sign of the extracted bitfield. + bool inzero = false; + bool extend = false; + switch (instr->Mask(BitfieldMask)) { + case BFM_x: + case BFM_w: + break; + case SBFM_x: + case SBFM_w: + inzero = true; + extend = true; + break; + case UBFM_x: + case UBFM_w: + inzero = true; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd()); + int64_t src = reg(reg_size, instr->Rn()); + // Rotate source bitfield into place. + int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R)); + // Determine the sign extension. + int64_t topbits = ((INT64_C(1) << (reg_size - diff - 1)) - 1) << (diff + 1); + int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0; + + // Merge sign extension, dest/zero and bitfield. + result = signbits | (result & mask) | (dst & ~mask); + + set_reg(reg_size, instr->Rd(), result); +} + + +void Simulator::VisitExtract(const Instruction* instr) { + unsigned lsb = instr->ImmS(); + unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize + : kWRegSize; + uint64_t low_res = static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb; + uint64_t high_res = + (lsb == 0) ? 
0 : reg(reg_size, instr->Rn()) << (reg_size - lsb); + set_reg(reg_size, instr->Rd(), low_res | high_res); +} + + +void Simulator::VisitFPImmediate(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dest = instr->Rd(); + switch (instr->Mask(FPImmediateMask)) { + case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break; + case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break; + default: VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPIntegerConvert(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dst = instr->Rd(); + unsigned src = instr->Rn(); + + FPRounding round = RMode(); + + switch (instr->Mask(FPIntegerConvertMask)) { + case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break; + case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break; + case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break; + case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break; + case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break; + case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break; + case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break; + case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break; + case FCVTMS_ws: + set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity)); + break; + case FCVTMS_xs: + set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity)); + break; + case FCVTMS_wd: + set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity)); + break; + case FCVTMS_xd: + set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity)); + break; + case FCVTMU_ws: + set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity)); + break; + case FCVTMU_xs: + set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity)); + break; + case FCVTMU_wd: + set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity)); + break; + case FCVTMU_xd: + set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity)); + break; + case FCVTPS_ws: + set_wreg(dst, FPToInt32(sreg(src), FPPositiveInfinity)); + break; + case FCVTPS_xs: + set_xreg(dst, FPToInt64(sreg(src), FPPositiveInfinity)); + break; + case FCVTPS_wd: + set_wreg(dst, FPToInt32(dreg(src), FPPositiveInfinity)); + break; + case FCVTPS_xd: + set_xreg(dst, FPToInt64(dreg(src), FPPositiveInfinity)); + break; + case FCVTPU_ws: + set_wreg(dst, FPToUInt32(sreg(src), FPPositiveInfinity)); + break; + case FCVTPU_xs: + set_xreg(dst, FPToUInt64(sreg(src), FPPositiveInfinity)); + break; + case FCVTPU_wd: + set_wreg(dst, FPToUInt32(dreg(src), FPPositiveInfinity)); + break; + case FCVTPU_xd: + set_xreg(dst, FPToUInt64(dreg(src), FPPositiveInfinity)); + break; + case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break; + case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break; + case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break; + case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break; + case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break; + case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break; + case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break; + case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break; + case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break; + case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break; + case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break; + case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break; + case FCVTZU_ws: set_wreg(dst, 
FPToUInt32(sreg(src), FPZero)); break; + case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break; + case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break; + case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break; + case FMOV_ws: set_wreg(dst, sreg_bits(src)); break; + case FMOV_xd: set_xreg(dst, dreg_bits(src)); break; + case FMOV_sw: set_sreg_bits(dst, wreg(src)); break; + case FMOV_dx: set_dreg_bits(dst, xreg(src)); break; + case FMOV_d1_x: + LogicVRegister(vreg(dst)).SetUint(kFormatD, 1, xreg(src)); + break; + case FMOV_x_d1: + set_xreg(dst, LogicVRegister(vreg(src)).Uint(kFormatD, 1)); + break; + + // A 32-bit input can be handled in the same way as a 64-bit input, since + // the sign- or zero-extension will not affect the conversion. + case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break; + case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break; + case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break; + case UCVTF_dw: { + set_dreg(dst, UFixedToDouble(static_cast<uint32_t>(wreg(src)), 0, round)); + break; + } + case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break; + case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break; + case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break; + case UCVTF_sw: { + set_sreg(dst, UFixedToFloat(static_cast<uint32_t>(wreg(src)), 0, round)); + break; + } + + default: VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPFixedPointConvert(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dst = instr->Rd(); + unsigned src = instr->Rn(); + int fbits = 64 - instr->FPScale(); + + FPRounding round = RMode(); + + switch (instr->Mask(FPFixedPointConvertMask)) { + // A 32-bit input can be handled in the same way as a 64-bit input, since + // the sign- or zero-extension will not affect the conversion. 
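+    // Illustrative example (operand values chosen arbitrarily): for
+    // SCVTF_dw_fixed with wreg(src) == -3 and fbits == 2, the W value
+    // sign-extends to the same 64-bit integer, so the conversion yields
+    // -3 / 2^2 == -0.75 exactly as it would for an X operand. The unsigned
+    // variants cast to uint32_t first, so the zero-extended value is used
+    // instead.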
+ case SCVTF_dx_fixed: + set_dreg(dst, FixedToDouble(xreg(src), fbits, round)); + break; + case SCVTF_dw_fixed: + set_dreg(dst, FixedToDouble(wreg(src), fbits, round)); + break; + case UCVTF_dx_fixed: + set_dreg(dst, UFixedToDouble(xreg(src), fbits, round)); + break; + case UCVTF_dw_fixed: { + set_dreg(dst, + UFixedToDouble(static_cast<uint32_t>(wreg(src)), fbits, round)); + break; + } + case SCVTF_sx_fixed: + set_sreg(dst, FixedToFloat(xreg(src), fbits, round)); + break; + case SCVTF_sw_fixed: + set_sreg(dst, FixedToFloat(wreg(src), fbits, round)); + break; + case UCVTF_sx_fixed: + set_sreg(dst, UFixedToFloat(xreg(src), fbits, round)); + break; + case UCVTF_sw_fixed: { + set_sreg(dst, + UFixedToFloat(static_cast<uint32_t>(wreg(src)), fbits, round)); + break; + } + case FCVTZS_xd_fixed: + set_xreg(dst, FPToInt64(dreg(src) * std::pow(2.0, fbits), FPZero)); + break; + case FCVTZS_wd_fixed: + set_wreg(dst, FPToInt32(dreg(src) * std::pow(2.0, fbits), FPZero)); + break; + case FCVTZU_xd_fixed: + set_xreg(dst, FPToUInt64(dreg(src) * std::pow(2.0, fbits), FPZero)); + break; + case FCVTZU_wd_fixed: + set_wreg(dst, FPToUInt32(dreg(src) * std::pow(2.0, fbits), FPZero)); + break; + case FCVTZS_xs_fixed: + set_xreg(dst, FPToInt64(sreg(src) * std::pow(2.0f, fbits), FPZero)); + break; + case FCVTZS_ws_fixed: + set_wreg(dst, FPToInt32(sreg(src) * std::pow(2.0f, fbits), FPZero)); + break; + case FCVTZU_xs_fixed: + set_xreg(dst, FPToUInt64(sreg(src) * std::pow(2.0f, fbits), FPZero)); + break; + case FCVTZU_ws_fixed: + set_wreg(dst, FPToUInt32(sreg(src) * std::pow(2.0f, fbits), FPZero)); + break; + default: VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPCompare(const Instruction* instr) { + AssertSupportedFPCR(); + + FPTrapFlags trap = DisableTrap; + switch (instr->Mask(FPCompareMask)) { + case FCMPE_s: trap = EnableTrap; VIXL_FALLTHROUGH(); + case FCMP_s: FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap); break; + case FCMPE_d: trap = EnableTrap; VIXL_FALLTHROUGH(); + case FCMP_d: FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap); break; + case FCMPE_s_zero: trap = EnableTrap; VIXL_FALLTHROUGH(); + case FCMP_s_zero: FPCompare(sreg(instr->Rn()), 0.0f, trap); break; + case FCMPE_d_zero: trap = EnableTrap; VIXL_FALLTHROUGH(); + case FCMP_d_zero: FPCompare(dreg(instr->Rn()), 0.0, trap); break; + default: VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPConditionalCompare(const Instruction* instr) { + AssertSupportedFPCR(); + + FPTrapFlags trap = DisableTrap; + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMPE_s: trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_s: + if (ConditionPassed(instr->Condition())) { + FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap); + } else { + nzcv().SetFlags(instr->Nzcv()); + LogSystemRegister(NZCV); + } + break; + case FCCMPE_d: trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_d: + if (ConditionPassed(instr->Condition())) { + FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap); + } else { + nzcv().SetFlags(instr->Nzcv()); + LogSystemRegister(NZCV); + } + break; + default: VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPConditionalSelect(const Instruction* instr) { + AssertSupportedFPCR(); + + Instr selected; + if (ConditionPassed(instr->Condition())) { + selected = instr->Rn(); + } else { + selected = instr->Rm(); + } + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break; + case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break; + default: 
VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) { + AssertSupportedFPCR(); + + FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode()); + VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS; + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + bool inexact_exception = false; + + unsigned fd = instr->Rd(); + unsigned fn = instr->Rn(); + + switch (instr->Mask(FPDataProcessing1SourceMask)) { + case FMOV_s: set_sreg(fd, sreg(fn)); return; + case FMOV_d: set_dreg(fd, dreg(fn)); return; + case FABS_s: fabs_(kFormatS, vreg(fd), vreg(fn)); return; + case FABS_d: fabs_(kFormatD, vreg(fd), vreg(fn)); return; + case FNEG_s: fneg(kFormatS, vreg(fd), vreg(fn)); return; + case FNEG_d: fneg(kFormatD, vreg(fd), vreg(fn)); return; + case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); return; + case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); return; + case FCVT_hs: set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven)); return; + case FCVT_sh: set_sreg(fd, FPToFloat(hreg(fn))); return; + case FCVT_dh: set_dreg(fd, FPToDouble(FPToFloat(hreg(fn)))); return; + case FCVT_hd: set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven)); return; + case FSQRT_s: + case FSQRT_d: fsqrt(vform, rd, rn); return; + case FRINTI_s: + case FRINTI_d: break; // Use FPCR rounding mode. + case FRINTX_s: + case FRINTX_d: inexact_exception = true; break; + case FRINTA_s: + case FRINTA_d: fpcr_rounding = FPTieAway; break; + case FRINTM_s: + case FRINTM_d: fpcr_rounding = FPNegativeInfinity; break; + case FRINTN_s: + case FRINTN_d: fpcr_rounding = FPTieEven; break; + case FRINTP_s: + case FRINTP_d: fpcr_rounding = FPPositiveInfinity; break; + case FRINTZ_s: + case FRINTZ_d: fpcr_rounding = FPZero; break; + default: VIXL_UNIMPLEMENTED(); + } + + // Only FRINT* instructions fall through the switch above. + frint(vform, rd, rn, fpcr_rounding, inexact_exception); +} + + +void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) { + AssertSupportedFPCR(); + + VectorFormat vform = (instr->Mask(FP64) == FP64) ? 
kFormatD : kFormatS; + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(FPDataProcessing2SourceMask)) { + case FADD_s: + case FADD_d: fadd(vform, rd, rn, rm); break; + case FSUB_s: + case FSUB_d: fsub(vform, rd, rn, rm); break; + case FMUL_s: + case FMUL_d: fmul(vform, rd, rn, rm); break; + case FNMUL_s: + case FNMUL_d: fnmul(vform, rd, rn, rm); break; + case FDIV_s: + case FDIV_d: fdiv(vform, rd, rn, rm); break; + case FMAX_s: + case FMAX_d: fmax(vform, rd, rn, rm); break; + case FMIN_s: + case FMIN_d: fmin(vform, rd, rn, rm); break; + case FMAXNM_s: + case FMAXNM_d: fmaxnm(vform, rd, rn, rm); break; + case FMINNM_s: + case FMINNM_d: fminnm(vform, rd, rn, rm); break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned fd = instr->Rd(); + unsigned fn = instr->Rn(); + unsigned fm = instr->Rm(); + unsigned fa = instr->Ra(); + + switch (instr->Mask(FPDataProcessing3SourceMask)) { + // fd = fa +/- (fn * fm) + case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break; + case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break; + case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break; + case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break; + // Negated variants of the above. + case FNMADD_s: + set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm))); + break; + case FNMSUB_s: + set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm))); + break; + case FNMADD_d: + set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm))); + break; + case FNMSUB_d: + set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm))); + break; + default: VIXL_UNIMPLEMENTED(); + } +} + + +bool Simulator::FPProcessNaNs(const Instruction* instr) { + unsigned fd = instr->Rd(); + unsigned fn = instr->Rn(); + unsigned fm = instr->Rm(); + bool done = false; + + if (instr->Mask(FP64) == FP64) { + double result = FPProcessNaNs(dreg(fn), dreg(fm)); + if (std::isnan(result)) { + set_dreg(fd, result); + done = true; + } + } else { + float result = FPProcessNaNs(sreg(fn), sreg(fm)); + if (std::isnan(result)) { + set_sreg(fd, result); + done = true; + } + } + + return done; +} + + +void Simulator::SysOp_W(int op, int64_t val) { + switch (op) { + case IVAU: + case CVAC: + case CVAU: + case CIVAC: { + // Perform a dummy memory access to ensure that we have read access + // to the specified address. + volatile uint8_t y = Memory::Read<uint8_t>(val); + USE(y); + // TODO: Implement "case ZVA:". + break; + } + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitSystem(const Instruction* instr) { + // Some system instructions hijack their Op and Cp fields to represent a + // range of immediates instead of indicating a different instruction. This + // makes the decoding tricky. 
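+  // For example, in the checks below NOP is not decoded as a separate
+  // instruction: it is the generic HINT encoding with ImmHint() selecting
+  // the variant. Similarly, MRS and MSR pick the target register (NZCV or
+  // FPCR) purely from ImmSystemRegister(), and SYS forwards its immediate
+  // operation field to SysOp_W().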
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) { + VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX); + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + PrintExclusiveAccessWarning(); + ClearLocalMonitor(); + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + switch (instr->ImmSystemRegister()) { + case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break; + case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break; + default: VIXL_UNIMPLEMENTED(); + } + break; + } + case MSR: { + switch (instr->ImmSystemRegister()) { + case NZCV: + nzcv().SetRawValue(wreg(instr->Rt())); + LogSystemRegister(NZCV); + break; + case FPCR: + fpcr().SetRawValue(wreg(instr->Rt())); + LogSystemRegister(FPCR); + break; + default: VIXL_UNIMPLEMENTED(); + } + break; + } + } + } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT); + switch (instr->ImmHint()) { + case NOP: break; + default: VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + __sync_synchronize(); + } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) { + switch (instr->Mask(SystemSysMask)) { + case SYS: SysOp_W(instr->SysOp(), xreg(instr->Rt())); break; + default: VIXL_UNIMPLEMENTED(); + } + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitCrypto2RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitCrypto3RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitCryptoAES(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitNEON2RegMisc(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + + static const NEONFormatMap map_lp = { + {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D} + }; + VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp); + + static const NEONFormatMap map_fcvtl = { + {22}, {NF_4S, NF_2D} + }; + VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl); + + static const NEONFormatMap map_fcvtn = { + {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S} + }; + VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
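+    // For example, NEON_RBIT_NOT below is a single case in this switch; the
+    // size field (read through FPType()) then selects the operation, 0 for
+    // NOT and 1 for RBIT, rather than describing a lane size.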
+ switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: rev64(vf, rd, rn); break; + case NEON_REV32: rev32(vf, rd, rn); break; + case NEON_REV16: rev16(vf, rd, rn); break; + case NEON_SUQADD: suqadd(vf, rd, rn); break; + case NEON_USQADD: usqadd(vf, rd, rn); break; + case NEON_CLS: cls(vf, rd, rn); break; + case NEON_CLZ: clz(vf, rd, rn); break; + case NEON_CNT: cnt(vf, rd, rn); break; + case NEON_SQABS: abs(vf, rd, rn).SignedSaturate(vf); break; + case NEON_SQNEG: neg(vf, rd, rn).SignedSaturate(vf); break; + case NEON_CMGT_zero: cmp(vf, rd, rn, 0, gt); break; + case NEON_CMGE_zero: cmp(vf, rd, rn, 0, ge); break; + case NEON_CMEQ_zero: cmp(vf, rd, rn, 0, eq); break; + case NEON_CMLE_zero: cmp(vf, rd, rn, 0, le); break; + case NEON_CMLT_zero: cmp(vf, rd, rn, 0, lt); break; + case NEON_ABS: abs(vf, rd, rn); break; + case NEON_NEG: neg(vf, rd, rn); break; + case NEON_SADDLP: saddlp(vf_lp, rd, rn); break; + case NEON_UADDLP: uaddlp(vf_lp, rd, rn); break; + case NEON_SADALP: sadalp(vf_lp, rd, rn); break; + case NEON_UADALP: uadalp(vf_lp, rd, rn); break; + case NEON_RBIT_NOT: + vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); + switch (instr->FPType()) { + case 0: not_(vf, rd, rn); break; + case 1: rbit(vf, rd, rn);; break; + default: + VIXL_UNIMPLEMENTED(); + } + break; + } + } else { + VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap()); + FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode()); + bool inexact_exception = false; + + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: fabs_(fpf, rd, rn); return; + case NEON_FNEG: fneg(fpf, rd, rn); return; + case NEON_FSQRT: fsqrt(fpf, rd, rn); return; + case NEON_FCVTL: + if (instr->Mask(NEON_Q)) { + fcvtl2(vf_fcvtl, rd, rn); + } else { + fcvtl(vf_fcvtl, rd, rn); + } + return; + case NEON_FCVTN: + if (instr->Mask(NEON_Q)) { + fcvtn2(vf_fcvtn, rd, rn); + } else { + fcvtn(vf_fcvtn, rd, rn); + } + return; + case NEON_FCVTXN: + if (instr->Mask(NEON_Q)) { + fcvtxn2(vf_fcvtn, rd, rn); + } else { + fcvtxn(vf_fcvtn, rd, rn); + } + return; + + // The following instructions break from the switch statement, rather + // than return. + case NEON_FRINTI: break; // Use FPCR rounding mode. 
+ case NEON_FRINTX: inexact_exception = true; break; + case NEON_FRINTA: fpcr_rounding = FPTieAway; break; + case NEON_FRINTM: fpcr_rounding = FPNegativeInfinity; break; + case NEON_FRINTN: fpcr_rounding = FPTieEven; break; + case NEON_FRINTP: fpcr_rounding = FPPositiveInfinity; break; + case NEON_FRINTZ: fpcr_rounding = FPZero; break; + + case NEON_FCVTNS: fcvts(fpf, rd, rn, FPTieEven); return; + case NEON_FCVTNU: fcvtu(fpf, rd, rn, FPTieEven); return; + case NEON_FCVTPS: fcvts(fpf, rd, rn, FPPositiveInfinity); return; + case NEON_FCVTPU: fcvtu(fpf, rd, rn, FPPositiveInfinity); return; + case NEON_FCVTMS: fcvts(fpf, rd, rn, FPNegativeInfinity); return; + case NEON_FCVTMU: fcvtu(fpf, rd, rn, FPNegativeInfinity); return; + case NEON_FCVTZS: fcvts(fpf, rd, rn, FPZero); return; + case NEON_FCVTZU: fcvtu(fpf, rd, rn, FPZero); return; + case NEON_FCVTAS: fcvts(fpf, rd, rn, FPTieAway); return; + case NEON_FCVTAU: fcvtu(fpf, rd, rn, FPTieAway); return; + case NEON_SCVTF: scvtf(fpf, rd, rn, 0, fpcr_rounding); return; + case NEON_UCVTF: ucvtf(fpf, rd, rn, 0, fpcr_rounding); return; + case NEON_URSQRTE: ursqrte(fpf, rd, rn); return; + case NEON_URECPE: urecpe(fpf, rd, rn); return; + case NEON_FRSQRTE: frsqrte(fpf, rd, rn); return; + case NEON_FRECPE: frecpe(fpf, rd, rn, fpcr_rounding); return; + case NEON_FCMGT_zero: fcmp_zero(fpf, rd, rn, gt); return; + case NEON_FCMGE_zero: fcmp_zero(fpf, rd, rn, ge); return; + case NEON_FCMEQ_zero: fcmp_zero(fpf, rd, rn, eq); return; + case NEON_FCMLE_zero: fcmp_zero(fpf, rd, rn, le); return; + case NEON_FCMLT_zero: fcmp_zero(fpf, rd, rn, lt); return; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: xtn(vf, rd, rn); return; + case NEON_SQXTN: sqxtn(vf, rd, rn); return; + case NEON_UQXTN: uqxtn(vf, rd, rn); return; + case NEON_SQXTUN: sqxtun(vf, rd, rn); return; + case NEON_SHLL: + vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + if (instr->Mask(NEON_Q)) { + shll2(vf, rd, rn); + } else { + shll(vf, rd, rn); + } + return; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VIXL_UNIMPLEMENTED(); + } + } + + // Only FRINT* instructions fall through the switch above. 
+ frint(fpf, rd, rn, fpcr_rounding, inexact_exception); + } +} + + +void Simulator::VisitNEON3Same(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: and_(vf, rd, rn, rm); break; + case NEON_ORR: orr(vf, rd, rn, rm); break; + case NEON_ORN: orn(vf, rd, rn, rm); break; + case NEON_EOR: eor(vf, rd, rn, rm); break; + case NEON_BIC: bic(vf, rd, rn, rm); break; + case NEON_BIF: bif(vf, rd, rn, rm); break; + case NEON_BIT: bit(vf, rd, rn, rm); break; + case NEON_BSL: bsl(vf, rd, rn, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + switch (instr->Mask(NEON3SameFPMask)) { + case NEON_FADD: fadd(vf, rd, rn, rm); break; + case NEON_FSUB: fsub(vf, rd, rn, rm); break; + case NEON_FMUL: fmul(vf, rd, rn, rm); break; + case NEON_FDIV: fdiv(vf, rd, rn, rm); break; + case NEON_FMAX: fmax(vf, rd, rn, rm); break; + case NEON_FMIN: fmin(vf, rd, rn, rm); break; + case NEON_FMAXNM: fmaxnm(vf, rd, rn, rm); break; + case NEON_FMINNM: fminnm(vf, rd, rn, rm); break; + case NEON_FMLA: fmla(vf, rd, rn, rm); break; + case NEON_FMLS: fmls(vf, rd, rn, rm); break; + case NEON_FMULX: fmulx(vf, rd, rn, rm); break; + case NEON_FACGE: fabscmp(vf, rd, rn, rm, ge); break; + case NEON_FACGT: fabscmp(vf, rd, rn, rm, gt); break; + case NEON_FCMEQ: fcmp(vf, rd, rn, rm, eq); break; + case NEON_FCMGE: fcmp(vf, rd, rn, rm, ge); break; + case NEON_FCMGT: fcmp(vf, rd, rn, rm, gt); break; + case NEON_FRECPS: frecps(vf, rd, rn, rm); break; + case NEON_FRSQRTS: frsqrts(vf, rd, rn, rm); break; + case NEON_FABD: fabd(vf, rd, rn, rm); break; + case NEON_FADDP: faddp(vf, rd, rn, rm); break; + case NEON_FMAXP: fmaxp(vf, rd, rn, rm); break; + case NEON_FMAXNMP: fmaxnmp(vf, rd, rn, rm); break; + case NEON_FMINP: fminp(vf, rd, rn, rm); break; + case NEON_FMINNMP: fminnmp(vf, rd, rn, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + switch (instr->Mask(NEON3SameMask)) { + case NEON_ADD: add(vf, rd, rn, rm); break; + case NEON_ADDP: addp(vf, rd, rn, rm); break; + case NEON_CMEQ: cmp(vf, rd, rn, rm, eq); break; + case NEON_CMGE: cmp(vf, rd, rn, rm, ge); break; + case NEON_CMGT: cmp(vf, rd, rn, rm, gt); break; + case NEON_CMHI: cmp(vf, rd, rn, rm, hi); break; + case NEON_CMHS: cmp(vf, rd, rn, rm, hs); break; + case NEON_CMTST: cmptst(vf, rd, rn, rm); break; + case NEON_MLS: mls(vf, rd, rn, rm); break; + case NEON_MLA: mla(vf, rd, rn, rm); break; + case NEON_MUL: mul(vf, rd, rn, rm); break; + case NEON_PMUL: pmul(vf, rd, rn, rm); break; + case NEON_SMAX: smax(vf, rd, rn, rm); break; + case NEON_SMAXP: smaxp(vf, rd, rn, rm); break; + case NEON_SMIN: smin(vf, rd, rn, rm); break; + case NEON_SMINP: sminp(vf, rd, rn, rm); break; + case NEON_SUB: sub(vf, rd, rn, rm); break; + case NEON_UMAX: umax(vf, rd, rn, rm); break; + case NEON_UMAXP: umaxp(vf, rd, rn, rm); break; + case NEON_UMIN: umin(vf, rd, rn, rm); break; + case NEON_UMINP: uminp(vf, rd, rn, rm); break; + case NEON_SSHL: sshl(vf, rd, rn, rm); break; + case NEON_USHL: ushl(vf, rd, rn, rm); break; + case NEON_SABD: absdiff(vf, rd, rn, rm, true); break; + case NEON_UABD: absdiff(vf, rd, rn, rm, 
false); break; + case NEON_SABA: saba(vf, rd, rn, rm); break; + case NEON_UABA: uaba(vf, rd, rn, rm); break; + case NEON_UQADD: add(vf, rd, rn, rm).UnsignedSaturate(vf); break; + case NEON_SQADD: add(vf, rd, rn, rm).SignedSaturate(vf); break; + case NEON_UQSUB: sub(vf, rd, rn, rm).UnsignedSaturate(vf); break; + case NEON_SQSUB: sub(vf, rd, rn, rm).SignedSaturate(vf); break; + case NEON_SQDMULH: sqdmulh(vf, rd, rn, rm); break; + case NEON_SQRDMULH: sqrdmulh(vf, rd, rn, rm); break; + case NEON_UQSHL: ushl(vf, rd, rn, rm).UnsignedSaturate(vf); break; + case NEON_SQSHL: sshl(vf, rd, rn, rm).SignedSaturate(vf); break; + case NEON_URSHL: ushl(vf, rd, rn, rm).Round(vf); break; + case NEON_SRSHL: sshl(vf, rd, rn, rm).Round(vf); break; + case NEON_UQRSHL: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + case NEON_UHADD: + add(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_URHADD: + add(vf, rd, rn, rm).Uhalve(vf).Round(vf); + break; + case NEON_SHADD: + add(vf, rd, rn, rm).Halve(vf); + break; + case NEON_SRHADD: + add(vf, rd, rn, rm).Halve(vf).Round(vf); + break; + case NEON_UHSUB: + sub(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_SHSUB: + sub(vf, rd, rn, rm).Halve(vf); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + + +void Simulator::VisitNEON3Different(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(NEON3DifferentMask)) { + case NEON_PMULL: pmull(vf_l, rd, rn, rm); break; + case NEON_PMULL2: pmull2(vf_l, rd, rn, rm); break; + case NEON_UADDL: uaddl(vf_l, rd, rn, rm); break; + case NEON_UADDL2: uaddl2(vf_l, rd, rn, rm); break; + case NEON_SADDL: saddl(vf_l, rd, rn, rm); break; + case NEON_SADDL2: saddl2(vf_l, rd, rn, rm); break; + case NEON_USUBL: usubl(vf_l, rd, rn, rm); break; + case NEON_USUBL2: usubl2(vf_l, rd, rn, rm); break; + case NEON_SSUBL: ssubl(vf_l, rd, rn, rm); break; + case NEON_SSUBL2: ssubl2(vf_l, rd, rn, rm); break; + case NEON_SABAL: sabal(vf_l, rd, rn, rm); break; + case NEON_SABAL2: sabal2(vf_l, rd, rn, rm); break; + case NEON_UABAL: uabal(vf_l, rd, rn, rm); break; + case NEON_UABAL2: uabal2(vf_l, rd, rn, rm); break; + case NEON_SABDL: sabdl(vf_l, rd, rn, rm); break; + case NEON_SABDL2: sabdl2(vf_l, rd, rn, rm); break; + case NEON_UABDL: uabdl(vf_l, rd, rn, rm); break; + case NEON_UABDL2: uabdl2(vf_l, rd, rn, rm); break; + case NEON_SMLAL: smlal(vf_l, rd, rn, rm); break; + case NEON_SMLAL2: smlal2(vf_l, rd, rn, rm); break; + case NEON_UMLAL: umlal(vf_l, rd, rn, rm); break; + case NEON_UMLAL2: umlal2(vf_l, rd, rn, rm); break; + case NEON_SMLSL: smlsl(vf_l, rd, rn, rm); break; + case NEON_SMLSL2: smlsl2(vf_l, rd, rn, rm); break; + case NEON_UMLSL: umlsl(vf_l, rd, rn, rm); break; + case NEON_UMLSL2: umlsl2(vf_l, rd, rn, rm); break; + case NEON_SMULL: smull(vf_l, rd, rn, rm); break; + case NEON_SMULL2: smull2(vf_l, rd, rn, rm); break; + case NEON_UMULL: umull(vf_l, rd, rn, rm); break; + case NEON_UMULL2: umull2(vf_l, rd, rn, rm); break; + case NEON_SQDMLAL: sqdmlal(vf_l, rd, rn, rm); break; + case NEON_SQDMLAL2: sqdmlal2(vf_l, rd, rn, rm); break; + case NEON_SQDMLSL: sqdmlsl(vf_l, rd, rn, rm); break; + case NEON_SQDMLSL2: sqdmlsl2(vf_l, rd, rn, rm); break; + case NEON_SQDMULL: sqdmull(vf_l, rd, rn, 
rm); break; + case NEON_SQDMULL2: sqdmull2(vf_l, rd, rn, rm); break; + case NEON_UADDW: uaddw(vf_l, rd, rn, rm); break; + case NEON_UADDW2: uaddw2(vf_l, rd, rn, rm); break; + case NEON_SADDW: saddw(vf_l, rd, rn, rm); break; + case NEON_SADDW2: saddw2(vf_l, rd, rn, rm); break; + case NEON_USUBW: usubw(vf_l, rd, rn, rm); break; + case NEON_USUBW2: usubw2(vf_l, rd, rn, rm); break; + case NEON_SSUBW: ssubw(vf_l, rd, rn, rm); break; + case NEON_SSUBW2: ssubw2(vf_l, rd, rn, rm); break; + case NEON_ADDHN: addhn(vf, rd, rn, rm); break; + case NEON_ADDHN2: addhn2(vf, rd, rn, rm); break; + case NEON_RADDHN: raddhn(vf, rd, rn, rm); break; + case NEON_RADDHN2: raddhn2(vf, rd, rn, rm); break; + case NEON_SUBHN: subhn(vf, rd, rn, rm); break; + case NEON_SUBHN2: subhn2(vf, rd, rn, rm); break; + case NEON_RSUBHN: rsubhn(vf, rd, rn, rm); break; + case NEON_RSUBHN2: rsubhn2(vf, rd, rn, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONAcrossLanes(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + // The input operand's VectorFormat is passed for these instructions. + if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: fmaxv(vf, rd, rn); break; + case NEON_FMINV: fminv(vf, rd, rn); break; + case NEON_FMAXNMV: fmaxnmv(vf, rd, rn); break; + case NEON_FMINNMV: fminnmv(vf, rd, rn); break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: addv(vf, rd, rn); break; + case NEON_SMAXV: smaxv(vf, rd, rn); break; + case NEON_SMINV: sminv(vf, rd, rn); break; + case NEON_UMAXV: umaxv(vf, rd, rn); break; + case NEON_UMINV: uminv(vf, rd, rn); break; + case NEON_SADDLV: saddlv(vf, rd, rn); break; + case NEON_UADDLV: uaddlv(vf, rd, rn); break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + + +void Simulator::VisitNEONByIndexedElement(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf_r = nfd.GetVectorFormat(); + VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + ByElementOp Op = NULL; + + int rm_reg = instr->Rm(); + int index = (instr->NEONH() << 1) | instr->NEONL(); + if (instr->NEONSize() == 1) { + rm_reg &= 0xf; + index = (index << 1) | instr->NEONM(); + } + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_MUL_byelement: Op = &Simulator::mul; vf = vf_r; break; + case NEON_MLA_byelement: Op = &Simulator::mla; vf = vf_r; break; + case NEON_MLS_byelement: Op = &Simulator::mls; vf = vf_r; break; + case NEON_SQDMULH_byelement: Op = &Simulator::sqdmulh; vf = vf_r; break; + case NEON_SQRDMULH_byelement: Op = &Simulator::sqrdmulh; vf = vf_r; break; + case NEON_SMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smull2; + } else { + Op = &Simulator::smull; + } + break; + case NEON_UMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umull2; + } else { + Op = &Simulator::umull; + } + break; + case NEON_SMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlal2; + } else { + Op = &Simulator::smlal; + } + break; + case NEON_UMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlal2; + } else { + Op = &Simulator::umlal; + } + break; + case NEON_SMLSL_byelement: + if 
(instr->Mask(NEON_Q)) { + Op = &Simulator::smlsl2; + } else { + Op = &Simulator::smlsl; + } + break; + case NEON_UMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlsl2; + } else { + Op = &Simulator::umlsl; + } + break; + case NEON_SQDMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmull2; + } else { + Op = &Simulator::sqdmull; + } + break; + case NEON_SQDMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlal2; + } else { + Op = &Simulator::sqdmlal; + } + break; + case NEON_SQDMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlsl2; + } else { + Op = &Simulator::sqdmlsl; + } + break; + default: + index = instr->NEONH(); + if ((instr->FPType() & 1) == 0) { + index = (index << 1) | instr->NEONL(); + } + + vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_byelement: Op = &Simulator::fmul; break; + case NEON_FMLA_byelement: Op = &Simulator::fmla; break; + case NEON_FMLS_byelement: Op = &Simulator::fmls; break; + case NEON_FMULX_byelement: Op = &Simulator::fmulx; break; + default: VIXL_UNIMPLEMENTED(); + } + } + + (this->*Op)(vf, rd, rn, vreg(rm_reg), index); +} + + +void Simulator::VisitNEONCopy(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + int imm5 = instr->ImmNEON5(); + int tz = CountTrailingZeros(imm5, 32); + int reg_index = imm5 >> (tz + 1); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + int imm4 = instr->ImmNEON4(); + int rn_index = imm4 >> tz; + ins_element(vf, rd, reg_index, rn, rn_index); + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + ins_immediate(vf, rd, reg_index, xreg(instr->Rn())); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + uint64_t value = LogicVRegister(rn).Uint(vf, reg_index); + value &= MaxUintFromFormat(vf); + set_xreg(instr->Rd(), value); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) { + int64_t value = LogicVRegister(rn).Int(vf, reg_index); + if (instr->NEONQ()) { + set_xreg(instr->Rd(), value); + } else { + set_wreg(instr->Rd(), (int32_t)value); + } + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + dup_element(vf, rd, rn, reg_index); + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + dup_immediate(vf, rd, xreg(instr->Rn())); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONExtract(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + int index = instr->ImmNEONExt(); + ext(vf, rd, rn, rm, index); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr, + AddrMode addr_mode) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer); + int reg_size = RegisterSizeInBytesFromFormat(vf); + + int reg[4]; + uint64_t addr[4]; + for (int i = 0; i < 4; i++) { + reg[i] = (instr->Rt() + i) % kNumberOfVRegisters; + addr[i] = addr_base + (i * reg_size); + } + int count = 1; + bool log_read = 
true; + + Instr itype = instr->Mask(NEONLoadStoreMultiStructMask); + if (((itype == NEON_LD1_1v) || (itype == NEON_LD1_2v) || + (itype == NEON_LD1_3v) || (itype == NEON_LD1_4v) || + (itype == NEON_ST1_1v) || (itype == NEON_ST1_2v) || + (itype == NEON_ST1_3v) || (itype == NEON_ST1_4v)) && + (instr->Bits(20, 16) != 0)) { + VIXL_UNREACHABLE(); + } + + // We use the PostIndex mask here, as it works in this case for both Offset + // and PostIndex addressing. + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_4v: + case NEON_LD1_4v_post: ld1(vf, vreg(reg[3]), addr[3]); count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_3v: + case NEON_LD1_3v_post: ld1(vf, vreg(reg[2]), addr[2]); count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_2v: + case NEON_LD1_2v_post: ld1(vf, vreg(reg[1]), addr[1]); count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_1v: + case NEON_LD1_1v_post: + ld1(vf, vreg(reg[0]), addr[0]); + log_read = true; + break; + case NEON_ST1_4v: + case NEON_ST1_4v_post: st1(vf, vreg(reg[3]), addr[3]); count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_3v: + case NEON_ST1_3v_post: st1(vf, vreg(reg[2]), addr[2]); count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_2v: + case NEON_ST1_2v_post: st1(vf, vreg(reg[1]), addr[1]); count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_1v: + case NEON_ST1_1v_post: + st1(vf, vreg(reg[0]), addr[0]); + log_read = false; + break; + case NEON_LD2_post: + case NEON_LD2: + ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]); + count = 2; + break; + case NEON_ST2: + case NEON_ST2_post: + st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]); + count = 2; + break; + case NEON_LD3_post: + case NEON_LD3: + ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]); + count = 3; + break; + case NEON_ST3: + case NEON_ST3_post: + st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]); + count = 3; + break; + case NEON_ST4: + case NEON_ST4_post: + st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), + addr[0]); + count = 4; + break; + case NEON_LD4_post: + case NEON_LD4: + ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), + addr[0]); + count = 4; + break; + default: VIXL_UNIMPLEMENTED(); + } + + // Explicitly log the register update whilst we have type information. + for (int i = 0; i < count; i++) { + // For de-interleaving loads, only print the base address. + int lane_size = LaneSizeInBytesFromFormat(vf); + PrintRegisterFormat format = GetPrintRegisterFormatTryFP( + GetPrintRegisterFormatForSize(reg_size, lane_size)); + if (log_read) { + LogVRead(addr_base, reg[i], format); + } else { + LogVWrite(addr_base, reg[i], format); + } + } + + if (addr_mode == PostIndex) { + int rm = instr->Rm(); + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_base += (rm == 31) ? 
RegisterSizeInBytesFromFormat(vf) * count + : xreg(rm); + set_xreg(instr->Rn(), addr_base); + } else { + VIXL_ASSERT(addr_mode == Offset); + } +} + + +void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + NEONLoadStoreMultiStructHelper(instr, Offset); +} + + +void Simulator::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + NEONLoadStoreMultiStructHelper(instr, PostIndex); +} + + +void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr, + AddrMode addr_mode) { + uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer); + int rt = instr->Rt(); + + Instr itype = instr->Mask(NEONLoadStoreSingleStructMask); + if (((itype == NEON_LD1_b) || (itype == NEON_LD1_h) || + (itype == NEON_LD1_s) || (itype == NEON_LD1_d)) && + (instr->Bits(20, 16) != 0)) { + VIXL_UNREACHABLE(); + } + + // We use the PostIndex mask here, as it works in this case for both Offset + // and PostIndex addressing. + bool do_load = false; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + VectorFormat vf_t = nfd.GetVectorFormat(); + + VectorFormat vf = kFormat16B; + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b: + case NEON_LD1_b_post: + case NEON_LD2_b: + case NEON_LD2_b_post: + case NEON_LD3_b: + case NEON_LD3_b_post: + case NEON_LD4_b: + case NEON_LD4_b_post: do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_b: + case NEON_ST1_b_post: + case NEON_ST2_b: + case NEON_ST2_b_post: + case NEON_ST3_b: + case NEON_ST3_b_post: + case NEON_ST4_b: + case NEON_ST4_b_post: break; + + case NEON_LD1_h: + case NEON_LD1_h_post: + case NEON_LD2_h: + case NEON_LD2_h_post: + case NEON_LD3_h: + case NEON_LD3_h_post: + case NEON_LD4_h: + case NEON_LD4_h_post: do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_h: + case NEON_ST1_h_post: + case NEON_ST2_h: + case NEON_ST2_h_post: + case NEON_ST3_h: + case NEON_ST3_h_post: + case NEON_ST4_h: + case NEON_ST4_h_post: vf = kFormat8H; break; + case NEON_LD1_s: + case NEON_LD1_s_post: + case NEON_LD2_s: + case NEON_LD2_s_post: + case NEON_LD3_s: + case NEON_LD3_s_post: + case NEON_LD4_s: + case NEON_LD4_s_post: do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_s: + case NEON_ST1_s_post: + case NEON_ST2_s: + case NEON_ST2_s_post: + case NEON_ST3_s: + case NEON_ST3_s_post: + case NEON_ST4_s: + case NEON_ST4_s_post: { + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + VIXL_STATIC_ASSERT( + (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post); + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + VIXL_STATIC_ASSERT( + (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post); + vf = ((instr->NEONLSSize() & 1) == 0) ? 
kFormat4S : kFormat2D; + break; + } + + case NEON_LD1R: + case NEON_LD1R_post: { + vf = vf_t; + ld1r(vf, vreg(rt), addr); + do_load = true; + break; + } + + case NEON_LD2R: + case NEON_LD2R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + ld2r(vf, vreg(rt), vreg(rt2), addr); + do_load = true; + break; + } + + case NEON_LD3R: + case NEON_LD3R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr); + do_load = true; + break; + } + + case NEON_LD4R: + case NEON_LD4R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + int rt4 = (rt3 + 1) % kNumberOfVRegisters; + ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr); + do_load = true; + break; + } + default: VIXL_UNIMPLEMENTED(); + } + + PrintRegisterFormat print_format = + GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf)); + // Make sure that the print_format only includes a single lane. + print_format = + static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask); + + int esize = LaneSizeInBytesFromFormat(vf); + int index_shift = LaneSizeInBytesLog2FromFormat(vf); + int lane = instr->NEONLSIndex(index_shift); + int scale = 0; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + int rt4 = (rt3 + 1) % kNumberOfVRegisters; + switch (instr->Mask(NEONLoadStoreSingleLenMask)) { + case NEONLoadStoreSingle1: + scale = 1; + if (do_load) { + ld1(vf, vreg(rt), lane, addr); + LogVRead(addr, rt, print_format, lane); + } else { + st1(vf, vreg(rt), lane, addr); + LogVWrite(addr, rt, print_format, lane); + } + break; + case NEONLoadStoreSingle2: + scale = 2; + if (do_load) { + ld2(vf, vreg(rt), vreg(rt2), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + } else { + st2(vf, vreg(rt), vreg(rt2), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + } + break; + case NEONLoadStoreSingle3: + scale = 3; + if (do_load) { + ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + LogVRead(addr + (2 * esize), rt3, print_format, lane); + } else { + st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + LogVWrite(addr + (2 * esize), rt3, print_format, lane); + } + break; + case NEONLoadStoreSingle4: + scale = 4; + if (do_load) { + ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + LogVRead(addr + (2 * esize), rt3, print_format, lane); + LogVRead(addr + (3 * esize), rt4, print_format, lane); + } else { + st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + LogVWrite(addr + (2 * esize), rt3, print_format, lane); + LogVWrite(addr + (3 * esize), rt4, print_format, lane); + } + break; + default: VIXL_UNIMPLEMENTED(); + } + + if (addr_mode == PostIndex) { + int rm = instr->Rm(); + int lane_size = LaneSizeInBytesFromFormat(vf); + set_xreg(instr->Rn(), addr + ((rm == 31) ? 
(scale * lane_size) : xreg(rm))); + } +} + + +void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, Offset); +} + + +void Simulator::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, PostIndex); +} + + +void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) { + SimVRegister& rd = vreg(instr->Rd()); + int cmode = instr->NEONCmode(); + int cmode_3_1 = (cmode >> 1) & 7; + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int q = instr->NEONQ(); + int op_bit = instr->NEONModImmOp(); + uint64_t imm8 = instr->ImmNEONabcdefgh(); + + // Find the format and immediate value + uint64_t imm = 0; + VectorFormat vform = kFormatUndefined; + switch (cmode_3_1) { + case 0x0: + case 0x1: + case 0x2: + case 0x3: + vform = (q == 1) ? kFormat4S : kFormat2S; + imm = imm8 << (8 * cmode_3_1); + break; + case 0x4: + case 0x5: + vform = (q == 1) ? kFormat8H : kFormat4H; + imm = imm8 << (8 * cmode_1); + break; + case 0x6: + vform = (q == 1) ? kFormat4S : kFormat2S; + if (cmode_0 == 0) { + imm = imm8 << 8 | 0x000000ff; + } else { + imm = imm8 << 16 | 0x0000ffff; + } + break; + case 0x7: + if (cmode_0 == 0 && op_bit == 0) { + vform = q ? kFormat16B : kFormat8B; + imm = imm8; + } else if (cmode_0 == 0 && op_bit == 1) { + vform = q ? kFormat2D : kFormat1D; + imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + } else { // cmode_0 == 1, cmode == 0xf. + if (op_bit == 0) { + vform = q ? kFormat4S : kFormat2S; + imm = float_to_rawbits(instr->ImmNEONFP32()); + } else if (q == 1) { + vform = kFormat2D; + imm = double_to_rawbits(instr->ImmNEONFP64()); + } else { + VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf)); + VisitUnallocated(instr); + } + } + break; + default: VIXL_UNREACHABLE(); break; + } + + // Find the operation + NEONModifiedImmediateOp op; + if (cmode_3 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<3> == '1' + if (cmode_2 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<2> == '1' + if (cmode_1 == 0) { + op = op_bit ? 
NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<1> == '1' + if (cmode_0 == 0) { + op = NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = NEONModifiedImmediate_MOVI; + } + } + } + } + + // Call the logic function + if (op == NEONModifiedImmediate_ORR) { + orr(vform, rd, rd, imm); + } else if (op == NEONModifiedImmediate_BIC) { + bic(vform, rd, rd, imm); + } else if (op == NEONModifiedImmediate_MOVI) { + movi(vform, rd, imm); + } else if (op == NEONModifiedImmediate_MVNI) { + mvni(vform, rd, imm); + } else { + VisitUnimplemented(instr); + } +} + + +void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMEQ_zero_scalar: cmp(vf, rd, rn, 0, eq); break; + case NEON_CMGE_zero_scalar: cmp(vf, rd, rn, 0, ge); break; + case NEON_CMGT_zero_scalar: cmp(vf, rd, rn, 0, gt); break; + case NEON_CMLT_zero_scalar: cmp(vf, rd, rn, 0, lt); break; + case NEON_CMLE_zero_scalar: cmp(vf, rd, rn, 0, le); break; + case NEON_ABS_scalar: abs(vf, rd, rn); break; + case NEON_SQABS_scalar: abs(vf, rd, rn).SignedSaturate(vf); break; + case NEON_NEG_scalar: neg(vf, rd, rn); break; + case NEON_SQNEG_scalar: neg(vf, rd, rn).SignedSaturate(vf); break; + case NEON_SUQADD_scalar: suqadd(vf, rd, rn); break; + case NEON_USQADD_scalar: usqadd(vf, rd, rn); break; + default: VIXL_UNIMPLEMENTED(); break; + } + } else { + VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode()); + + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. 
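+    // Concretely, those narrowing cases are reached through the default
+    // branch of the switch below and operate on vf, the two bit scalar
+    // format, while fpf only distinguishes the S and D forms.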
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRECPE_scalar: frecpe(fpf, rd, rn, fpcr_rounding); break; + case NEON_FRECPX_scalar: frecpx(fpf, rd, rn); break; + case NEON_FRSQRTE_scalar: frsqrte(fpf, rd, rn); break; + case NEON_FCMGT_zero_scalar: fcmp_zero(fpf, rd, rn, gt); break; + case NEON_FCMGE_zero_scalar: fcmp_zero(fpf, rd, rn, ge); break; + case NEON_FCMEQ_zero_scalar: fcmp_zero(fpf, rd, rn, eq); break; + case NEON_FCMLE_zero_scalar: fcmp_zero(fpf, rd, rn, le); break; + case NEON_FCMLT_zero_scalar: fcmp_zero(fpf, rd, rn, lt); break; + case NEON_SCVTF_scalar: scvtf(fpf, rd, rn, 0, fpcr_rounding); break; + case NEON_UCVTF_scalar: ucvtf(fpf, rd, rn, 0, fpcr_rounding); break; + case NEON_FCVTNS_scalar: fcvts(fpf, rd, rn, FPTieEven); break; + case NEON_FCVTNU_scalar: fcvtu(fpf, rd, rn, FPTieEven); break; + case NEON_FCVTPS_scalar: fcvts(fpf, rd, rn, FPPositiveInfinity); break; + case NEON_FCVTPU_scalar: fcvtu(fpf, rd, rn, FPPositiveInfinity); break; + case NEON_FCVTMS_scalar: fcvts(fpf, rd, rn, FPNegativeInfinity); break; + case NEON_FCVTMU_scalar: fcvtu(fpf, rd, rn, FPNegativeInfinity); break; + case NEON_FCVTZS_scalar: fcvts(fpf, rd, rn, FPZero); break; + case NEON_FCVTZU_scalar: fcvtu(fpf, rd, rn, FPZero); break; + case NEON_FCVTAS_scalar: fcvts(fpf, rd, rn, FPTieAway); break; + case NEON_FCVTAU_scalar: fcvtu(fpf, rd, rn, FPTieAway); break; + case NEON_FCVTXN_scalar: + // Unlike all of the other FP instructions above, fcvtxn encodes dest + // size S as size<0>=1. There's only one case, so we ignore the form. + VIXL_ASSERT(instr->Bit(22) == 1); + fcvtxn(kFormatS, rd, rn); + break; + default: + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: sqxtn(vf, rd, rn); break; + case NEON_UQXTN_scalar: uqxtn(vf, rd, rn); break; + case NEON_SQXTUN_scalar: sqxtun(vf, rd, rn); break; + default: + VIXL_UNIMPLEMENTED(); + } + } + } +} + + +void Simulator::VisitNEONScalar3Diff(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: sqdmlal(vf, rd, rn, rm); break; + case NEON_SQDMLSL_scalar: sqdmlsl(vf, rd, rn, rm); break; + case NEON_SQDMULL_scalar: sqdmull(vf, rd, rn, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalar3Same(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FMULX_scalar: fmulx(vf, rd, rn, rm); break; + case NEON_FACGE_scalar: fabscmp(vf, rd, rn, rm, ge); break; + case NEON_FACGT_scalar: fabscmp(vf, rd, rn, rm, gt); break; + case NEON_FCMEQ_scalar: fcmp(vf, rd, rn, rm, eq); break; + case NEON_FCMGE_scalar: fcmp(vf, rd, rn, rm, ge); break; + case NEON_FCMGT_scalar: fcmp(vf, rd, rn, rm, gt); break; + case NEON_FRECPS_scalar: frecps(vf, rd, rn, rm); break; + case NEON_FRSQRTS_scalar: frsqrts(vf, rd, rn, rm); break; + case NEON_FABD_scalar: fabd(vf, rd, rn, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + switch 
(instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: add(vf, rd, rn, rm); break; + case NEON_SUB_scalar: sub(vf, rd, rn, rm); break; + case NEON_CMEQ_scalar: cmp(vf, rd, rn, rm, eq); break; + case NEON_CMGE_scalar: cmp(vf, rd, rn, rm, ge); break; + case NEON_CMGT_scalar: cmp(vf, rd, rn, rm, gt); break; + case NEON_CMHI_scalar: cmp(vf, rd, rn, rm, hi); break; + case NEON_CMHS_scalar: cmp(vf, rd, rn, rm, hs); break; + case NEON_CMTST_scalar: cmptst(vf, rd, rn, rm); break; + case NEON_USHL_scalar: ushl(vf, rd, rn, rm); break; + case NEON_SSHL_scalar: sshl(vf, rd, rn, rm); break; + case NEON_SQDMULH_scalar: sqdmulh(vf, rd, rn, rm); break; + case NEON_SQRDMULH_scalar: sqrdmulh(vf, rd, rn, rm); break; + case NEON_UQADD_scalar: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD_scalar: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB_scalar: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB_scalar: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSHL_scalar: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL_scalar: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + + +void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap()); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + ByElementOp Op = NULL; + + int rm_reg = instr->Rm(); + int index = (instr->NEONH() << 1) | instr->NEONL(); + if (instr->NEONSize() == 1) { + rm_reg &= 0xf; + index = (index << 1) | instr->NEONM(); + } + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: Op = &Simulator::sqdmull; break; + case NEON_SQDMLAL_byelement_scalar: Op = &Simulator::sqdmlal; break; + case NEON_SQDMLSL_byelement_scalar: Op = &Simulator::sqdmlsl; break; + case NEON_SQDMULH_byelement_scalar: + Op = &Simulator::sqdmulh; + vf = vf_r; + break; + case NEON_SQRDMULH_byelement_scalar: + Op = &Simulator::sqrdmulh; + vf = vf_r; + break; + default: + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + index = instr->NEONH(); + if ((instr->FPType() & 1) == 0) { + index = (index << 1) | instr->NEONL(); + } + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_byelement_scalar: Op = &Simulator::fmul; break; + case NEON_FMLA_byelement_scalar: Op = &Simulator::fmla; break; + case NEON_FMLS_byelement_scalar: Op = &Simulator::fmls; break; + case NEON_FMULX_byelement_scalar: Op = &Simulator::fmulx; break; + default: VIXL_UNIMPLEMENTED(); + } + } + + (this->*Op)(vf, rd, rn, vreg(rm_reg), index); +} + + +void Simulator::VisitNEONScalarCopy(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + int imm5 = instr->ImmNEON5(); + int tz = CountTrailingZeros(imm5, 32); + int 
rn_index = imm5 >> (tz + 1); + dup_element(vf, rd, rn, rn_index); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalarPairwise(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: addp(vf, rd, rn); break; + case NEON_FADDP_scalar: faddp(vf, rd, rn); break; + case NEON_FMAXP_scalar: fmaxp(vf, rd, rn); break; + case NEON_FMAXNMP_scalar: fmaxnmp(vf, rd, rn); break; + case NEON_FMINP_scalar: fminp(vf, rd, rn); break; + case NEON_FMINNMP_scalar: fminnmp(vf, rd, rn); break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) { + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode()); + + static const NEONFormatMap map = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, + NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D} + }; + NEONFormatDecoder nfd(instr, &map); + VectorFormat vf = nfd.GetVectorFormat(); + + int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh()); + int immhimmb = instr->ImmNEONImmhImmb(); + int right_shift = (16 << highestSetBit) - immhimmb; + int left_shift = immhimmb - (8 << highestSetBit); + switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_SHL_scalar: shl(vf, rd, rn, left_shift); break; + case NEON_SLI_scalar: sli(vf, rd, rn, left_shift); break; + case NEON_SQSHL_imm_scalar: sqshl(vf, rd, rn, left_shift); break; + case NEON_UQSHL_imm_scalar: uqshl(vf, rd, rn, left_shift); break; + case NEON_SQSHLU_scalar: sqshlu(vf, rd, rn, left_shift); break; + case NEON_SRI_scalar: sri(vf, rd, rn, right_shift); break; + case NEON_SSHR_scalar: sshr(vf, rd, rn, right_shift); break; + case NEON_USHR_scalar: ushr(vf, rd, rn, right_shift); break; + case NEON_SRSHR_scalar: sshr(vf, rd, rn, right_shift).Round(vf); break; + case NEON_URSHR_scalar: ushr(vf, rd, rn, right_shift).Round(vf); break; + case NEON_SSRA_scalar: ssra(vf, rd, rn, right_shift); break; + case NEON_USRA_scalar: usra(vf, rd, rn, right_shift); break; + case NEON_SRSRA_scalar: srsra(vf, rd, rn, right_shift); break; + case NEON_URSRA_scalar: ursra(vf, rd, rn, right_shift); break; + case NEON_UQSHRN_scalar: uqshrn(vf, rd, rn, right_shift); break; + case NEON_UQRSHRN_scalar: uqrshrn(vf, rd, rn, right_shift); break; + case NEON_SQSHRN_scalar: sqshrn(vf, rd, rn, right_shift); break; + case NEON_SQRSHRN_scalar: sqrshrn(vf, rd, rn, right_shift); break; + case NEON_SQSHRUN_scalar: sqshrun(vf, rd, rn, right_shift); break; + case NEON_SQRSHRUN_scalar: sqrshrun(vf, rd, rn, right_shift); break; + case NEON_FCVTZS_imm_scalar: fcvts(vf, rd, rn, FPZero, right_shift); break; + case NEON_FCVTZU_imm_scalar: fcvtu(vf, rd, rn, FPZero, right_shift); break; + case NEON_SCVTF_imm_scalar: + scvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_UCVTF_imm_scalar: + ucvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONShiftImmediate(const Instruction* instr) { + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode()); + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all 
others undefined. + static const NEONFormatMap map = { + {22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H, + NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D} + }; + NEONFormatDecoder nfd(instr, &map); + VectorFormat vf = nfd.GetVectorFormat(); + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. + static const NEONFormatMap map_l = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D} + }; + VectorFormat vf_l = nfd.GetVectorFormat(&map_l); + + int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh()); + int immhimmb = instr->ImmNEONImmhImmb(); + int right_shift = (16 << highestSetBit) - immhimmb; + int left_shift = immhimmb - (8 << highestSetBit); + + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SHL: shl(vf, rd, rn, left_shift); break; + case NEON_SLI: sli(vf, rd, rn, left_shift); break; + case NEON_SQSHLU: sqshlu(vf, rd, rn, left_shift); break; + case NEON_SRI: sri(vf, rd, rn, right_shift); break; + case NEON_SSHR: sshr(vf, rd, rn, right_shift); break; + case NEON_USHR: ushr(vf, rd, rn, right_shift); break; + case NEON_SRSHR: sshr(vf, rd, rn, right_shift).Round(vf); break; + case NEON_URSHR: ushr(vf, rd, rn, right_shift).Round(vf); break; + case NEON_SSRA: ssra(vf, rd, rn, right_shift); break; + case NEON_USRA: usra(vf, rd, rn, right_shift); break; + case NEON_SRSRA: srsra(vf, rd, rn, right_shift); break; + case NEON_URSRA: ursra(vf, rd, rn, right_shift); break; + case NEON_SQSHL_imm: sqshl(vf, rd, rn, left_shift); break; + case NEON_UQSHL_imm: uqshl(vf, rd, rn, left_shift); break; + case NEON_SCVTF_imm: scvtf(vf, rd, rn, right_shift, fpcr_rounding); break; + case NEON_UCVTF_imm: ucvtf(vf, rd, rn, right_shift, fpcr_rounding); break; + case NEON_FCVTZS_imm: fcvts(vf, rd, rn, FPZero, right_shift); break; + case NEON_FCVTZU_imm: fcvtu(vf, rd, rn, FPZero, right_shift); break; + case NEON_SSHLL: + vf = vf_l; + if (instr->Mask(NEON_Q)) { + sshll2(vf, rd, rn, left_shift); + } else { + sshll(vf, rd, rn, left_shift); + } + break; + case NEON_USHLL: + vf = vf_l; + if (instr->Mask(NEON_Q)) { + ushll2(vf, rd, rn, left_shift); + } else { + ushll(vf, rd, rn, left_shift); + } + break; + case NEON_SHRN: + if (instr->Mask(NEON_Q)) { + shrn2(vf, rd, rn, right_shift); + } else { + shrn(vf, rd, rn, right_shift); + } + break; + case NEON_RSHRN: + if (instr->Mask(NEON_Q)) { + rshrn2(vf, rd, rn, right_shift); + } else { + rshrn(vf, rd, rn, right_shift); + } + break; + case NEON_UQSHRN: + if (instr->Mask(NEON_Q)) { + uqshrn2(vf, rd, rn, right_shift); + } else { + uqshrn(vf, rd, rn, right_shift); + } + break; + case NEON_UQRSHRN: + if (instr->Mask(NEON_Q)) { + uqrshrn2(vf, rd, rn, right_shift); + } else { + uqrshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQSHRN: + if (instr->Mask(NEON_Q)) { + sqshrn2(vf, rd, rn, right_shift); + } else { + sqshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQRSHRN: + if (instr->Mask(NEON_Q)) { + sqrshrn2(vf, rd, rn, right_shift); + } else { + sqrshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQSHRUN: + if (instr->Mask(NEON_Q)) { + sqshrun2(vf, rd, rn, right_shift); + } else { + sqshrun(vf, rd, rn, right_shift); + } + break; + case NEON_SQRSHRUN: + if (instr->Mask(NEON_Q)) { + sqrshrun2(vf, rd, rn, right_shift); + } else { + sqrshrun(vf, rd, rn, right_shift); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + 
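+// Worked example of the shift decoding used by the two visitors above
+// (encoding chosen for illustration): with immh == 0b0001 and immb == 0b010,
+// HighestSetBitPosition(immh) is 0, so the lanes are bytes; immhimmb is 10,
+// giving left_shift == 10 - 8 == 2 and right_shift == 16 - 10 == 6. Each
+// opcode only consumes one of the two values: SHL would shift left by two
+// bits, while SSHR or USHR would shift right by six.
+
+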
+void Simulator::VisitNEONTable(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters); + SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters); + SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: tbl(vf, rd, rn, rm); break; + case NEON_TBL_2v: tbl(vf, rd, rn, rn2, rm); break; + case NEON_TBL_3v: tbl(vf, rd, rn, rn2, rn3, rm); break; + case NEON_TBL_4v: tbl(vf, rd, rn, rn2, rn3, rn4, rm); break; + case NEON_TBX_1v: tbx(vf, rd, rn, rm); break; + case NEON_TBX_2v: tbx(vf, rd, rn, rn2, rm); break; + case NEON_TBX_3v: tbx(vf, rd, rn, rn2, rn3, rm); break; + case NEON_TBX_4v: tbx(vf, rd, rn, rn2, rn3, rn4, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONPerm(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: trn1(vf, rd, rn, rm); break; + case NEON_TRN2: trn2(vf, rd, rn, rm); break; + case NEON_UZP1: uzp1(vf, rd, rn, rm); break; + case NEON_UZP2: uzp2(vf, rd, rn, rm); break; + case NEON_ZIP1: zip1(vf, rd, rn, rm); break; + case NEON_ZIP2: zip2(vf, rd, rn, rm); break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::DoUnreachable(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->ImmException() == kUnreachableOpcode)); + + fprintf(stream_, "Hit UNREACHABLE marker at pc=%p.\n", + reinterpret_cast<const void*>(instr)); + abort(); +} + + +void Simulator::DoTrace(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->ImmException() == kTraceOpcode)); + + // Read the arguments encoded inline in the instruction stream. + uint32_t parameters; + uint32_t command; + + VIXL_STATIC_ASSERT(sizeof(*instr) == 1); + memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters)); + memcpy(&command, instr + kTraceCommandOffset, sizeof(command)); + + switch (command) { + case TRACE_ENABLE: + set_trace_parameters(trace_parameters() | parameters); + break; + case TRACE_DISABLE: + set_trace_parameters(trace_parameters() & ~parameters); + break; + default: + VIXL_UNREACHABLE(); + } + + set_pc(instr->InstructionAtOffset(kTraceLength)); +} + + +void Simulator::DoLog(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->ImmException() == kLogOpcode)); + + // Read the arguments encoded inline in the instruction stream. + uint32_t parameters; + + VIXL_STATIC_ASSERT(sizeof(*instr) == 1); + memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters)); + + // We don't support a one-shot LOG_DISASM. + VIXL_ASSERT((parameters & LOG_DISASM) == 0); + // Print the requested information.
+ if (parameters & LOG_SYSREGS) PrintSystemRegisters(); + if (parameters & LOG_REGS) PrintRegisters(); + if (parameters & LOG_VREGS) PrintVRegisters(); + + set_pc(instr->InstructionAtOffset(kLogLength)); +} + + +void Simulator::DoPrintf(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->ImmException() == kPrintfOpcode)); + + // Read the arguments encoded inline in the instruction stream. + uint32_t arg_count; + uint32_t arg_pattern_list; + VIXL_STATIC_ASSERT(sizeof(*instr) == 1); + memcpy(&arg_count, + instr + kPrintfArgCountOffset, + sizeof(arg_count)); + memcpy(&arg_pattern_list, + instr + kPrintfArgPatternListOffset, + sizeof(arg_pattern_list)); + + VIXL_ASSERT(arg_count <= kPrintfMaxArgCount); + VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); + + // We need to call the host printf function with a set of arguments defined by + // arg_pattern_list. Because we don't know the types and sizes of the + // arguments, this is very difficult to do in a robust and portable way. To + // work around the problem, we pick apart the format string, and print one + // format placeholder at a time. + + // Allocate space for the format string. We take a copy, so we can modify it. + // Leave enough space for one extra character per expected argument (plus the + // '\0' termination). + const char * format_base = reg<const char *>(0); + VIXL_ASSERT(format_base != NULL); + size_t length = strlen(format_base) + 1; + char * const format = (char *)js_calloc(length + arg_count); + + // A list of chunks, each with exactly one format placeholder. + const char * chunks[kPrintfMaxArgCount]; + + // Copy the format string and search for format placeholders. + uint32_t placeholder_count = 0; + char * format_scratch = format; + for (size_t i = 0; i < length; i++) { + if (format_base[i] != '%') { + *format_scratch++ = format_base[i]; + } else { + if (format_base[i + 1] == '%') { + // Ignore explicit "%%" sequences. + *format_scratch++ = format_base[i]; + i++; + // Chunks after the first are passed as format strings to printf, so we + // need to escape '%' characters in those chunks. + if (placeholder_count > 0) *format_scratch++ = format_base[i]; + } else { + VIXL_CHECK(placeholder_count < arg_count); + // Insert '\0' before placeholders, and store their locations. + *format_scratch++ = '\0'; + chunks[placeholder_count++] = format_scratch; + *format_scratch++ = format_base[i]; + } + } + } + VIXL_CHECK(placeholder_count == arg_count); + + // Finally, call printf with each chunk, passing the appropriate register + // argument. Normally, printf returns the number of bytes transmitted, so we + // can emulate a single printf call by adding the result from each chunk. If + // any call returns a negative (error) value, though, just return that value. + + printf("%s", clr_printf); + + // Because '\0' is inserted before each placeholder, the first string in + // 'format' contains no format placeholders and should be printed literally. + int result = printf("%s", format); + int pcs_r = 1; // Start at x1. x0 holds the format string. + int pcs_f = 0; // Start at d0. 
+ if (result >= 0) { + for (uint32_t i = 0; i < placeholder_count; i++) { + int part_result = -1; + + uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits); + arg_pattern &= (1 << kPrintfArgPatternBits) - 1; + switch (arg_pattern) { + case kPrintfArgW: part_result = printf(chunks[i], wreg(pcs_r++)); break; + case kPrintfArgX: part_result = printf(chunks[i], xreg(pcs_r++)); break; + case kPrintfArgD: part_result = printf(chunks[i], dreg(pcs_f++)); break; + default: VIXL_UNREACHABLE(); + } + + if (part_result < 0) { + // Handle error values. + result = part_result; + break; + } + + result += part_result; + } + } + + printf("%s", clr_normal); + + // Printf returns its result in x0 (just like the C library's printf). + set_xreg(0, result); + + // The printf parameters are inlined in the code, so skip them. + set_pc(instr->InstructionAtOffset(kPrintfLength)); + + // Set LR as if we'd just called a native printf function. + set_lr(pc()); + + js_free(format); +} + +} // namespace vixl + +#endif // JS_SIMULATOR_ARM64 diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.h b/js/src/jit/arm64/vixl/Simulator-vixl.h new file mode 100644 index 000000000..8755ad671 --- /dev/null +++ b/js/src/jit/arm64/vixl/Simulator-vixl.h @@ -0,0 +1,2677 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
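Simulator::DoPrintf, at the end of Simulator-vixl.cpp above, works by splitting its copy of the format string into per-placeholder chunks: a '\0' is written immediately before each '%' conversion, so the first piece is printed literally and each later piece carries exactly one placeholder fed from the next PCS register. A small standalone sketch of just that splitting step, using a made-up format string and plain host printf (no simulator types):

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  int main() {
    // Hypothetical input; in the simulator this comes from x0.
    const char* format_base = "x=%d, y=%d, 100%% done\n";
    const int arg_count = 2;                 // number of conversions expected
    size_t length = strlen(format_base) + 1;
    // One spare byte per expected argument, as in DoPrintf.
    char* format = static_cast<char*>(calloc(length + arg_count, 1));
    const char* chunks[8];                   // start of each placeholder chunk
    int placeholder_count = 0;
    char* out = format;
    for (size_t i = 0; i < length; i++) {
      if (format_base[i] != '%') {
        *out++ = format_base[i];
      } else if (format_base[i + 1] == '%') {
        // "%%": keep it escaped in later chunks, which are reused as formats.
        *out++ = format_base[i];
        i++;
        if (placeholder_count > 0) *out++ = format_base[i];
      } else {
        *out++ = '\0';                       // terminate the previous chunk
        chunks[placeholder_count++] = out;
        *out++ = format_base[i];
      }
    }
    printf("%s", format);                    // "x="
    printf(chunks[0], 1);                    // "1, y="
    printf(chunks[1], 2);                    // "2, 100% done\n"
    free(format);
    return 0;
  }

As in DoPrintf, the sum of the individual printf return values stands in for the return value of one combined call.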
+ +#ifndef VIXL_A64_SIMULATOR_A64_H_ +#define VIXL_A64_SIMULATOR_A64_H_ + +#include "js-config.h" + +#ifdef JS_SIMULATOR_ARM64 + +#include "mozilla/Vector.h" + +#include "jsalloc.h" + +#include "jit/arm64/vixl/Assembler-vixl.h" +#include "jit/arm64/vixl/Disasm-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" +#include "jit/arm64/vixl/Instructions-vixl.h" +#include "jit/arm64/vixl/Instrument-vixl.h" +#include "jit/arm64/vixl/Simulator-Constants-vixl.h" +#include "jit/arm64/vixl/Utils-vixl.h" +#include "jit/IonTypes.h" +#include "vm/MutexIDs.h" +#include "vm/PosixNSPR.h" + +#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \ + JS_BEGIN_MACRO \ + if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \ + js::ReportOverRecursed(cx); \ + onerror; \ + } \ + JS_END_MACRO + +namespace vixl { + +// Assemble the specified IEEE-754 components into the target type and apply +// appropriate rounding. +// sign: 0 = positive, 1 = negative +// exponent: Unbiased IEEE-754 exponent. +// mantissa: The mantissa of the input. The top bit (which is not encoded for +// normal IEEE-754 values) must not be omitted. This bit has the +// value 'pow(2, exponent)'. +// +// The input value is assumed to be a normalized value. That is, the input may +// not be infinity or NaN. If the source value is subnormal, it must be +// normalized before calling this function such that the highest set bit in the +// mantissa has the value 'pow(2, exponent)'. +// +// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than +// calling a templated FPRound. +template <class T, int ebits, int mbits> +T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa, + FPRounding round_mode) { + VIXL_ASSERT((sign == 0) || (sign == 1)); + + // Only FPTieEven and FPRoundOdd rounding modes are implemented. + VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd)); + + // Rounding can promote subnormals to normals, and normals to infinities. For + // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be + // encodable as a float, but rounding based on the low-order mantissa bits + // could make it overflow. With ties-to-even rounding, this value would become + // an infinity. + + // ---- Rounding Method ---- + // + // The exponent is irrelevant in the rounding operation, so we treat the + // lowest-order bit that will fit into the result ('onebit') as having + // the value '1'. Similarly, the highest-order bit that won't fit into + // the result ('halfbit') has the value '0.5'. The 'point' sits between + // 'onebit' and 'halfbit': + // + // These bits fit into the result. + // |---------------------| + // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + // || + // / | + // / halfbit + // onebit + // + // For subnormal outputs, the range of representable bits is smaller and + // the position of onebit and halfbit depends on the exponent of the + // input, but the method is otherwise similar. + // + // onebit(frac) + // | + // | halfbit(frac) halfbit(adjusted) + // | / / + // | | | + // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00 + // 0b00.0... -> 0b00.0... -> 0b00 + // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00 + // 0b00.1... -> 0b00.1... -> 0b01 + // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01 + // 0b01.0... -> 0b01.0... -> 0b01 + // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10 + // 0b01.1... -> 0b01.1... -> 0b10 + // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10 + // 0b10.0... -> 0b10.0... -> 0b10 + // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10 + // 0b10.1... 
-> 0b10.1... -> 0b11 + // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11 + // ... / | / | + // / | / | + // / | + // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / | + // + // mantissa = (mantissa >> shift) + halfbit(adjusted); + + static const int mantissa_offset = 0; + static const int exponent_offset = mantissa_offset + mbits; + static const int sign_offset = exponent_offset + ebits; + VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1)); + + // Bail out early for zero inputs. + if (mantissa == 0) { + return static_cast<T>(sign << sign_offset); + } + + // If all bits in the exponent are set, the value is infinite or NaN. + // This is true for all binary IEEE-754 formats. + static const int infinite_exponent = (1 << ebits) - 1; + static const int max_normal_exponent = infinite_exponent - 1; + + // Apply the exponent bias to encode it for the result. Doing this early makes + // it easy to detect values that will be infinite or subnormal. + exponent += max_normal_exponent >> 1; + + if (exponent > max_normal_exponent) { + // Overflow: the input is too large for the result type to represent. + if (round_mode == FPTieEven) { + // FPTieEven rounding mode handles overflows using infinities. + exponent = infinite_exponent; + mantissa = 0; + } else { + VIXL_ASSERT(round_mode == FPRoundOdd); + // FPRoundOdd rounding mode handles overflows using the largest magnitude + // normal number. + exponent = max_normal_exponent; + mantissa = (UINT64_C(1) << exponent_offset) - 1; + } + return static_cast<T>((sign << sign_offset) | + (exponent << exponent_offset) | + (mantissa << mantissa_offset)); + } + + // Calculate the shift required to move the top mantissa bit to the proper + // place in the destination type. + const int highest_significant_bit = 63 - CountLeadingZeros(mantissa); + int shift = highest_significant_bit - mbits; + + if (exponent <= 0) { + // The output will be subnormal (before rounding). + // For subnormal outputs, the shift must be adjusted by the exponent. The +1 + // is necessary because the exponent of a subnormal value (encoded as 0) is + // the same as the exponent of the smallest normal value (encoded as 1). + shift += -exponent + 1; + + // Handle inputs that would produce a zero output. + // + // Shifts higher than highest_significant_bit+1 will always produce a zero + // result. A shift of exactly highest_significant_bit+1 might produce a + // non-zero result after rounding. + if (shift > (highest_significant_bit + 1)) { + if (round_mode == FPTieEven) { + // The result will always be +/-0.0. + return static_cast<T>(sign << sign_offset); + } else { + VIXL_ASSERT(round_mode == FPRoundOdd); + VIXL_ASSERT(mantissa != 0); + // For FPRoundOdd, if the mantissa is too small to represent and + // non-zero return the next "odd" value. + return static_cast<T>((sign << sign_offset) | 1); + } + } + + // Properly encode the exponent for a subnormal output. + exponent = 0; + } else { + // Clear the topmost mantissa bit, since this is not encoded in IEEE-754 + // normal values. + mantissa &= ~(UINT64_C(1) << highest_significant_bit); + } + + if (shift > 0) { + if (round_mode == FPTieEven) { + // We have to shift the mantissa to the right. Some precision is lost, so + // we need to apply rounding. 
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1; + uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1; + uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa); + uint64_t adjusted = mantissa - adjustment; + T halfbit_adjusted = (adjusted >> (shift-1)) & 1; + + T result = static_cast<T>((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa >> shift) << mantissa_offset)); + + // A very large mantissa can overflow during rounding. If this happens, + // the exponent should be incremented and the mantissa set to 1.0 + // (encoded as 0). Applying halfbit_adjusted after assembling the float + // has the nice side-effect that this case is handled for free. + // + // This also handles cases where a very large finite value overflows to + // infinity, or where a very large subnormal value overflows to become + // normal. + return result + halfbit_adjusted; + } else { + VIXL_ASSERT(round_mode == FPRoundOdd); + // If any bits at position halfbit or below are set, onebit (ie. the + // bottom bit of the resulting mantissa) must be set. + uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1); + if (fractional_bits != 0) { + mantissa |= UINT64_C(1) << shift; + } + + return static_cast<T>((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa >> shift) << mantissa_offset)); + } + } else { + // We have to shift the mantissa to the left (or not at all). The input + // mantissa is exactly representable in the output mantissa, so apply no + // rounding correction. + return static_cast<T>((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa << -shift) << mantissa_offset)); + } +} + + +// Representation of memory, with typed getters and setters for access. +class Memory { + public: + template <typename T> + static T AddressUntag(T address) { + // Cast the address using a C-style cast. A reinterpret_cast would be + // appropriate, but it can't cast one integral type to another. + uint64_t bits = (uint64_t)address; + return (T)(bits & ~kAddressTagMask); + } + + template <typename T, typename A> + static T Read(A address) { + T value; + address = AddressUntag(address); + VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); + memcpy(&value, reinterpret_cast<const char *>(address), sizeof(value)); + return value; + } + + template <typename T, typename A> + static void Write(A address, T value) { + address = AddressUntag(address); + VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); + memcpy(reinterpret_cast<char *>(address), &value, sizeof(value)); + } +}; + +// Represent a register (r0-r31, v0-v31). +template<int kSizeInBytes> +class SimRegisterBase { + public: + SimRegisterBase() : written_since_last_log_(false) {} + + // Write the specified value. The value is zero-extended if necessary. + template<typename T> + void Set(T new_value) { + VIXL_STATIC_ASSERT(sizeof(new_value) <= kSizeInBytes); + if (sizeof(new_value) < kSizeInBytes) { + // All AArch64 registers are zero-extending. + memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value)); + } + memcpy(value_, &new_value, sizeof(new_value)); + NotifyRegisterWrite(); + } + + // Insert a typed value into a register, leaving the rest of the register + // unchanged. 
The lane parameter indicates where in the register the value + // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where + // 0 represents the least significant bits. + template<typename T> + void Insert(int lane, T new_value) { + VIXL_ASSERT(lane >= 0); + VIXL_ASSERT((sizeof(new_value) + + (lane * sizeof(new_value))) <= kSizeInBytes); + memcpy(&value_[lane * sizeof(new_value)], &new_value, sizeof(new_value)); + NotifyRegisterWrite(); + } + + // Read the value as the specified type. The value is truncated if necessary. + template<typename T> + T Get(int lane = 0) const { + T result; + VIXL_ASSERT(lane >= 0); + VIXL_ASSERT((sizeof(result) + (lane * sizeof(result))) <= kSizeInBytes); + memcpy(&result, &value_[lane * sizeof(result)], sizeof(result)); + return result; + } + + // TODO: Make this return a map of updated bytes, so that we can highlight + // updated lanes for load-and-insert. (That never happens for scalar code, but + // NEON has some instructions that can update individual lanes.) + bool WrittenSinceLastLog() const { + return written_since_last_log_; + } + + void NotifyRegisterLogged() { + written_since_last_log_ = false; + } + + protected: + uint8_t value_[kSizeInBytes]; + + // Helpers to aid with register tracing. + bool written_since_last_log_; + + void NotifyRegisterWrite() { + written_since_last_log_ = true; + } +}; +typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31 +typedef SimRegisterBase<kQRegSizeInBytes> SimVRegister; // v0-v31 + +// Representation of a vector register, with typed getters and setters for lanes +// and additional information to represent lane state. +class LogicVRegister { + public: + inline LogicVRegister(SimVRegister& other) // NOLINT + : register_(other) { + for (unsigned i = 0; i < sizeof(saturated_) / sizeof(saturated_[0]); i++) { + saturated_[i] = kNotSaturated; + } + for (unsigned i = 0; i < sizeof(round_) / sizeof(round_[0]); i++) { + round_[i] = 0; + } + } + + int64_t Int(VectorFormat vform, int index) const { + int64_t element; + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: element = register_.Get<int8_t>(index); break; + case 16: element = register_.Get<int16_t>(index); break; + case 32: element = register_.Get<int32_t>(index); break; + case 64: element = register_.Get<int64_t>(index); break; + default: VIXL_UNREACHABLE(); return 0; + } + return element; + } + + uint64_t Uint(VectorFormat vform, int index) const { + uint64_t element; + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: element = register_.Get<uint8_t>(index); break; + case 16: element = register_.Get<uint16_t>(index); break; + case 32: element = register_.Get<uint32_t>(index); break; + case 64: element = register_.Get<uint64_t>(index); break; + default: VIXL_UNREACHABLE(); return 0; + } + return element; + } + + int64_t IntLeftJustified(VectorFormat vform, int index) const { + return Int(vform, index) << (64 - LaneSizeInBitsFromFormat(vform)); + } + + uint64_t UintLeftJustified(VectorFormat vform, int index) const { + return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform)); + } + + void SetInt(VectorFormat vform, int index, int64_t value) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: register_.Insert(index, static_cast<int8_t>(value)); break; + case 16: register_.Insert(index, static_cast<int16_t>(value)); break; + case 32: register_.Insert(index, static_cast<int32_t>(value)); break; + case 64: register_.Insert(index, static_cast<int64_t>(value)); break; + default: VIXL_UNREACHABLE(); return; + } + } + + 
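SimRegisterBase above keeps each register as a raw byte image (value_), with lane 0 in the least significant bytes, so Insert() and Get() reduce to a memcpy at byte offset lane * sizeof(T) on a little-endian host. A tiny standalone illustration of that layout, using an ordinary 16-byte array in place of the register (illustrative only, not the commit's code):

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint8_t q[16] = {0};                     // 128-bit register image, like value_
    uint32_t lane_val = 0xdeadbeef;
    int lane = 2;                            // third 32-bit lane (bits 64..95)
    memcpy(&q[lane * sizeof(lane_val)], &lane_val, sizeof(lane_val));

    uint32_t read_back;
    memcpy(&read_back, &q[lane * sizeof(read_back)], sizeof(read_back));
    printf("lane %d = 0x%08" PRIx32 "\n", lane, read_back);  // 0xdeadbeef
    return 0;
  }
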
void SetUint(VectorFormat vform, int index, uint64_t value) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: register_.Insert(index, static_cast<uint8_t>(value)); break; + case 16: register_.Insert(index, static_cast<uint16_t>(value)); break; + case 32: register_.Insert(index, static_cast<uint32_t>(value)); break; + case 64: register_.Insert(index, static_cast<uint64_t>(value)); break; + default: VIXL_UNREACHABLE(); return; + } + } + + void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: register_.Insert(index, Memory::Read<uint8_t>(addr)); break; + case 16: register_.Insert(index, Memory::Read<uint16_t>(addr)); break; + case 32: register_.Insert(index, Memory::Read<uint32_t>(addr)); break; + case 64: register_.Insert(index, Memory::Read<uint64_t>(addr)); break; + default: VIXL_UNREACHABLE(); return; + } + } + + void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const { + uint64_t value = Uint(vform, index); + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: Memory::Write(addr, static_cast<uint8_t>(value)); break; + case 16: Memory::Write(addr, static_cast<uint16_t>(value)); break; + case 32: Memory::Write(addr, static_cast<uint32_t>(value)); break; + case 64: Memory::Write(addr, value); break; + } + } + + template <typename T> + T Float(int index) const { + return register_.Get<T>(index); + } + + template <typename T> + void SetFloat(int index, T value) const { + register_.Insert(index, value); + } + + // When setting a result in a register of size less than Q, the top bits of + // the Q register must be cleared. + void ClearForWrite(VectorFormat vform) const { + unsigned size = RegisterSizeInBytesFromFormat(vform); + for (unsigned i = size; i < kQRegSizeInBytes; i++) { + SetUint(kFormat16B, i, 0); + } + } + + // Saturation state for each lane of a vector. + enum Saturation { + kNotSaturated = 0, + kSignedSatPositive = 1 << 0, + kSignedSatNegative = 1 << 1, + kSignedSatMask = kSignedSatPositive | kSignedSatNegative, + kSignedSatUndefined = kSignedSatMask, + kUnsignedSatPositive = 1 << 2, + kUnsignedSatNegative = 1 << 3, + kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative, + kUnsignedSatUndefined = kUnsignedSatMask + }; + + // Getters for saturation state. + Saturation GetSignedSaturation(int index) { + return static_cast<Saturation>(saturated_[index] & kSignedSatMask); + } + + Saturation GetUnsignedSaturation(int index) { + return static_cast<Saturation>(saturated_[index] & kUnsignedSatMask); + } + + // Setters for saturation state. + void ClearSat(int index) { + saturated_[index] = kNotSaturated; + } + + void SetSignedSat(int index, bool positive) { + SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative); + } + + void SetUnsignedSat(int index, bool positive) { + SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative); + } + + void SetSatFlag(int index, Saturation sat) { + saturated_[index] = static_cast<Saturation>(saturated_[index] | sat); + VIXL_ASSERT((sat & kUnsignedSatMask) != kUnsignedSatUndefined); + VIXL_ASSERT((sat & kSignedSatMask) != kSignedSatUndefined); + } + + // Saturate lanes of a vector based on saturation state. 
+ LogicVRegister& SignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetSignedSaturation(i); + if (sat == kSignedSatPositive) { + SetInt(vform, i, MaxIntFromFormat(vform)); + } else if (sat == kSignedSatNegative) { + SetInt(vform, i, MinIntFromFormat(vform)); + } + } + return *this; + } + + LogicVRegister& UnsignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetUnsignedSaturation(i); + if (sat == kUnsignedSatPositive) { + SetUint(vform, i, MaxUintFromFormat(vform)); + } else if (sat == kUnsignedSatNegative) { + SetUint(vform, i, 0); + } + } + return *this; + } + + // Getter for rounding state. + bool GetRounding(int index) { + return round_[index]; + } + + // Setter for rounding state. + void SetRounding(int index, bool round) { + round_[index] = round; + } + + // Round lanes of a vector based on rounding state. + LogicVRegister& Round(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetInt(vform, i, Int(vform, i) + (GetRounding(i) ? 1 : 0)); + } + return *this; + } + + // Unsigned halve lanes of a vector, and use the saturation state to set the + // top bit. + LogicVRegister& Uhalve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t val = Uint(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetUnsignedSaturation(i) != kNotSaturated) { + // If the operation causes unsigned saturation, the bit shifted into the + // most significant bit must be set. + val |= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + // Signed halve lanes of a vector, and use the carry state to set the top bit. + LogicVRegister& Halve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t val = Int(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetSignedSaturation(i) != kNotSaturated) { + // If the operation causes signed saturation, the sign bit must be + // inverted. + val ^= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + private: + SimVRegister& register_; + + // Allocate one saturation state entry per lane; largest register is type Q, + // and lanes can be a minimum of one byte wide. + Saturation saturated_[kQRegSizeInBytes]; + + // Allocate one rounding state entry per lane. + bool round_[kQRegSizeInBytes]; +}; + +// The proper way to initialize a simulated system register (such as NZCV) is as +// follows: +// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV); +class SimSystemRegister { + public: + // The default constructor represents a register which has no writable bits. + // It is not possible to set its value to anything other than 0. + SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { } + + uint32_t RawValue() const { + return value_; + } + + void SetRawValue(uint32_t new_value) { + value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_); + } + + uint32_t Bits(int msb, int lsb) const { + return unsigned_bitextract_32(msb, lsb, value_); + } + + int32_t SignedBits(int msb, int lsb) const { + return signed_bitextract_32(msb, lsb, value_); + } + + void SetBits(int msb, int lsb, uint32_t bits); + + // Default system register values. 
+ static SimSystemRegister DefaultValueFor(SystemRegister id); + +#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ + uint32_t Name() const { return Func(HighBit, LowBit); } \ + void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); } +#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \ + static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask); + + SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK) + +#undef DEFINE_ZERO_BITS +#undef DEFINE_GETTER + + protected: + // Most system registers only implement a few of the bits in the word. Other + // bits are "read-as-zero, write-ignored". The write_ignore_mask argument + // describes the bits which are not modifiable. + SimSystemRegister(uint32_t value, uint32_t write_ignore_mask) + : value_(value), write_ignore_mask_(write_ignore_mask) { } + + uint32_t value_; + uint32_t write_ignore_mask_; +}; + + +class SimExclusiveLocalMonitor { + public: + SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) { + Clear(); + } + + // Clear the exclusive monitor (like clrex). + void Clear() { + address_ = 0; + size_ = 0; + } + + // Clear the exclusive monitor most of the time. + void MaybeClear() { + if ((seed_ % kSkipClearProbability) != 0) { + Clear(); + } + + // Advance seed_ using a simple linear congruential generator. + seed_ = (seed_ * 48271) % 2147483647; + } + + // Mark the address range for exclusive access (like load-exclusive). + void MarkExclusive(uint64_t address, size_t size) { + address_ = address; + size_ = size; + } + + // Return true if the address range is marked (like store-exclusive). + // This helper doesn't implicitly clear the monitor. + bool IsExclusive(uint64_t address, size_t size) { + VIXL_ASSERT(size > 0); + // Be pedantic: Require both the address and the size to match. + return (size == size_) && (address == address_); + } + + private: + uint64_t address_; + size_t size_; + + const int kSkipClearProbability; + uint32_t seed_; +}; + + +// We can't accurate simulate the global monitor since it depends on external +// influences. Instead, this implementation occasionally causes accesses to +// fail, according to kPassProbability. +class SimExclusiveGlobalMonitor { + public: + SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {} + + bool IsExclusive(uint64_t address, size_t size) { + USE(address, size); + + bool pass = (seed_ % kPassProbability) != 0; + // Advance seed_ using a simple linear congruential generator. + seed_ = (seed_ * 48271) % 2147483647; + return pass; + } + + private: + const int kPassProbability; + uint32_t seed_; +}; + +class Redirection; + +class Simulator : public DecoderVisitor { + friend class AutoLockSimulatorCache; + + public: + explicit Simulator(Decoder* decoder, FILE* stream = stdout); + ~Simulator(); + + // Moz changes. 
+ void init(Decoder* decoder, FILE* stream); + static Simulator* Current(); + static Simulator* Create(JSContext* cx); + static void Destroy(Simulator* sim); + uintptr_t stackLimit() const; + uintptr_t* addressOfStackLimit(); + bool overRecursed(uintptr_t newsp = 0) const; + bool overRecursedWithExtra(uint32_t extra) const; + int64_t call(uint8_t* entry, int argument_count, ...); + void setRedirection(Redirection* redirection); + Redirection* redirection() const; + static void* RedirectNativeFunction(void* nativeFunction, js::jit::ABIFunctionType type); + void setGPR32Result(int32_t result); + void setGPR64Result(int64_t result); + void setFP32Result(float result); + void setFP64Result(double result); + void VisitCallRedirection(const Instruction* instr); + static inline uintptr_t StackLimit() { + return Simulator::Current()->stackLimit(); + } + + void ResetState(); + + // Run the simulator. + virtual void Run(); + void RunFrom(const Instruction* first); + + // Simulation helpers. + const Instruction* pc() const { return pc_; } + const Instruction* get_pc() const { return pc_; } + + template <typename T> + T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); } + + void set_pc(const Instruction* new_pc) { + pc_ = Memory::AddressUntag(new_pc); + pc_modified_ = true; + } + + void set_resume_pc(void* new_resume_pc); + + void increment_pc() { + if (!pc_modified_) { + pc_ = pc_->NextInstruction(); + } + + pc_modified_ = false; + } + + void ExecuteInstruction(); + + // Declare all Visitor functions. + #define DECLARE(A) virtual void Visit##A(const Instruction* instr); + VISITOR_LIST_THAT_RETURN(DECLARE) + VISITOR_LIST_THAT_DONT_RETURN(DECLARE) + #undef DECLARE + + + // Integer register accessors. + + // Basic accessor: Read the register as the specified type. + template<typename T> + T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { + VIXL_ASSERT(code < kNumberOfRegisters); + if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { + T result; + memset(&result, 0, sizeof(result)); + return result; + } + return registers_[code].Get<T>(); + } + + // Common specialized accessors for the reg() template. + int32_t wreg(unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + return reg<int32_t>(code, r31mode); + } + + int64_t xreg(unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + return reg<int64_t>(code, r31mode); + } + + // As above, with parameterized size and return type. The value is + // either zero-extended or truncated to fit, as required. + template<typename T> + T reg(unsigned size, unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + uint64_t raw; + switch (size) { + case kWRegSize: raw = reg<uint32_t>(code, r31mode); break; + case kXRegSize: raw = reg<uint64_t>(code, r31mode); break; + default: + VIXL_UNREACHABLE(); + return 0; + } + + T result; + VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw)); + // Copy the result and truncate to fit. This assumes a little-endian host. + memcpy(&result, &raw, sizeof(result)); + return result; + } + + // Use int64_t by default if T is not specified. + int64_t reg(unsigned size, unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + return reg<int64_t>(size, code, r31mode); + } + + enum RegLogMode { + LogRegWrites, + NoRegLog + }; + + // Write 'value' into an integer register. The value is zero-extended. This + // behaviour matches AArch64 register writes. 
+ template<typename T> + void set_reg(unsigned code, T value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + VIXL_STATIC_ASSERT((sizeof(T) == kWRegSizeInBytes) || + (sizeof(T) == kXRegSizeInBytes)); + VIXL_ASSERT(code < kNumberOfRegisters); + + if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { + return; + } + + registers_[code].Set(value); + + if (log_mode == LogRegWrites) LogRegister(code, r31mode); + } + + // Common specialized accessors for the set_reg() template. + void set_wreg(unsigned code, int32_t value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + set_reg(code, value, log_mode, r31mode); + } + + void set_xreg(unsigned code, int64_t value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + set_reg(code, value, log_mode, r31mode); + } + + // As above, with parameterized size and type. The value is either + // zero-extended or truncated to fit, as required. + template<typename T> + void set_reg(unsigned size, unsigned code, T value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + // Zero-extend the input. + uint64_t raw = 0; + VIXL_STATIC_ASSERT(sizeof(value) <= sizeof(raw)); + memcpy(&raw, &value, sizeof(value)); + + // Write (and possibly truncate) the value. + switch (size) { + case kWRegSize: + set_reg(code, static_cast<uint32_t>(raw), log_mode, r31mode); + break; + case kXRegSize: + set_reg(code, raw, log_mode, r31mode); + break; + default: + VIXL_UNREACHABLE(); + return; + } + } + + // Common specialized accessors for the set_reg() template. + + // Commonly-used special cases. + template<typename T> + void set_lr(T value) { + set_reg(kLinkRegCode, value); + } + + template<typename T> + void set_sp(T value) { + set_reg(31, value, LogRegWrites, Reg31IsStackPointer); + } + + // Vector register accessors. + // These are equivalent to the integer register accessors, but for vector + // registers. + + // A structure for representing a 128-bit Q register. + struct qreg_t { uint8_t val[kQRegSizeInBytes]; }; + + // Basic accessor: read the register as the specified type. + template<typename T> + T vreg(unsigned code) const { + VIXL_STATIC_ASSERT((sizeof(T) == kBRegSizeInBytes) || + (sizeof(T) == kHRegSizeInBytes) || + (sizeof(T) == kSRegSizeInBytes) || + (sizeof(T) == kDRegSizeInBytes) || + (sizeof(T) == kQRegSizeInBytes)); + VIXL_ASSERT(code < kNumberOfVRegisters); + + return vregisters_[code].Get<T>(); + } + + // Common specialized accessors for the vreg() template. + int8_t breg(unsigned code) const { + return vreg<int8_t>(code); + } + + int16_t hreg(unsigned code) const { + return vreg<int16_t>(code); + } + + float sreg(unsigned code) const { + return vreg<float>(code); + } + + uint32_t sreg_bits(unsigned code) const { + return vreg<uint32_t>(code); + } + + double dreg(unsigned code) const { + return vreg<double>(code); + } + + uint64_t dreg_bits(unsigned code) const { + return vreg<uint64_t>(code); + } + + qreg_t qreg(unsigned code) const { + return vreg<qreg_t>(code); + } + + // As above, with parameterized size and return type. The value is + // either zero-extended or truncated to fit, as required. 
+ template<typename T> + T vreg(unsigned size, unsigned code) const { + uint64_t raw = 0; + T result; + + switch (size) { + case kSRegSize: raw = vreg<uint32_t>(code); break; + case kDRegSize: raw = vreg<uint64_t>(code); break; + default: + VIXL_UNREACHABLE(); + break; + } + + VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw)); + // Copy the result and truncate to fit. This assumes a little-endian host. + memcpy(&result, &raw, sizeof(result)); + return result; + } + + inline SimVRegister& vreg(unsigned code) { + return vregisters_[code]; + } + + // Basic accessor: Write the specified value. + template<typename T> + void set_vreg(unsigned code, T value, + RegLogMode log_mode = LogRegWrites) { + VIXL_STATIC_ASSERT((sizeof(value) == kBRegSizeInBytes) || + (sizeof(value) == kHRegSizeInBytes) || + (sizeof(value) == kSRegSizeInBytes) || + (sizeof(value) == kDRegSizeInBytes) || + (sizeof(value) == kQRegSizeInBytes)); + VIXL_ASSERT(code < kNumberOfVRegisters); + vregisters_[code].Set(value); + + if (log_mode == LogRegWrites) { + LogVRegister(code, GetPrintRegisterFormat(value)); + } + } + + // Common specialized accessors for the set_vreg() template. + void set_breg(unsigned code, int8_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_hreg(unsigned code, int16_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_sreg(unsigned code, float value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_sreg_bits(unsigned code, uint32_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_dreg(unsigned code, double value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_dreg_bits(unsigned code, uint64_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_qreg(unsigned code, qreg_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + bool N() const { return nzcv_.N() != 0; } + bool Z() const { return nzcv_.Z() != 0; } + bool C() const { return nzcv_.C() != 0; } + bool V() const { return nzcv_.V() != 0; } + SimSystemRegister& nzcv() { return nzcv_; } + + // TODO: Find a way to make the fpcr_ members return the proper types, so + // these accessors are not necessary. + FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); } + bool DN() { return fpcr_.DN() != 0; } + SimSystemRegister& fpcr() { return fpcr_; } + + // Specify relevant register formats for Print(V)Register and related helpers. + enum PrintRegisterFormat { + // The lane size. + kPrintRegLaneSizeB = 0 << 0, + kPrintRegLaneSizeH = 1 << 0, + kPrintRegLaneSizeS = 2 << 0, + kPrintRegLaneSizeW = kPrintRegLaneSizeS, + kPrintRegLaneSizeD = 3 << 0, + kPrintRegLaneSizeX = kPrintRegLaneSizeD, + kPrintRegLaneSizeQ = 4 << 0, + + kPrintRegLaneSizeOffset = 0, + kPrintRegLaneSizeMask = 7 << 0, + + // The lane count. + kPrintRegAsScalar = 0, + kPrintRegAsDVector = 1 << 3, + kPrintRegAsQVector = 2 << 3, + + kPrintRegAsVectorMask = 3 << 3, + + // Indicate floating-point format lanes. (This flag is only supported for S- + // and D-sized lanes.) + kPrintRegAsFP = 1 << 5, + + // Supported combinations. 
+ + kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar, + kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar, + kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + + kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar, + kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector, + kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector, + kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar, + kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector, + kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector, + kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar, + kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector, + kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector, + kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP, + kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar, + kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector, + kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar + }; + + unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) { + return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset; + } + + unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegLaneSizeInBytesLog2(format); + } + + unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) { + if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2; + if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2; + + // Scalar types. 
+ return GetPrintRegLaneSizeInBytesLog2(format); + } + + unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegSizeInBytesLog2(format); + } + + unsigned GetPrintRegLaneCount(PrintRegisterFormat format) { + unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format); + unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format); + VIXL_ASSERT(reg_size_log2 >= lane_size_log2); + return 1 << (reg_size_log2 - lane_size_log2); + } + + PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size, + unsigned lane_size); + + PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned size) { + return GetPrintRegisterFormatForSize(size, size); + } + + PrintRegisterFormat GetPrintRegisterFormatForSizeFP(unsigned size) { + switch (size) { + default: VIXL_UNREACHABLE(); return kPrintDReg; + case kDRegSizeInBytes: return kPrintDReg; + case kSRegSizeInBytes: return kPrintSReg; + } + } + + PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) { + if ((GetPrintRegLaneSizeInBytes(format) == kSRegSizeInBytes) || + (GetPrintRegLaneSizeInBytes(format) == kDRegSizeInBytes)) { + return static_cast<PrintRegisterFormat>(format | kPrintRegAsFP); + } + return format; + } + + template<typename T> + PrintRegisterFormat GetPrintRegisterFormat(T value) { + return GetPrintRegisterFormatForSize(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(double value) { + VIXL_STATIC_ASSERT(sizeof(value) == kDRegSizeInBytes); + return GetPrintRegisterFormatForSizeFP(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(float value) { + VIXL_STATIC_ASSERT(sizeof(value) == kSRegSizeInBytes); + return GetPrintRegisterFormatForSizeFP(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform); + + // Print all registers of the specified types. + void PrintRegisters(); + void PrintVRegisters(); + void PrintSystemRegisters(); + + // As above, but only print the registers that have been updated. + void PrintWrittenRegisters(); + void PrintWrittenVRegisters(); + + // As above, but respect LOG_REG and LOG_VREG. + void LogWrittenRegisters() { + if (trace_parameters() & LOG_REGS) PrintWrittenRegisters(); + } + void LogWrittenVRegisters() { + if (trace_parameters() & LOG_VREGS) PrintWrittenVRegisters(); + } + void LogAllWrittenRegisters() { + LogWrittenRegisters(); + LogWrittenVRegisters(); + } + + // Print individual register values (after update). + void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer); + void PrintVRegister(unsigned code, PrintRegisterFormat format); + void PrintSystemRegister(SystemRegister id); + + // Like Print* (above), but respect trace_parameters(). + void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) { + if (trace_parameters() & LOG_REGS) PrintRegister(code, r31mode); + } + void LogVRegister(unsigned code, PrintRegisterFormat format) { + if (trace_parameters() & LOG_VREGS) PrintVRegister(code, format); + } + void LogSystemRegister(SystemRegister id) { + if (trace_parameters() & LOG_SYSREGS) PrintSystemRegister(id); + } + + // Print memory accesses. 
+ void PrintRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format); + void PrintWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format); + void PrintVRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane); + void PrintVWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane); + + // Like Print* (above), but respect trace_parameters(). + void LogRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format) { + if (trace_parameters() & LOG_REGS) PrintRead(address, reg_code, format); + } + void LogWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format) { + if (trace_parameters() & LOG_WRITE) PrintWrite(address, reg_code, format); + } + void LogVRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane = 0) { + if (trace_parameters() & LOG_VREGS) { + PrintVRead(address, reg_code, format, lane); + } + } + void LogVWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane = 0) { + if (trace_parameters() & LOG_WRITE) { + PrintVWrite(address, reg_code, format, lane); + } + } + + // Helper functions for register tracing. + void PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode, + int size_in_bytes = kXRegSizeInBytes); + void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSizeInBytes, + int lsb = 0); + void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes, + int lane_count = 1, int rightmost_lane = 0); + + void DoUnreachable(const Instruction* instr); + void DoTrace(const Instruction* instr); + void DoLog(const Instruction* instr); + + static const char* WRegNameForCode(unsigned code, + Reg31Mode mode = Reg31IsZeroRegister); + static const char* XRegNameForCode(unsigned code, + Reg31Mode mode = Reg31IsZeroRegister); + static const char* SRegNameForCode(unsigned code); + static const char* DRegNameForCode(unsigned code); + static const char* VRegNameForCode(unsigned code); + + bool coloured_trace() const { return coloured_trace_; } + void set_coloured_trace(bool value); + + int trace_parameters() const { return trace_parameters_; } + void set_trace_parameters(int parameters); + + void set_instruction_stats(bool value); + + // Clear the simulated local monitor to force the next store-exclusive + // instruction to fail. 
+ void ClearLocalMonitor() { + local_monitor_.Clear(); + } + + void SilenceExclusiveAccessWarning() { + print_exclusive_access_warning_ = false; + } + + protected: + const char* clr_normal; + const char* clr_flag_name; + const char* clr_flag_value; + const char* clr_reg_name; + const char* clr_reg_value; + const char* clr_vreg_name; + const char* clr_vreg_value; + const char* clr_memory_address; + const char* clr_warning; + const char* clr_warning_message; + const char* clr_printf; + + // Simulation helpers ------------------------------------ + bool ConditionPassed(Condition cond) { + switch (cond) { + case eq: + return Z(); + case ne: + return !Z(); + case hs: + return C(); + case lo: + return !C(); + case mi: + return N(); + case pl: + return !N(); + case vs: + return V(); + case vc: + return !V(); + case hi: + return C() && !Z(); + case ls: + return !(C() && !Z()); + case ge: + return N() == V(); + case lt: + return N() != V(); + case gt: + return !Z() && (N() == V()); + case le: + return !(!Z() && (N() == V())); + case nv: + VIXL_FALLTHROUGH(); + case al: + return true; + default: + VIXL_UNREACHABLE(); + return false; + } + } + + bool ConditionPassed(Instr cond) { + return ConditionPassed(static_cast<Condition>(cond)); + } + + bool ConditionFailed(Condition cond) { + return !ConditionPassed(cond); + } + + void AddSubHelper(const Instruction* instr, int64_t op2); + uint64_t AddWithCarry(unsigned reg_size, + bool set_flags, + uint64_t left, + uint64_t right, + int carry_in = 0); + void LogicalHelper(const Instruction* instr, int64_t op2); + void ConditionalCompareHelper(const Instruction* instr, int64_t op2); + void LoadStoreHelper(const Instruction* instr, + int64_t offset, + AddrMode addrmode); + void LoadStorePairHelper(const Instruction* instr, AddrMode addrmode); + uintptr_t AddressModeHelper(unsigned addr_reg, + int64_t offset, + AddrMode addrmode); + void NEONLoadStoreMultiStructHelper(const Instruction* instr, + AddrMode addr_mode); + void NEONLoadStoreSingleStructHelper(const Instruction* instr, + AddrMode addr_mode); + + uint64_t AddressUntag(uint64_t address) { + return address & ~kAddressTagMask; + } + + template <typename T> + T* AddressUntag(T* address) { + uintptr_t address_raw = reinterpret_cast<uintptr_t>(address); + return reinterpret_cast<T*>(AddressUntag(address_raw)); + } + + int64_t ShiftOperand(unsigned reg_size, + int64_t value, + Shift shift_type, + unsigned amount); + int64_t Rotate(unsigned reg_width, + int64_t value, + Shift shift_type, + unsigned amount); + int64_t ExtendValue(unsigned reg_width, + int64_t value, + Extend extend_type, + unsigned left_shift = 0); + uint16_t PolynomialMult(uint8_t op1, uint8_t op2); + + void ld1(VectorFormat vform, + LogicVRegister dst, + uint64_t addr); + void ld1(VectorFormat vform, + LogicVRegister dst, + int index, + uint64_t addr); + void ld1r(VectorFormat vform, + LogicVRegister dst, + uint64_t addr); + void ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr); + void ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + int index, + uint64_t addr); + void ld2r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr); + void ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr); + void ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr); + void ld3r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + 
LogicVRegister dst3, + uint64_t addr); + void ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr); + void ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr); + void ld4r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr); + void st1(VectorFormat vform, + LogicVRegister src, + uint64_t addr); + void st1(VectorFormat vform, + LogicVRegister src, + int index, + uint64_t addr); + void st2(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + uint64_t addr); + void st2(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + int index, + uint64_t addr); + void st3(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + uint64_t addr); + void st3(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + int index, + uint64_t addr); + void st4(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + LogicVRegister src4, + uint64_t addr); + void st4(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + LogicVRegister src4, + int index, + uint64_t addr); + LogicVRegister cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int imm, + Condition cond); + LogicVRegister cmptst(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister add(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister pmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmulx(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sub(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister and_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister orn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister eor(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bic(VectorFormat 
vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm); + LogicVRegister bif(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister cls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister clz(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister cnt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister not_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rbit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int revSize); + LogicVRegister rev16(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev32(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev64(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister addlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool is_signed, + bool do_accumulate); + LogicVRegister saddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uaddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ext(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister ins_element(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + int src_index); + LogicVRegister ins_immediate(VectorFormat vform, + LogicVRegister dst, + int dst_index, + uint64_t imm); + LogicVRegister dup_element(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int src_index); + LogicVRegister dup_immediate(VectorFormat vform, + LogicVRegister dst, + uint64_t imm); + LogicVRegister movi(VectorFormat vform, + LogicVRegister dst, + uint64_t imm); + LogicVRegister mvni(VectorFormat vform, + LogicVRegister dst, + uint64_t imm); + LogicVRegister orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm); + LogicVRegister sshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ushl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister smax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister smin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sminmaxp(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const 
LogicVRegister& src, + bool max); + LogicVRegister smaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister addv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uaddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister saddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max); + LogicVRegister smaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister uaddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2); + LogicVRegister saddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister umax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister umin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmaxp(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + bool max); + LogicVRegister umaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max); + LogicVRegister umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister trn1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister trn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister zip1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister zip2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uzp1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uzp2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister shl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister scvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding rounding_mode); + LogicVRegister ucvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding rounding_mode); + LogicVRegister sshll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sshll2(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& src, + int shift); + LogicVRegister shll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister shll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ushll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ushll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sli(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sri(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sshr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ssra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister usra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister srsra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ursra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister suqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister usqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshlu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister abs(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister neg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister extractnarrow(VectorFormat vform, + LogicVRegister dst, + bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned); + LogicVRegister xtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister absdiff(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool issigned); + LogicVRegister saba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister shrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister shrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister rshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister rshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, 
+ int shift); + LogicVRegister uqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + #define NEON_3VREG_LOGIC_LIST(V) \ + V(addhn) \ + V(addhn2) \ + V(raddhn) \ + V(raddhn2) \ + V(subhn) \ + V(subhn2) \ + V(rsubhn) \ + V(rsubhn2) \ + V(pmull) \ + V(pmull2) \ + V(sabal) \ + V(sabal2) \ + V(uabal) \ + V(uabal2) \ + V(sabdl) \ + V(sabdl2) \ + V(uabdl) \ + V(uabdl2) \ + V(smull) \ + V(smull2) \ + V(umull) \ + V(umull2) \ + V(smlal) \ + V(smlal2) \ + V(umlal) \ + V(umlal2) \ + V(smlsl) \ + V(smlsl2) \ + V(umlsl) \ + V(umlsl2) \ + V(sqdmlal) \ + V(sqdmlal2) \ + V(sqdmlsl) \ + V(sqdmlsl2) \ + V(sqdmull) \ + V(sqdmull2) + + #define DEFINE_LOGIC_FUNC(FXN) \ + LogicVRegister FXN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); + NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC) + #undef DEFINE_LOGIC_FUNC + + #define NEON_FP3SAME_LIST(V) \ + V(fadd, FPAdd, false) \ + V(fsub, FPSub, true) \ + V(fmul, FPMul, true) \ + V(fmulx, FPMulx, true) \ + V(fdiv, FPDiv, true) \ + V(fmax, FPMax, false) \ + V(fmin, FPMin, false) \ + V(fmaxnm, FPMaxNM, false) \ + V(fminnm, FPMinNM, false) + + #define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template <typename T> \ + LogicVRegister FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); \ + LogicVRegister FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); + NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP) + #undef DECLARE_NEON_FP_VECTOR_OP + + #define NEON_FPPAIRWISE_LIST(V) \ + V(faddp, fadd, FPAdd) \ + V(fmaxp, fmax, FPMax) \ + V(fmaxnmp, fmaxnm, FPMaxNM) \ + V(fminp, fmin, FPMin) \ + V(fminnmp, fminnm, FPMinNM) + + #define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); \ + LogicVRegister FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src); + NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP) + #undef DECLARE_NEON_FP_PAIR_OP + + template <typename T> + LogicVRegister frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& 
src2); + LogicVRegister frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fnmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + template <typename T> + LogicVRegister fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fabscmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp_zero(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + Condition cond); + + template <typename T> + LogicVRegister fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception = false); + LogicVRegister fcvts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fsqrt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + 
LogicVRegister frsqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding); + LogicVRegister ursqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister urecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + typedef float (Simulator::*FPMinMaxOp)(float a, float b); + + LogicVRegister fminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPMinMaxOp Op); + + LogicVRegister fminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fminnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + static const uint32_t CRC32_POLY = 0x04C11DB7; + static const uint32_t CRC32C_POLY = 0x1EDC6F41; + uint32_t Poly32Mod2(unsigned n, uint64_t data, uint32_t poly); + template <typename T> + uint32_t Crc32Checksum(uint32_t acc, T val, uint32_t poly); + uint32_t Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly); + + void SysOp_W(int op, int64_t val); + + template <typename T> + T FPRecipSqrtEstimate(T op); + template <typename T> + T FPRecipEstimate(T op, FPRounding rounding); + template <typename T, typename R> + R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding); + + void FPCompare(double val0, double val1, FPTrapFlags trap); + double FPRoundInt(double value, FPRounding round_mode); + double FPToDouble(float value); + float FPToFloat(double value, FPRounding round_mode); + float FPToFloat(float16 value); + float16 FPToFloat16(float value, FPRounding round_mode); + float16 FPToFloat16(double value, FPRounding round_mode); + double recip_sqrt_estimate(double a); + double recip_estimate(double a); + double FPRecipSqrtEstimate(double a); + double FPRecipEstimate(double a); + double FixedToDouble(int64_t src, int fbits, FPRounding round_mode); + double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode); + float FixedToFloat(int64_t src, int fbits, FPRounding round_mode); + float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode); + int32_t FPToInt32(double value, FPRounding rmode); + int64_t FPToInt64(double value, FPRounding rmode); + uint32_t FPToUInt32(double value, FPRounding rmode); + uint64_t FPToUInt64(double value, FPRounding rmode); + + template <typename T> + T FPAdd(T op1, T op2); + + template <typename T> + T FPDiv(T op1, T op2); + + template <typename T> + T FPMax(T a, T b); + + template <typename T> + T FPMaxNM(T a, T b); + + template <typename T> + T FPMin(T a, T b); + + template <typename T> + T FPMinNM(T a, T b); + + template <typename T> + T FPMul(T op1, T op2); + + template <typename T> + T FPMulx(T op1, T op2); + + template <typename T> + T FPMulAdd(T a, T op1, T op2); + + template <typename T> + T FPSqrt(T op); + + template <typename T> + T FPSub(T op1, T op2); + + template <typename T> + T FPRecipStepFused(T op1, T op2); + + template <typename T> + T FPRSqrtStepFused(T op1, T op2); + + // This doesn't do anything at the moment. We'll need it if we want support + // for cumulative exception bits or floating-point exceptions. 
+ void FPProcessException() { } + + bool FPProcessNaNs(const Instruction* instr); + + // Pseudo Printf instruction + void DoPrintf(const Instruction* instr); + + // Processor state --------------------------------------- + + // Simulated monitors for exclusive access instructions. + SimExclusiveLocalMonitor local_monitor_; + SimExclusiveGlobalMonitor global_monitor_; + + // Output stream. + FILE* stream_; + PrintDisassembler* print_disasm_; + + // Instruction statistics instrumentation. + Instrument* instrumentation_; + + // General purpose registers. Register 31 is the stack pointer. + SimRegister registers_[kNumberOfRegisters]; + + // Vector registers + SimVRegister vregisters_[kNumberOfVRegisters]; + + // Program Status Register. + // bits[31, 27]: Condition flags N, Z, C, and V. + // (Negative, Zero, Carry, Overflow) + SimSystemRegister nzcv_; + + // Floating-Point Control Register + SimSystemRegister fpcr_; + + // Only a subset of FPCR features are supported by the simulator. This helper + // checks that the FPCR settings are supported. + // + // This is checked when floating-point instructions are executed, not when + // FPCR is set. This allows generated code to modify FPCR for external + // functions, or to save and restore it when entering and leaving generated + // code. + void AssertSupportedFPCR() { + VIXL_ASSERT(fpcr().FZ() == 0); // No flush-to-zero support. + VIXL_ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only. + + // The simulator does not support half-precision operations so fpcr().AHP() + // is irrelevant, and is not checked here. + } + + static int CalcNFlag(uint64_t result, unsigned reg_size) { + return (result >> (reg_size - 1)) & 1; + } + + static int CalcZFlag(uint64_t result) { + return (result == 0) ? 1 : 0; + } + + static const uint32_t kConditionFlagsMask = 0xf0000000; + + // Stack + byte* stack_; + static const int stack_protection_size_ = 128 * KBytes; + static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_); + byte* stack_limit_; + + Decoder* decoder_; + // Indicates if the pc has been modified by the instruction and should not be + // automatically incremented. + bool pc_modified_; + const Instruction* pc_; + const Instruction* resume_pc_; + + static const char* xreg_names[]; + static const char* wreg_names[]; + static const char* sreg_names[]; + static const char* dreg_names[]; + static const char* vreg_names[]; + + static const Instruction* kEndOfSimAddress; + + private: + template <typename T> + static T FPDefaultNaN(); + + // Standard NaN processing. + template <typename T> + T FPProcessNaN(T op) { + VIXL_ASSERT(std::isnan(op)); + if (IsSignallingNaN(op)) { + FPProcessException(); + } + return DN() ? 
FPDefaultNaN<T>() : ToQuietNaN(op); + } + + template <typename T> + T FPProcessNaNs(T op1, T op2) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (std::isnan(op1)) { + VIXL_ASSERT(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (std::isnan(op2)) { + VIXL_ASSERT(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else { + return 0.0; + } + } + + template <typename T> + T FPProcessNaNs3(T op1, T op2, T op3) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (IsSignallingNaN(op3)) { + return FPProcessNaN(op3); + } else if (std::isnan(op1)) { + VIXL_ASSERT(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (std::isnan(op2)) { + VIXL_ASSERT(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else if (std::isnan(op3)) { + VIXL_ASSERT(IsQuietNaN(op3)); + return FPProcessNaN(op3); + } else { + return 0.0; + } + } + + bool coloured_trace_; + + // A set of TraceParameters flags. + int trace_parameters_; + + // Indicates whether the instruction instrumentation is active. + bool instruction_stats_; + + // Indicates whether the exclusive-access warning has been printed. + bool print_exclusive_access_warning_; + void PrintExclusiveAccessWarning(); + + // Indicates that the simulator ran out of memory at some point. + // Data structures may not be fully allocated. + bool oom_; + + public: + // True if the simulator ran out of memory during or after construction. + bool oom() const { return oom_; } + + protected: + // Moz: Synchronizes access between main thread and compilation threads. + js::Mutex lock_; + Redirection* redirection_; + mozilla::Vector<int64_t, 0, js::SystemAllocPolicy> spStack_; +}; +} // namespace vixl + +#endif // JS_SIMULATOR_ARM64 +#endif // VIXL_A64_SIMULATOR_A64_H_ diff --git a/js/src/jit/arm64/vixl/Utils-vixl.cpp b/js/src/jit/arm64/vixl/Utils-vixl.cpp new file mode 100644 index 000000000..7af311bee --- /dev/null +++ b/js/src/jit/arm64/vixl/Utils-vixl.cpp @@ -0,0 +1,145 @@ +// Copyright 2015, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/arm64/vixl/Utils-vixl.h" + +#include "mozilla/MathAlgorithms.h" + +#include <stdio.h> + +namespace vixl { + +uint32_t float_to_rawbits(float value) { + uint32_t bits = 0; + memcpy(&bits, &value, 4); + return bits; +} + + +uint64_t double_to_rawbits(double value) { + uint64_t bits = 0; + memcpy(&bits, &value, 8); + return bits; +} + + +float rawbits_to_float(uint32_t bits) { + float value = 0.0; + memcpy(&value, &bits, 4); + return value; +} + + +double rawbits_to_double(uint64_t bits) { + double value = 0.0; + memcpy(&value, &bits, 8); + return value; +} + + +uint32_t float_sign(float val) { + uint32_t rawbits = float_to_rawbits(val); + return unsigned_bitextract_32(31, 31, rawbits); +} + + +uint32_t float_exp(float val) { + uint32_t rawbits = float_to_rawbits(val); + return unsigned_bitextract_32(30, 23, rawbits); +} + + +uint32_t float_mantissa(float val) { + uint32_t rawbits = float_to_rawbits(val); + return unsigned_bitextract_32(22, 0, rawbits); +} + + +uint32_t double_sign(double val) { + uint64_t rawbits = double_to_rawbits(val); + return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, rawbits)); +} + + +uint32_t double_exp(double val) { + uint64_t rawbits = double_to_rawbits(val); + return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, rawbits)); +} + + +uint64_t double_mantissa(double val) { + uint64_t rawbits = double_to_rawbits(val); + return unsigned_bitextract_64(51, 0, rawbits); +} + + +float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) { + uint32_t bits = (sign << 31) | (exp << 23) | mantissa; + return rawbits_to_float(bits); +} + + +double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) { + uint64_t bits = (sign << 63) | (exp << 52) | mantissa; + return rawbits_to_double(bits); +} + + +int float16classify(float16 value) { + uint16_t exponent_max = (1 << 5) - 1; + uint16_t exponent_mask = exponent_max << 10; + uint16_t mantissa_mask = (1 << 10) - 1; + + uint16_t exponent = (value & exponent_mask) >> 10; + uint16_t mantissa = value & mantissa_mask; + if (exponent == 0) { + if (mantissa == 0) { + return FP_ZERO; + } + return FP_SUBNORMAL; + } else if (exponent == exponent_max) { + if (mantissa == 0) { + return FP_INFINITE; + } + return FP_NAN; + } + return FP_NORMAL; +} + + +unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) { + VIXL_ASSERT((reg_size % 8) == 0); + int count = 0; + for (unsigned i = 0; i < (reg_size / 16); i++) { + if ((imm & 0xffff) == 0) { + count++; + } + imm >>= 16; + } + return count; +} + +} // namespace vixl diff --git a/js/src/jit/arm64/vixl/Utils-vixl.h b/js/src/jit/arm64/vixl/Utils-vixl.h new file mode 100644 index 000000000..738cfb085 --- /dev/null +++ b/js/src/jit/arm64/vixl/Utils-vixl.h @@ -0,0 +1,286 @@ +// Copyright 2015, ARM Limited +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_UTILS_H +#define VIXL_UTILS_H + +#include "mozilla/FloatingPoint.h" + +#include "jit/arm64/vixl/CompilerIntrinsics-vixl.h" +#include "jit/arm64/vixl/Globals-vixl.h" + +namespace vixl { + +// Macros for compile-time format checking. +#if defined(__GNUC__) +#define PRINTF_CHECK(format_index, varargs_index) \ + __attribute__((format(printf, format_index, varargs_index))) +#else +#define PRINTF_CHECK(format_index, varargs_index) +#endif + +// Check number width. +inline bool is_intn(unsigned n, int64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + int64_t limit = INT64_C(1) << (n - 1); + return (-limit <= x) && (x < limit); +} + +inline bool is_uintn(unsigned n, int64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + return !(x >> n); +} + +inline uint32_t truncate_to_intn(unsigned n, int64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1)); +} + +#define INT_1_TO_63_LIST(V) \ +V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \ +V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \ +V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \ +V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \ +V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \ +V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \ +V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \ +V(57) V(58) V(59) V(60) V(61) V(62) V(63) + +#define DECLARE_IS_INT_N(N) \ +inline bool is_int##N(int64_t x) { return is_intn(N, x); } +#define DECLARE_IS_UINT_N(N) \ +inline bool is_uint##N(int64_t x) { return is_uintn(N, x); } +#define DECLARE_TRUNCATE_TO_INT_N(N) \ +inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); } +INT_1_TO_63_LIST(DECLARE_IS_INT_N) +INT_1_TO_63_LIST(DECLARE_IS_UINT_N) +INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N) +#undef DECLARE_IS_INT_N +#undef DECLARE_IS_UINT_N +#undef DECLARE_TRUNCATE_TO_INT_N + +// Bit field extraction. 
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) { + return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1); +} + +inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) { + return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1); +} + +inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) { + return (x << (31 - msb)) >> (lsb + 31 - msb); +} + +inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) { + return (x << (63 - msb)) >> (lsb + 63 - msb); +} + +// Floating point representation. +uint32_t float_to_rawbits(float value); +uint64_t double_to_rawbits(double value); +float rawbits_to_float(uint32_t bits); +double rawbits_to_double(uint64_t bits); + +uint32_t float_sign(float val); +uint32_t float_exp(float val); +uint32_t float_mantissa(float val); +uint32_t double_sign(double val); +uint32_t double_exp(double val); +uint64_t double_mantissa(double val); + +float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa); +double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa); + +// An fpclassify() function for 16-bit half-precision floats. +int float16classify(float16 value); + +// NaN tests. +inline bool IsSignallingNaN(double num) { + const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); + uint64_t raw = double_to_rawbits(num); + if (mozilla::IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) { + return true; + } + return false; +} + + +inline bool IsSignallingNaN(float num) { + const uint32_t kFP32QuietNaNMask = 0x00400000; + uint32_t raw = float_to_rawbits(num); + if (mozilla::IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) { + return true; + } + return false; +} + + +inline bool IsSignallingNaN(float16 num) { + const uint16_t kFP16QuietNaNMask = 0x0200; + return (float16classify(num) == FP_NAN) && + ((num & kFP16QuietNaNMask) == 0); +} + + +template <typename T> +inline bool IsQuietNaN(T num) { + return mozilla::IsNaN(num) && !IsSignallingNaN(num); +} + + +// Convert the NaN in 'num' to a quiet NaN. +inline double ToQuietNaN(double num) { + const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); + VIXL_ASSERT(mozilla::IsNaN(num)); + return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask); +} + + +inline float ToQuietNaN(float num) { + const uint32_t kFP32QuietNaNMask = 0x00400000; + VIXL_ASSERT(mozilla::IsNaN(num)); + return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask); +} + + +// Fused multiply-add. 
+inline double FusedMultiplyAdd(double op1, double op2, double a) { + return fma(op1, op2, a); +} + + +inline float FusedMultiplyAdd(float op1, float op2, float a) { + return fmaf(op1, op2, a); +} + + +inline uint64_t LowestSetBit(uint64_t value) { + return value & -value; +} + + +template<typename T> +inline int HighestSetBitPosition(T value) { + VIXL_ASSERT(value != 0); + return (sizeof(value) * 8 - 1) - CountLeadingZeros(value); +} + + +template<typename V> +inline int WhichPowerOf2(V value) { + VIXL_ASSERT(IsPowerOf2(value)); + return CountTrailingZeros(value); +} + + +unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); + + +template <typename T> +T ReverseBits(T value) { + VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8)); + T result = 0; + for (unsigned i = 0; i < (sizeof(value) * 8); i++) { + result = (result << 1) | (value & 1); + value >>= 1; + } + return result; +} + + +template <typename T> +T ReverseBytes(T value, int block_bytes_log2) { + VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8)); + VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value)); + // Split the 64-bit value into an 8-bit array, where b[0] is the least + // significant byte, and b[7] is the most significant. + uint8_t bytes[8]; + uint64_t mask = 0xff00000000000000; + for (int i = 7; i >= 0; i--) { + bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8); + mask >>= 8; + } + + // Permutation tables for REV instructions. + // permute_table[0] is used by REV16_x, REV16_w + // permute_table[1] is used by REV32_x, REV_w + // permute_table[2] is used by REV_x + VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4)); + static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1}, + {4, 5, 6, 7, 0, 1, 2, 3}, + {0, 1, 2, 3, 4, 5, 6, 7} }; + T result = 0; + for (int i = 0; i < 8; i++) { + result <<= 8; + result |= bytes[permute_table[block_bytes_log2 - 1][i]]; + } + return result; +} + + +// Pointer alignment +// TODO: rename/refactor to make it specific to instructions. +template<typename T> +bool IsWordAligned(T pointer) { + VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof) + return ((intptr_t)(pointer) & 3) == 0; +} + +// Increment a pointer (up to 64 bits) until it has the specified alignment. +template<class T> +T AlignUp(T pointer, size_t alignment) { + // Use C-style casts to get static_cast behaviour for integral types (T), and + // reinterpret_cast behaviour for other types. + + uint64_t pointer_raw = (uint64_t)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); + + size_t align_step = (alignment - pointer_raw) % alignment; + VIXL_ASSERT((pointer_raw + align_step) % alignment == 0); + + return (T)(pointer_raw + align_step); +} + +// Decrement a pointer (up to 64 bits) until it has the specified alignment. +template<class T> +T AlignDown(T pointer, size_t alignment) { + // Use C-style casts to get static_cast behaviour for integral types (T), and + // reinterpret_cast behaviour for other types. + + uint64_t pointer_raw = (uint64_t)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); + + size_t align_step = pointer_raw % alignment; + VIXL_ASSERT((pointer_raw - align_step) % alignment == 0); + + return (T)(pointer_raw - align_step); +} + +} // namespace vixl + +#endif // VIXL_UTILS_H |
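Editor's note (not part of the commit above): the bit-field and alignment helpers declared at the end of Utils-vixl.h are plain integer arithmetic and can be checked in isolation. The standalone sketch below uses hypothetical stand-in names; only the formulas mirror unsigned_bitextract_64 and AlignUp from the header.

```cpp
// Standalone sketch of the bit-extraction and alignment arithmetic from
// Utils-vixl.h; the names here are hypothetical stand-ins, not the real API.
#include <cassert>
#include <cstdint>
#include <cstring>

// Extract bits [msb:lsb] of x (same formula as unsigned_bitextract_64).
static uint64_t BitExtract64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((UINT64_C(1) << (1 + msb - lsb)) - 1);
}

// Round an address up to the next 'alignment' boundary (same arithmetic as
// AlignUp, but operating on a raw 64-bit value instead of a pointer type).
static uint64_t AlignUp64(uint64_t addr, uint64_t alignment) {
  uint64_t step = (alignment - addr) % alignment;
  return addr + step;
}

int main() {
  // The exponent of an IEEE-754 double occupies bits [62:52]; for 1.0 it is
  // the bias value 0x3ff, which is what double_exp() reports.
  double one = 1.0;
  uint64_t raw;
  memcpy(&raw, &one, sizeof(raw));
  assert(BitExtract64(62, 52, raw) == 0x3ff);

  // 0x1005 rounded up to a 16-byte boundary is 0x1010; an already-aligned
  // address is returned unchanged.
  assert(AlignUp64(0x1005, 16) == 0x1010);
  assert(AlignUp64(0x2000, 16) == 0x2000);
  return 0;
}
```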
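Further back, the simulator header's FPProcessNaNs helpers encode a fixed propagation order: a signalling NaN in op1 wins, then a signalling NaN in op2, then quiet NaNs in the same operand order, and 0.0 means "no NaN, perform the normal arithmetic". A minimal standalone sketch of that rule follows; the names are hypothetical, and the FPCR default-NaN (DN) handling of the real FPProcessNaN is deliberately omitted.

```cpp
// Minimal sketch of the NaN-propagation order used by the simulator's
// FPProcessNaNs helpers; names are hypothetical, DN mode is omitted.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

static const uint64_t kQuietBit64 = UINT64_C(0x0008000000000000);

static uint64_t Bits(double d) {
  uint64_t raw;
  memcpy(&raw, &d, sizeof(raw));
  return raw;
}

// A double NaN is signalling when its most-significant mantissa bit is clear.
static bool IsSignalling(double d) {
  return std::isnan(d) && (Bits(d) & kQuietBit64) == 0;
}

// Set the quiet bit so the propagated NaN is always quiet.
static double Quieten(double d) {
  uint64_t raw = Bits(d) | kQuietBit64;
  double out;
  memcpy(&out, &raw, sizeof(out));
  return out;
}

// Return the NaN a two-operand FP instruction propagates, or 0.0 when
// neither operand is a NaN (the caller then does the real arithmetic).
static double ProcessNaNs(double op1, double op2) {
  if (IsSignalling(op1)) return Quieten(op1);
  if (IsSignalling(op2)) return Quieten(op2);
  if (std::isnan(op1)) return op1;  // already quiet
  if (std::isnan(op2)) return op2;  // already quiet
  return 0.0;
}

int main() {
  double qnan = std::nan("");
  assert(std::isnan(ProcessNaNs(qnan, 1.0)));  // op1 NaN propagates
  assert(std::isnan(ProcessNaNs(1.0, qnan)));  // op2 NaN propagates
  assert(ProcessNaNs(1.0, 2.0) == 0.0);        // no NaN involved
  return 0;
}
```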