author    Matt A. Tobin <mattatobin@localhost.localdomain> 2018-02-02 04:16:08 -0500
committer Matt A. Tobin <mattatobin@localhost.localdomain> 2018-02-02 04:16:08 -0500
commit    5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree      10027f336435511475e392454359edea8e25895d /js/src/jit/x64
parent    49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/x64')
-rw-r--r--  js/src/jit/x64/Assembler-x64.cpp          303
-rw-r--r--  js/src/jit/x64/Assembler-x64.h           1040
-rw-r--r--  js/src/jit/x64/Bailouts-x64.cpp            75
-rw-r--r--  js/src/jit/x64/BaseAssembler-x64.h        929
-rw-r--r--  js/src/jit/x64/BaselineCompiler-x64.cpp    15
-rw-r--r--  js/src/jit/x64/BaselineCompiler-x64.h      26
-rw-r--r--  js/src/jit/x64/BaselineIC-x64.cpp          46
-rw-r--r--  js/src/jit/x64/CodeGenerator-x64.cpp      880
-rw-r--r--  js/src/jit/x64/CodeGenerator-x64.h         89
-rw-r--r--  js/src/jit/x64/LIR-x64.h                  183
-rw-r--r--  js/src/jit/x64/LOpcodes-x64.h              23
-rw-r--r--  js/src/jit/x64/Lowering-x64.cpp           495
-rw-r--r--  js/src/jit/x64/Lowering-x64.h              80
-rw-r--r--  js/src/jit/x64/MacroAssembler-x64-inl.h   897
-rw-r--r--  js/src/jit/x64/MacroAssembler-x64.cpp     859
-rw-r--r--  js/src/jit/x64/MacroAssembler-x64.h       966
-rw-r--r--  js/src/jit/x64/SharedIC-x64.cpp           234
-rw-r--r--  js/src/jit/x64/SharedICHelpers-x64.h      352
-rw-r--r--  js/src/jit/x64/SharedICRegisters-x64.h     35
-rw-r--r--  js/src/jit/x64/Trampoline-x64.cpp        1303
20 files changed, 8830 insertions, 0 deletions
diff --git a/js/src/jit/x64/Assembler-x64.cpp b/js/src/jit/x64/Assembler-x64.cpp
new file mode 100644
index 000000000..37f29d009
--- /dev/null
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -0,0 +1,303 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/Assembler-x64.h"
+
+#include "gc/Marking.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ :
+#if defined(XP_WIN)
+ regIndex_(0),
+ stackOffset_(ShadowStackSpace),
+#else
+ intRegIndex_(0),
+ floatRegIndex_(0),
+ stackOffset_(0),
+#endif
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+#if defined(XP_WIN)
+ JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
+ if (regIndex_ == NumIntArgRegs) {
+ if (IsSimdType(type)) {
+ // On Win64, >64 bit args need to be passed by reference, but wasm
+ // doesn't allow passing SIMD values to FFIs. The only way to reach
+ // here is asm to asm calls, so we can break the ABI here.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ } else {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ }
+ return current_;
+ }
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ current_ = ABIArg(IntArgRegs[regIndex_++]);
+ break;
+ case MIRType::Float32:
+ current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
+ break;
+ case MIRType::Double:
+ current_ = ABIArg(FloatArgRegs[regIndex_++]);
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ // On Win64, >64 bit args need to be passed by reference, but wasm
+ // doesn't allow passing SIMD values to FFIs. The only way to reach
+ // here is asm to asm calls, so we can break the ABI here.
+ current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+#else
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ current_ = ABIArg(IntArgRegs[intRegIndex_++]);
+ break;
+ case MIRType::Double:
+ case MIRType::Float32:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ }
+ if (type == MIRType::Float32)
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
+ else
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ }
+ current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+#endif
+}
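For orientation, here is a minimal sketch of how JIT callers typically drive this generator. The ABIArg accessors used (kind(), gpr(), fpu(), offsetFromArgBase()) come from the shared Assembler-shared.h; the three-argument signature itself is made up for illustration and is not part of this patch:

    // Sketch only: classify a hypothetical (Int32, Double, Pointer) signature.
    ABIArgGenerator abi;
    const MIRType sig[] = { MIRType::Int32, MIRType::Double, MIRType::Pointer };
    for (MIRType t : sig) {
        ABIArg arg = abi.next(t);
        switch (arg.kind()) {
          case ABIArg::GPR:   /* passed in arg.gpr() */                break;
          case ABIArg::FPU:   /* passed in arg.fpu() */                break;
          case ABIArg::Stack: /* at sp + arg.offsetFromArgBase() */    break;
          default:            MOZ_CRASH("unexpected ABIArg kind");
        }
    }
    // abi.stackBytesConsumedSoFar() is the outgoing stack-argument size.

On SysV x86-64 this yields rdi, xmm0, rsi and no stack bytes; on Win64 the single shared index yields rcx, xmm1, r8.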
+
+void
+Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
+{
+ if (!jumpRelocations_.length()) {
+ // The jump relocation table starts with a fixed-width integer pointing
+ // to the start of the extended jump table. But, we don't know the
+ // actual extended jump table offset yet, so write a 0 which we'll
+ // patch later.
+ jumpRelocations_.writeFixedUint32_t(0);
+ }
+ if (reloc == Relocation::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ jumpRelocations_.writeUnsigned(jumps_.length());
+ }
+}
+
+void
+Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
+{
+ MOZ_ASSERT(target.value != nullptr);
+
+ // Emit reloc before modifying the jump table, since it computes a 0-based
+ // index. This jump is not patchable at runtime.
+ if (reloc == Relocation::JITCODE)
+ writeRelocation(src, reloc);
+ enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
+}
+
+size_t
+Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
+{
+ // This jump is patchable at runtime so we always need to make sure the
+ // jump table is emitted.
+ writeRelocation(src, reloc);
+
+ size_t index = jumps_.length();
+ enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
+ return index;
+}
+
+/* static */
+uint8_t*
+Assembler::PatchableJumpAddress(JitCode* code, size_t index)
+{
+ // The assembler stashed the offset into the code of the fragments used
+ // for far jumps at the start of the relocation table.
+ uint32_t jumpOffset = * (uint32_t*) code->jumpRelocTable();
+ jumpOffset += index * SizeOfJumpTableEntry;
+
+ MOZ_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
+ return code->raw() + jumpOffset;
+}
+
+/* static */
+void
+Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect)
+{
+ uint8_t** index = (uint8_t**) (entry + SizeOfExtendedJump - sizeof(void*));
+ MaybeAutoWritableJitCode awjc(index, sizeof(void*), reprotect);
+ *index = target;
+}
+
+void
+Assembler::finish()
+{
+ if (!jumps_.length() || oom())
+ return;
+
+ // Emit the jump table.
+ masm.haltingAlign(SizeOfJumpTableEntry);
+ extendedJumpTable_ = masm.size();
+
+ // Now that we know the offset to the jump table, squirrel it into the
+ // jump relocation buffer if any JitCode references exist and must be
+ // tracked for GC.
+ MOZ_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
+ if (jumpRelocations_.length())
+ *(uint32_t*)jumpRelocations_.buffer() = extendedJumpTable_;
+
+    // Emit the extended jump table. Each entry's 64-bit immediate is zeroed
+    // here and patched with the real target in executableCopy().
+ for (size_t i = 0; i < jumps_.length(); i++) {
+#ifdef DEBUG
+ size_t oldSize = masm.size();
+#endif
+ masm.jmp_rip(2);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 6);
+ // Following an indirect branch with ud2 hints to the hardware that
+ // there's no fall-through. This also aligns the 64-bit immediate.
+ masm.ud2();
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 8);
+ masm.immediate64(0);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfExtendedJump);
+ MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfJumpTableEntry);
+ }
+}
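For reference, one 16-byte table entry emitted by this loop looks like the following (a sketch using the standard x86-64 encodings; only the zeroed immediate is filled in later, by executableCopy() or PatchJumpEntry()):

    FF 25 02 00 00 00          jmp *2(%rip)  ; load target from the slot below
    0F 0B                      ud2           ; no-fall-through hint, pads to 8
    00 00 00 00 00 00 00 00                  ; 64-bit target, patched later

The rip-relative displacement is 2 because %rip points just past the jmp, at the ud2, and the ud2 is two bytes.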
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ AssemblerX86Shared::executableCopy(buffer);
+
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ uint8_t* src = buffer + rp.offset;
+ if (!rp.target) {
+ // The patch target is nullptr for jumps that have been linked to
+ // a label within the same code block, but may be repatched later
+ // to jump to a different code block.
+ continue;
+ }
+ if (X86Encoding::CanRelinkJump(src, rp.target)) {
+ X86Encoding::SetRel32(src, rp.target);
+ } else {
+ // An extended jump table must exist, and its offset must be in
+ // range.
+ MOZ_ASSERT(extendedJumpTable_);
+ MOZ_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);
+
+ // Patch the jump to go to the extended jump entry.
+ uint8_t* entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
+ X86Encoding::SetRel32(src, entry);
+
+ // Now patch the pointer, note that we need to align it to
+ // *after* the extended jump, i.e. after the 64-bit immedate.
+ X86Encoding::SetPointer(entry + SizeOfExtendedJump, rp.target);
+ }
+ }
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ uint32_t tableStart_;
+ uint32_t offset_;
+ uint32_t extOffset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader)
+ : reader_(reader)
+ {
+ tableStart_ = reader_.readFixedUint32_t();
+ }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ extOffset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+ uint32_t extendedOffset() const {
+ return extOffset_;
+ }
+};
+
+JitCode*
+Assembler::CodeFromJump(JitCode* code, uint8_t* jump)
+{
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+ if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
+ // This jump is within the code buffer, so it has been redirected to
+ // the extended jump table.
+ MOZ_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());
+
+ target = (uint8_t*)X86Encoding::GetPointer(target + SizeOfExtendedJump);
+ }
+
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
+ }
+}
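Taken together, writeRelocation() on the write side and RelocationIterator on the read side imply this layout for the jump relocation stream (a summary of the code above, not a new format):

    [fixed uint32]  offset of the extended jump table, patched in finish()
    then, per JITCODE jump:
      [compact uint32]  offset of the short jump within the code
      [compact uint32]  index of the jump (selects a 16-byte table entry)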
diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
new file mode 100644
index 000000000..30e384158
--- /dev/null
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -0,0 +1,1040 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_Assembler_x64_h
+#define jit_x64_Assembler_x64_h
+
+#include "mozilla/ArrayUtils.h"
+
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register rax = { X86Encoding::rax };
+static constexpr Register rbx = { X86Encoding::rbx };
+static constexpr Register rcx = { X86Encoding::rcx };
+static constexpr Register rdx = { X86Encoding::rdx };
+static constexpr Register rsi = { X86Encoding::rsi };
+static constexpr Register rdi = { X86Encoding::rdi };
+static constexpr Register rbp = { X86Encoding::rbp };
+static constexpr Register r8 = { X86Encoding::r8 };
+static constexpr Register r9 = { X86Encoding::r9 };
+static constexpr Register r10 = { X86Encoding::r10 };
+static constexpr Register r11 = { X86Encoding::r11 };
+static constexpr Register r12 = { X86Encoding::r12 };
+static constexpr Register r13 = { X86Encoding::r13 };
+static constexpr Register r14 = { X86Encoding::r14 };
+static constexpr Register r15 = { X86Encoding::r15 };
+static constexpr Register rsp = { X86Encoding::rsp };
+
+static constexpr FloatRegister xmm0 = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 = FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 = FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 = FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 = FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 = FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 = FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister xmm8 = FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);
+static constexpr FloatRegister xmm9 = FloatRegister(X86Encoding::xmm9, FloatRegisters::Double);
+static constexpr FloatRegister xmm10 = FloatRegister(X86Encoding::xmm10, FloatRegisters::Double);
+static constexpr FloatRegister xmm11 = FloatRegister(X86Encoding::xmm11, FloatRegisters::Double);
+static constexpr FloatRegister xmm12 = FloatRegister(X86Encoding::xmm12, FloatRegisters::Double);
+static constexpr FloatRegister xmm13 = FloatRegister(X86Encoding::xmm13, FloatRegisters::Double);
+static constexpr FloatRegister xmm14 = FloatRegister(X86Encoding::xmm14, FloatRegisters::Double);
+static constexpr FloatRegister xmm15 = FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
+
+// X86-common synonyms.
+static constexpr Register eax = rax;
+static constexpr Register ebx = rbx;
+static constexpr Register ecx = rcx;
+static constexpr Register edx = rdx;
+static constexpr Register esi = rsi;
+static constexpr Register edi = rdi;
+static constexpr Register ebp = rbp;
+static constexpr Register esp = rsp;
+
+static constexpr Register InvalidReg = { X86Encoding::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register StackPointer = rsp;
+static constexpr Register FramePointer = rbp;
+static constexpr Register JSReturnReg = rcx;
+// Avoid, except for assertions.
+static constexpr Register JSReturnReg_Type = JSReturnReg;
+static constexpr Register JSReturnReg_Data = JSReturnReg;
+
+static constexpr Register ScratchReg = r11;
+
+// Helper class for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of the scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope
+{
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchReg)
+ { }
+};
+
+static constexpr Register ReturnReg = rax;
+static constexpr Register HeapReg = r15;
+static constexpr Register64 ReturnReg64(rax);
+static constexpr FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg = xmm15;
+
+// Avoid rbp, which is the FramePointer, which is unavailable in some modes.
+static constexpr Register ArgumentsRectifierReg = r8;
+static constexpr Register CallTempReg0 = rax;
+static constexpr Register CallTempReg1 = rdi;
+static constexpr Register CallTempReg2 = rbx;
+static constexpr Register CallTempReg3 = rcx;
+static constexpr Register CallTempReg4 = rsi;
+static constexpr Register CallTempReg5 = rdx;
+
+// Different argument registers for WIN64
+#if defined(_WIN64)
+static constexpr Register IntArgReg0 = rcx;
+static constexpr Register IntArgReg1 = rdx;
+static constexpr Register IntArgReg2 = r8;
+static constexpr Register IntArgReg3 = r9;
+static constexpr uint32_t NumIntArgRegs = 4;
+// Use "const" instead of constexpr here to work around a bug
+// of VS2015 Update 1. See bug 1229604.
+static const Register IntArgRegs[NumIntArgRegs] = { rcx, rdx, r8, r9 };
+
+static const Register CallTempNonArgRegs[] = { rax, rdi, rbx, rsi };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+static constexpr FloatRegister FloatArgReg0 = xmm0;
+static constexpr FloatRegister FloatArgReg1 = xmm1;
+static constexpr FloatRegister FloatArgReg2 = xmm2;
+static constexpr FloatRegister FloatArgReg3 = xmm3;
+static const uint32_t NumFloatArgRegs = 4;
+static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3 };
+#else
+static constexpr Register IntArgReg0 = rdi;
+static constexpr Register IntArgReg1 = rsi;
+static constexpr Register IntArgReg2 = rdx;
+static constexpr Register IntArgReg3 = rcx;
+static constexpr Register IntArgReg4 = r8;
+static constexpr Register IntArgReg5 = r9;
+static constexpr uint32_t NumIntArgRegs = 6;
+static const Register IntArgRegs[NumIntArgRegs] = { rdi, rsi, rdx, rcx, r8, r9 };
+
+// Use "const" instead of constexpr here to work around a bug
+// of VS2015 Update 1. See bug 1229604.
+static const Register CallTempNonArgRegs[] = { rax, rbx };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+static constexpr FloatRegister FloatArgReg0 = xmm0;
+static constexpr FloatRegister FloatArgReg1 = xmm1;
+static constexpr FloatRegister FloatArgReg2 = xmm2;
+static constexpr FloatRegister FloatArgReg3 = xmm3;
+static constexpr FloatRegister FloatArgReg4 = xmm4;
+static constexpr FloatRegister FloatArgReg5 = xmm5;
+static constexpr FloatRegister FloatArgReg6 = xmm6;
+static constexpr FloatRegister FloatArgReg7 = xmm7;
+static constexpr uint32_t NumFloatArgRegs = 8;
+static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 };
+#endif
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = r10;
+static constexpr Register WasmIonExitRegE0 = rax;
+static constexpr Register WasmIonExitRegE1 = rdi;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+static constexpr Register WasmIonExitRegReturnData = ecx;
+static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitRegD0 = rax;
+static constexpr Register WasmIonExitRegD1 = rdi;
+static constexpr Register WasmIonExitRegD2 = rbx;
+
+// Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg1;
+static constexpr Register RegExpTesterStringReg = CallTempReg2;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
+
+class ABIArgGenerator
+{
+#if defined(XP_WIN)
+ unsigned regIndex_;
+#else
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+#endif
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+};
+
+// Avoid r11, which is the MacroAssembler's ScratchReg.
+static constexpr Register ABINonArgReg0 = rax;
+static constexpr Register ABINonArgReg1 = rbx;
+static constexpr Register ABINonArgReg2 = r10;
+
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = r10;
+static constexpr Register ABINonArgReturnReg1 = r12;
+static constexpr Register ABINonVolatileReg = r13;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = r14;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register OsrFrameReg = IntArgReg3;
+
+static constexpr Register PreBarrierReg = rdx;
+
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture or not. Rather than a method in the LIRGenerator, it is
+// here such that it is accessible from the entire codebase. Once full support
+// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = true;
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments which are used for "
+ "the constant sections of the code buffer. Thus it should be larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments which are used for "
+ "spilled values. Thus it should be larger than the alignment for SIMD accesses.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+static const Scale ScalePointer = TimesEight;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+// Return operand from a JS -> JS call.
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+class Assembler : public AssemblerX86Shared
+{
+ // x64 jumps may need extra bits of relocation, because a jump may extend
+ // beyond the signed 32-bit range. To account for this we add an extended
+ // jump table at the bottom of the instruction stream, and if a jump
+ // overflows its range, it will redirect here.
+ //
+ // In our relocation table, we store two offsets instead of one: the offset
+ // to the original jump, and an offset to the extended jump if we will need
+ // to use it instead. The offsets are stored as:
+ // [unsigned] Unsigned offset to short jump, from the start of the code.
+ // [unsigned] Unsigned offset to the extended jump, from the start of
+ // the jump table, in units of SizeOfJumpTableEntry.
+ //
+ // The start of the relocation table contains the offset from the code
+ // buffer to the start of the extended jump table.
+ //
+ // Each entry in this table is a jmp [rip], followed by a ud2 to hint to the
+ // hardware branch predictor that there is no fallthrough, followed by the
+ // eight bytes containing an immediate address. This comes out to 16 bytes.
+ // +1 byte for opcode
+ // +1 byte for mod r/m
+    //    +4 bytes for rip-relative offset (the stored offset is 2, skipping the ud2)
+ // +2 bytes for ud2 instruction
+ // +8 bytes for 64-bit address
+ //
+ static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8;
+ static const uint32_t SizeOfJumpTableEntry = 16;
+
+ uint32_t extendedJumpTable_;
+
+ static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);
+
+ private:
+ void writeRelocation(JmpSrc src, Relocation::Kind reloc);
+ void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc);
+
+ protected:
+ size_t addPatchableJump(JmpSrc src, Relocation::Kind reloc);
+
+ public:
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::pop;
+ using AssemblerX86Shared::vmovq;
+
+ static uint8_t* PatchableJumpAddress(JitCode* code, size_t index);
+ static void PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect);
+
+ Assembler()
+ : extendedJumpTable_(0)
+ {
+ }
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ // The buffer is about to be linked, make sure any constant pools or excess
+ // bookkeeping has been flushed to the instruction stream.
+ void finish();
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ // Actual assembly emitting functions.
+
+ void push(const ImmGCPtr ptr) {
+ movq(ptr, ScratchReg);
+ push(ScratchReg);
+ }
+ void push(const ImmWord ptr) {
+ // We often end up with ImmWords that actually fit into int32.
+ // Be aware of the sign extension behavior.
+ if (ptr.value <= INT32_MAX) {
+ push(Imm32(ptr.value));
+ } else {
+ movq(ptr, ScratchReg);
+ push(ScratchReg);
+ }
+ }
+ void push(ImmPtr imm) {
+ push(ImmWord(uintptr_t(imm.value)));
+ }
+ void push(FloatRegister src) {
+ subq(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+ CodeOffset pushWithPatch(ImmWord word) {
+ CodeOffset label = movWithPatch(word, ScratchReg);
+ push(ScratchReg);
+ return label;
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addq(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ masm.movq_i64r(word.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ // Load an ImmWord value into a register. Note that this instruction will
+ // attempt to optimize its immediate field size. When a full 64-bit
+ // immediate is needed for a relocation, use movWithPatch.
+ void movq(ImmWord word, Register dest) {
+ // Load a 64-bit immediate into a register. If the value falls into
+ // certain ranges, we can use specialized instructions which have
+ // smaller encodings.
+ if (word.value <= UINT32_MAX) {
+ // movl has a 32-bit unsigned (effectively) immediate field.
+ masm.movl_i32r((uint32_t)word.value, dest.encoding());
+ } else if ((intptr_t)word.value >= INT32_MIN && (intptr_t)word.value <= INT32_MAX) {
+ // movq has a 32-bit signed immediate field.
+ masm.movq_i32r((int32_t)(intptr_t)word.value, dest.encoding());
+ } else {
+ // Otherwise use movabs.
+ masm.movq_i64r(word.value, dest.encoding());
+ }
+ }
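Concretely, the three branches above select these encodings (the standard x86-64 forms; byte counts shown for %rax as a sketch):

    movq(ImmWord(1), rax);             // movl $1, %eax        B8 imm32       (5 bytes)
    movq(ImmWord(uintptr_t(-2)), rax); // movq $-2, %rax       48 C7 C0 imm32 (7 bytes)
    movq(ImmWord(0x1122334455), rax);  // movabsq $..., %rax   48 B8 imm64    (10 bytes)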
+ void movq(ImmPtr imm, Register dest) {
+ movq(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void movq(ImmGCPtr ptr, Register dest) {
+ masm.movq_i64r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movq(Imm32 imm32, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(imm32.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movq_i32m(imm32.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movq_i32m(imm32.value, dest.disp(), dest.base(), dest.index(), dest.scale());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movq_i32m(imm32.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void vmovq(Register src, FloatRegister dest) {
+ masm.vmovq_rr(src.encoding(), dest.encoding());
+ }
+ void vmovq(FloatRegister src, Register dest) {
+ masm.vmovq_rr(src.encoding(), dest.encoding());
+ }
+ void movq(Register src, Register dest) {
+ masm.movq_rr(src.encoding(), dest.encoding());
+ }
+
+ void cmovzq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.cmovzq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmovzq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.cmovzq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void xchgq(Register src, Register dest) {
+ masm.xchgq_rr(src.encoding(), dest.encoding());
+ }
+
+ void movsbq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movsbq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movzbq(const Operand& src, Register dest) {
+ // movzbl zero-extends to 64 bits and is one byte smaller, so use that
+ // instead.
+ movzbl(src, dest);
+ }
+
+ void movswq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movswq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void movzwq(const Operand& src, Register dest) {
+ // movzwl zero-extends to 64 bits and is one byte smaller, so use that
+ // instead.
+ movzwl(src, dest);
+ }
+
+ void movslq(Register src, Register dest) {
+ masm.movslq_rr(src.encoding(), dest.encoding());
+ }
+ void movslq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.movslq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movslq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.movslq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void andq(Register src, Register dest) {
+ masm.andq_rr(src.encoding(), dest.encoding());
+ }
+ void andq(Imm32 imm, Register dest) {
+ masm.andq_ir(imm.value, dest.encoding());
+ }
+ void andq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.andq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.andq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.andq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.andq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void addq(Imm32 imm, Register dest) {
+ masm.addq_ir(imm.value, dest.encoding());
+ }
+ void addq(Imm32 imm, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.addq_ir(imm.value, dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_im(imm.value, dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addq_im(imm.value, dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void addq(Register src, Register dest) {
+ masm.addq_rr(src.encoding(), dest.encoding());
+ }
+ void addq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.addq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.addq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.addq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void subq(Imm32 imm, Register dest) {
+ masm.subq_ir(imm.value, dest.encoding());
+ }
+ void subq(Register src, Register dest) {
+ masm.subq_rr(src.encoding(), dest.encoding());
+ }
+ void subq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.subq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.subq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void subq(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.subq_rr(src.encoding(), dest.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.subq_rm(src.encoding(), dest.disp(), dest.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void shlq(Imm32 imm, Register dest) {
+ masm.shlq_ir(imm.value, dest.encoding());
+ }
+ void shrq(Imm32 imm, Register dest) {
+ masm.shrq_ir(imm.value, dest.encoding());
+ }
+ void sarq(Imm32 imm, Register dest) {
+ masm.sarq_ir(imm.value, dest.encoding());
+ }
+ void shlq_cl(Register dest) {
+ masm.shlq_CLr(dest.encoding());
+ }
+ void shrq_cl(Register dest) {
+ masm.shrq_CLr(dest.encoding());
+ }
+ void sarq_cl(Register dest) {
+ masm.sarq_CLr(dest.encoding());
+ }
+ void rolq(Imm32 imm, Register dest) {
+ masm.rolq_ir(imm.value, dest.encoding());
+ }
+ void rolq_cl(Register dest) {
+ masm.rolq_CLr(dest.encoding());
+ }
+ void rorq(Imm32 imm, Register dest) {
+ masm.rorq_ir(imm.value, dest.encoding());
+ }
+ void rorq_cl(Register dest) {
+ masm.rorq_CLr(dest.encoding());
+ }
+ void orq(Imm32 imm, Register dest) {
+ masm.orq_ir(imm.value, dest.encoding());
+ }
+ void orq(Register src, Register dest) {
+ masm.orq_rr(src.encoding(), dest.encoding());
+ }
+ void orq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.orq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.orq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.orq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void xorq(Register src, Register dest) {
+ masm.xorq_rr(src.encoding(), dest.encoding());
+ }
+ void xorq(Imm32 imm, Register dest) {
+ masm.xorq_ir(imm.value, dest.encoding());
+ }
+ void xorq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.xorq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.xorq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.xorq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void bsrq(const Register& src, const Register& dest) {
+ masm.bsrq_rr(src.encoding(), dest.encoding());
+ }
+ void bsfq(const Register& src, const Register& dest) {
+ masm.bsfq_rr(src.encoding(), dest.encoding());
+ }
+ void popcntq(const Register& src, const Register& dest) {
+ masm.popcntq_rr(src.encoding(), dest.encoding());
+ }
+
+ void imulq(Register src, Register dest) {
+ masm.imulq_rr(src.encoding(), dest.encoding());
+ }
+ void imulq(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::REG:
+ masm.imulq_rr(src.reg(), dest.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.imulq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ MOZ_CRASH("NYI");
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void cqo() {
+ masm.cqo();
+ }
+ void idivq(Register divisor) {
+ masm.idivq_r(divisor.encoding());
+ }
+ void udivq(Register divisor) {
+ masm.divq_r(divisor.encoding());
+ }
+
+ void vcvtsi2sdq(Register src, FloatRegister dest) {
+ masm.vcvtsi2sdq_rr(src.encoding(), dest.encoding());
+ }
+
+ void negq(Register reg) {
+ masm.negq_r(reg.encoding());
+ }
+
+ void mov(ImmWord word, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though. Use xorl instead of xorq since they are functionally
+ // equivalent (32-bit instructions zero-extend their results to 64 bits)
+ // and xorl has a smaller encoding.
+ if (word.value == 0)
+ xorl(dest, dest);
+ else
+ movq(word, dest);
+ }
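The zero special case trades a FLAGS write for three code bytes (a sketch of the two encodings):

    mov(ImmWord(0), rax);   // xorl %eax, %eax   31 C0            (2 bytes, clobbers FLAGS)
    movq(ImmWord(0), rax);  // movl $0, %eax     B8 00 00 00 00   (5 bytes, FLAGS preserved)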
+ void mov(ImmPtr imm, Register dest) {
+ movq(imm, dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movq_i64r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) {
+ movq(src, dest);
+ }
+ void mov(Register src, const Operand& dest) {
+ movq(src, dest);
+ }
+ void mov(Imm32 imm32, const Operand& dest) {
+ movq(imm32, dest);
+ }
+ void mov(Register src, Register dest) {
+ movq(src, dest);
+ }
+ void mov(CodeOffset* label, Register dest) {
+ masm.movq_i64r(/* placeholder */ 0, dest.encoding());
+ label->bind(masm.size());
+ }
+ void xchg(Register src, Register dest) {
+ xchgq(src, dest);
+ }
+ void lea(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.leaq_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexepcted operand kind");
+ }
+ }
+
+ CodeOffset loadRipRelativeInt32(Register dest) {
+ return CodeOffset(masm.movl_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeInt64(Register dest) {
+ return CodeOffset(masm.movq_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeDouble(FloatRegister dest) {
+ return CodeOffset(masm.vmovsd_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeFloat32(FloatRegister dest) {
+ return CodeOffset(masm.vmovss_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeInt32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovdqa_ripr(dest.encoding()).offset());
+ }
+ CodeOffset loadRipRelativeFloat32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovaps_ripr(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeInt32(Register dest) {
+ return CodeOffset(masm.movl_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeInt64(Register dest) {
+ return CodeOffset(masm.movq_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeDouble(FloatRegister dest) {
+ return CodeOffset(masm.vmovsd_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeFloat32(FloatRegister dest) {
+ return CodeOffset(masm.vmovss_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeInt32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovdqa_rrip(dest.encoding()).offset());
+ }
+ CodeOffset storeRipRelativeFloat32x4(FloatRegister dest) {
+ return CodeOffset(masm.vmovaps_rrip(dest.encoding()).offset());
+ }
+ CodeOffset leaRipRelative(Register dest) {
+ return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
+ }
+
+ void cmpq(Register rhs, Register lhs) {
+ masm.cmpq_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpq(Register rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_rr(rhs.encoding(), lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_rm(rhs.encoding(), lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpq_rm(rhs.encoding(), lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpq(Imm32 rhs, Register lhs) {
+ masm.cmpq_ir(rhs.value, lhs.encoding());
+ }
+ void cmpq(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_im(rhs.value, lhs.disp(), lhs.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpq_im(rhs.value, lhs.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpq(const Operand& rhs, Register lhs) {
+ switch (rhs.kind()) {
+ case Operand::REG:
+ masm.cmpq_rr(rhs.reg(), lhs.encoding());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpq_mr(rhs.disp(), rhs.base(), lhs.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void testq(Imm32 rhs, Register lhs) {
+ masm.testq_ir(rhs.value, lhs.encoding());
+ }
+ void testq(Register rhs, Register lhs) {
+ masm.testq_rr(rhs.encoding(), lhs.encoding());
+ }
+ void testq(Imm32 rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.testq_ir(rhs.value, lhs.reg());
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.testq_i32m(rhs.value, lhs.disp(), lhs.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ break;
+ }
+ }
+
+ void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(ImmWord target) {
+ call(ImmPtr((void*)target.value));
+ }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, Relocation::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
+ return 5;
+ }
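Both shapes are five bytes, which is what lets the shared ToggleCall machinery flip one into the other in place; as a byte-level sketch:

    enabled:   E8 rel32    call <target>
    disabled:  3D imm32    cmpl $imm, %eax   (harmless where FLAGS are dead)

Patching rewrites only the opcode byte and reuses the same four operand bytes, which is why ToggledCallSize() can return 5 for either form.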
+
+ // Do not mask shared implementations.
+ using AssemblerX86Shared::call;
+
+ void vcvttsd2sq(FloatRegister src, Register dest) {
+ masm.vcvttsd2sq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvttss2sq(FloatRegister src, Register dest) {
+ masm.vcvttss2sq_rr(src.encoding(), dest.encoding());
+ }
+ void vcvtsq2sd(Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
+ masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+};
+
+static inline void
+PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
+{
+ if (X86Encoding::CanRelinkJump(jump.raw(), label.raw())) {
+ MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+ X86Encoding::SetRel32(jump.raw(), label.raw());
+ } else {
+ {
+ MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+ X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
+ }
+ Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw(), reprotect);
+ }
+}
+
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ PatchJump(jump_, label);
+}
+
+static inline bool
+GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register* out)
+{
+#if defined(_WIN64)
+ uint32_t arg = intArg + floatArg;
+#else
+ uint32_t arg = intArg;
+#endif
+ if (arg >= NumIntArgRegs)
+ return false;
+ *out = IntArgRegs[arg];
+ return true;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+#if defined(_WIN64)
+ uint32_t arg = usedIntArgs + usedFloatArgs;
+#else
+ uint32_t arg = usedIntArgs;
+#endif
+ arg -= NumIntArgRegs;
+ if (arg >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[arg];
+ return true;
+}
+
+static inline bool
+GetFloatArgReg(uint32_t intArg, uint32_t floatArg, FloatRegister* out)
+{
+#if defined(_WIN64)
+ uint32_t arg = intArg + floatArg;
+#else
+ uint32_t arg = floatArg;
+#endif
+    // Bounds-check the slot index itself: on WIN64, arg = intArg + floatArg
+    // can exceed NumFloatArgRegs even when floatArg alone does not.
+    if (arg >= NumFloatArgRegs)
+ return false;
+ *out = FloatArgRegs[arg];
+ return true;
+}
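A small sketch of how the two int-arg helpers differ, on SysV x86-64 (six integer argument registers, two CallTempNonArgRegs):

    Register r;
    GetIntArgReg(5, 0, &r);         // true:  r = r9, the sixth int arg register
    GetIntArgReg(6, 0, &r);         // false: further int args go on the stack
    GetTempRegForIntArg(6, 0, &r);  // true:  r = rax (CallTempNonArgRegs[0])
    GetTempRegForIntArg(8, 0, &r);  // false: rax and rbx are both exhausted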
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_Assembler_x64_h */
diff --git a/js/src/jit/x64/Bailouts-x64.cpp b/js/src/jit/x64/Bailouts-x64.cpp
new file mode 100644
index 000000000..a07aa31a9
--- /dev/null
+++ b/js/src/jit/x64/Bailouts-x64.cpp
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+using namespace js;
+using namespace js::jit;
+
+#if defined(_WIN32)
+# pragma pack(push, 1)
+#endif
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ uintptr_t frameSize_;
+ uintptr_t snapshotOffset_;
+
+ public:
+ MachineState machineState() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ uint32_t snapshotOffset() const {
+ return snapshotOffset_;
+ }
+ uint32_t frameSize() const {
+ return frameSize_;
+ }
+ uint8_t* parentStackPointer() {
+ return (uint8_t*)this + sizeof(BailoutStack);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#if defined(_WIN32)
+# pragma pack(pop)
+#endif
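As a sketch, the frame that the bailout trampoline builds (and that the constructor below unwinds) looks like this, with low addresses on top:

    sp  -> fpregs_           \
           regs_              |  BailoutStack
           frameSize_         |
           snapshotOffset_   /
           <frameSize_ bytes of the bailing Ion frame>   <- parentStackPointer()
           JitFrameLayout (calleeToken, ...)             <- framePointer_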
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machineState())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+ snapshotOffset_ = bailout->snapshotOffset();
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/x64/BaseAssembler-x64.h b/js/src/jit/x64/BaseAssembler-x64.h
new file mode 100644
index 000000000..f26fedc07
--- /dev/null
+++ b/js/src/jit/x64/BaseAssembler-x64.h
@@ -0,0 +1,929 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_BaseAssembler_x64_h
+#define jit_x64_BaseAssembler_x64_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX64 : public BaseAssembler
+{
+ public:
+
+ // Arithmetic operations:
+
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("addq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, src, dst);
+ }
+
+ void addq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("addq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, dst);
+ }
+
+ void addq_mr(const void* addr, RegisterID dst)
+ {
+ spew("addq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_ADD_GvEv, addr, dst);
+ }
+
+ void addq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("addq $%d, %s", imm, GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("addq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int32_t imm, const void* addr)
+ {
+ spew("addq $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("andq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, src, dst);
+ }
+
+ void andq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("andq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, dst);
+ }
+
+ void andq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("andq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, index, scale, dst);
+ }
+
+ void andq_mr(const void* addr, RegisterID dst)
+ {
+ spew("andq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_AND_GvEv, addr, dst);
+ }
+
+ void orq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("orq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, offset, base, dst);
+ }
+
+ void orq_mr(const void* addr, RegisterID dst)
+ {
+ spew("orq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, addr, dst);
+ }
+
+ void xorq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("xorq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, dst);
+ }
+
+ void xorq_mr(const void* addr, RegisterID dst)
+ {
+ spew("xorq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, addr, dst);
+ }
+
+ void bsrq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("bsrq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSR_GvEv, src, dst);
+ }
+
+ void bsfq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("bsfq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_BSF_GvEv, src, dst);
+ }
+
+ void popcntq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("popcntq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.legacySSEPrefix(VEX_SS);
+ m_formatter.twoByteOp64(OP2_POPCNT_GvEv, src, dst);
+ }
+
+ void andq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("andq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_AND_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void negq_r(RegisterID dst)
+ {
+ spew("negq %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
+ }
+
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("orq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_OR_GvEv, src, dst);
+ }
+
+ void orq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("orq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void notq_r(RegisterID dst)
+ {
+ spew("notq %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
+ }
+
+ void subq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("subq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, src, dst);
+ }
+
+ void subq_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("subq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, src);
+ }
+
+ void subq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("subq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, offset, base, dst);
+ }
+
+ void subq_mr(const void* addr, RegisterID dst)
+ {
+ spew("subq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_SUB_GvEv, addr, dst);
+ }
+
+ void subq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("subq $%d, %s", imm, GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xorq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XOR_GvEv, src, dst);
+ }
+
+ void xorq_ir(int32_t imm, RegisterID dst)
+ {
+ spew("xorq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
+ m_formatter.immediate8s(imm);
+ } else {
+ if (dst == rax)
+ m_formatter.oneByteOp64(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sarq_CLr(RegisterID dst)
+ {
+ spew("sarq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
+ }
+
+ void shlq_CLr(RegisterID dst)
+ {
+ spew("shlq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
+ }
+
+ void shrq_CLr(RegisterID dst)
+ {
+ spew("shrq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
+ }
+
+ void sarq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("sarq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shlq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("shlq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void shrq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("shrq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+
+ void rolq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("rolq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rolq_CLr(RegisterID dst)
+ {
+ spew("rolq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
+ }
+
+ void rorq_ir(int32_t imm, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 64);
+ spew("rorq $%d, %s", imm, GPReg64Name(dst));
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
+ m_formatter.immediate8u(imm);
+ }
+ }
+ void rorq_CLr(RegisterID dst)
+ {
+ spew("rorq %%cl, %s", GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
+ }
+
+ void imulq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("imulq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst);
+ }
+
+ void imulq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("imulq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst);
+ }
+
+ void cqo()
+ {
+ spew("cqo ");
+ m_formatter.oneByteOp64(OP_CDQ);
+ }
+
+ void idivq_r(RegisterID divisor)
+ {
+ spew("idivq %s", GPReg64Name(divisor));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
+ }
+
+ void divq_r(RegisterID divisor)
+ {
+ spew("divq %s", GPReg64Name(divisor));
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
+ }
+
+ // Comparisons:
+
+ void cmpq_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("cmpq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_CMP_GvEv, rhs, lhs);
+ }
+
+ void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpq %s, " MEM_ob, GPReg64Name(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, rhs);
+ }
+
+ void cmpq_mr(int32_t offset, RegisterID base, RegisterID lhs)
+ {
+ spew("cmpq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_CMP_GvEv, offset, base, lhs);
+ }
+
+ void cmpq_ir(int32_t rhs, RegisterID lhs)
+ {
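+        // Comparing against zero: cmpq $0 and testq of a register with
+        // itself set identical condition codes, and the testq form avoids
+        // encoding an immediate.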
+ if (rhs == 0) {
+ testq_rr(lhs, lhs);
+ return;
+ }
+
+ spew("cmpq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ if (lhs == rax)
+ m_formatter.oneByteOp64(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpq_im(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("cmpq $0x%" PRIx64 ", " MEM_ob, int64_t(rhs), ADDR_ob(offset, base));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+
+ void cmpq_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("cmpq $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+ void cmpq_im(int32_t rhs, const void* addr)
+ {
+ spew("cmpq $0x%" PRIx64 ", %p", int64_t(rhs), addr);
+ if (CAN_SIGN_EXTEND_8_32(rhs)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
+ m_formatter.immediate8s(rhs);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
+ m_formatter.immediate32(rhs);
+ }
+ }
+ void cmpq_rm(RegisterID rhs, const void* addr)
+ {
+ spew("cmpq %s, %p", GPReg64Name(rhs), addr);
+ m_formatter.oneByteOp64(OP_CMP_EvGv, addr, rhs);
+ }
+
+ void testq_rr(RegisterID rhs, RegisterID lhs)
+ {
+ spew("testq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
+ m_formatter.oneByteOp64(OP_TEST_EvGv, lhs, rhs);
+ }
+
+ void testq_ir(int32_t rhs, RegisterID lhs)
+ {
+ // If the mask fits in a 32-bit immediate, we can use testl with a
+ // 32-bit subreg.
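+        // For example (illustrative operands), testq $0x7f, %rcx and
+        // testl $0x7f, %ecx set identical flags, since the high 32 bits of
+        // the mask are zero; the testl form also drops the REX.W prefix.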
+ if (CAN_ZERO_EXTEND_32_64(rhs)) {
+ testl_ir(rhs, lhs);
+ return;
+ }
+ spew("testq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
+ if (lhs == rax)
+ m_formatter.oneByteOp64(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testq_i32m(int32_t rhs, int32_t offset, RegisterID base)
+ {
+ spew("testq $0x%" PRIx64 ", " MEM_ob, int64_t(rhs), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ void testq_i32m(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("testq $0x%4x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, index, scale, GROUP3_OP_TEST);
+ m_formatter.immediate32(rhs);
+ }
+
+ // Various move ops:
+
+ void cmovzq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("cmovz %s, %s", GPReg16Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, src, dst);
+ }
+ void cmovzq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("cmovz " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, offset, base, dst);
+ }
+ void cmovzq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("cmovz " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
+ m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, offset, base, index, scale, dst);
+ }
+
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("xchgq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst);
+ }
+ void xchgq_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("xchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src);
+ }
+ void xchgq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("xchgq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src);
+ }
+
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movq %s, %s", GPReg64Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movq_rm_disp32(RegisterID src, int32_t offset, RegisterID base)
+ {
+ spew("movq %s, " MEM_o32b, GPReg64Name(src), ADDR_o32b(offset, base));
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, offset, base, src);
+ }
+
+ void movq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, index, scale, src);
+ }
+
+ void movq_rm(RegisterID src, const void* addr)
+ {
+ if (src == rax && !IsAddressImmediate(addr)) {
+ movq_EAXm(addr);
+ return;
+ }
+
+ spew("movq %s, %p", GPReg64Name(src), addr);
+ m_formatter.oneByteOp64(OP_MOV_EvGv, addr, src);
+ }
+
+ void movq_mEAX(const void* addr)
+ {
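+        // Uses the short moffs64 encoding (REX.W A1), which exists only for
+        // rax; this is why the rax special cases exist in movq_rm/movq_mr.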
+ if (IsAddressImmediate(addr)) {
+ movq_mr(addr, rax);
+ return;
+ }
+
+ spew("movq %p, %%rax", addr);
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(const void* addr)
+ {
+ if (IsAddressImmediate(addr)) {
+ movq_rm(rax, addr);
+ return;
+ }
+
+ spew("movq %%rax, %p", addr);
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movq_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movq " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, offset, base, dst);
+ }
+
+ void movq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, index, scale, dst);
+ }
+
+ void movq_mr(const void* addr, RegisterID dst)
+ {
+ if (dst == rax && !IsAddressImmediate(addr)) {
+ movq_mEAX(addr);
+ return;
+ }
+
+ spew("movq %p, %s", addr, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_GvEv, addr, dst);
+ }
+
+ void leaq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("leaq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst)),
+ m_formatter.oneByteOp64(OP_LEA, offset, base, index, scale, dst);
+ }
+
+ void movq_i32m(int32_t imm, int32_t offset, RegisterID base)
+ {
+ spew("movq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+ void movq_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
+ {
+ spew("movq $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, index, scale, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+ void movq_i32m(int32_t imm, const void* addr)
+ {
+ spew("movq $%d, %p", imm, addr);
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, addr, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ // Note that this instruction sign-extends its 32-bit immediate field to 64
+ // bits and loads the 64-bit value into a 64-bit register.
+ //
+ // Note also that this is similar to the movl_i32r instruction, except that
+ // movl_i32r *zero*-extends its 32-bit immediate, and it has smaller code
+ // size, so it's preferred for values which could use either.
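+    // For example (resulting register contents, not encodings):
+    //   movq $-1, %rax leaves rax = 0xFFFFFFFFFFFFFFFF (sign-extended);
+    //   movl $-1, %eax leaves rax = 0x00000000FFFFFFFF (upper half zeroed).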
+ void movq_i32r(int32_t imm, RegisterID dst)
+ {
+ spew("movq $%d, %s", imm, GPRegName(dst));
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, dst, GROUP11_MOV);
+ m_formatter.immediate32(imm);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ spew("movabsq $0x%" PRIx64 ", %s", imm, GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsbq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movsbq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, dst);
+ }
+ void movsbq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movsbq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
+ }
+
+ void movswq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movswq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, dst);
+ }
+ void movswq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movswq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
+ }
+
+ void movslq_rr(RegisterID src, RegisterID dst)
+ {
+ spew("movslq %s, %s", GPReg32Name(src), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, src, dst);
+ }
+ void movslq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("movslq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, dst);
+ }
+ void movslq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ spew("movslq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, index, scale, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movl_ripr(RegisterID dst)
+ {
+ m_formatter.oneByteRipOp(OP_MOV_GvEv, 0, (RegisterID)dst);
+ JmpSrc label(m_formatter.size());
+ spew("movl " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPReg32Name(dst));
+ return label;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movl_rrip(RegisterID src)
+ {
+ m_formatter.oneByteRipOp(OP_MOV_EvGv, 0, (RegisterID)src);
+ JmpSrc label(m_formatter.size());
+ spew("movl %s, " MEM_o32r "", GPReg32Name(src), ADDR_o32r(label.offset()));
+ return label;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movq_ripr(RegisterID dst)
+ {
+ m_formatter.oneByteRipOp64(OP_MOV_GvEv, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("movq " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPRegName(dst));
+ return label;
+ }
+
+ MOZ_MUST_USE JmpSrc
+ movq_rrip(RegisterID src)
+ {
+ m_formatter.oneByteRipOp64(OP_MOV_EvGv, 0, (RegisterID)src);
+ JmpSrc label(m_formatter.size());
+ spew("movq %s, " MEM_o32r "", GPRegName(src), ADDR_o32r(label.offset()));
+ return label;
+ }
+
+ void leaq_mr(int32_t offset, RegisterID base, RegisterID dst)
+ {
+ spew("leaq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
+ m_formatter.oneByteOp64(OP_LEA, offset, base, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ leaq_rip(RegisterID dst)
+ {
+ m_formatter.oneByteRipOp64(OP_LEA, 0, dst);
+ JmpSrc label(m_formatter.size());
+ spew("leaq " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPRegName(dst));
+ return label;
+ }
+
+ // Flow control:
+
+ void jmp_rip(int ripOffset)
+ {
+ // rip-relative addressing.
+ spew("jmp *%d(%%rip)", ripOffset);
+ m_formatter.oneByteRipOp(OP_GROUP5_Ev, ripOffset, GROUP5_OP_JMPN);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ spew(".quad %lld", (long long)imm);
+ m_formatter.immediate64(imm);
+ }
+
+ // SSE operations:
+
+ void vcvtsq2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpInt64Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
+ }
+ void vcvtsq2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpInt64Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
+ }
+
+ void vcvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpInt64Simd("vcvtsi2sdq", VEX_SD, OP2_CVTSI2SD_VsdEd, src, invalid_xmm, dst);
+ }
+
+ void vcvttsd2sq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt64("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vcvttss2sq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ twoByteOpSimdInt64("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
+ }
+
+ void vmovq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ // While this is called "vmovq", it actually uses the vmovd encoding
+ // with a REX prefix modifying it to be 64-bit.
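+        // (For illustration, the legacy-SSE form assembles as
+        // 66 REX.W 0F 7E /r, i.e. MOVD's opcode with REX.W selecting the
+        // 64-bit GPR operand.)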
+ twoByteOpSimdInt64("vmovq", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst, (RegisterID)src);
+ }
+
+ void vmovq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ // While this is called "vmovq", it actually uses the vmovd encoding
+ // with a REX prefix modifying it to be 64-bit.
+ twoByteOpInt64Simd("vmovq", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ vmovsd_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, invalid_xmm, dst);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovss_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, invalid_xmm, dst);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovsd_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, invalid_xmm, src);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovss_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, invalid_xmm, src);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovdqa_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, invalid_xmm, src);
+ }
+ MOZ_MUST_USE JmpSrc
+ vmovaps_rrip(XMMRegisterID src)
+ {
+ return twoByteRipOpSimd("vmovdqa", VEX_PS, OP2_MOVAPS_WsdVsd, invalid_xmm, src);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ vmovaps_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, invalid_xmm, dst);
+ }
+
+ MOZ_MUST_USE JmpSrc
+ vmovdqa_ripr(XMMRegisterID dst)
+ {
+ return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, invalid_xmm, dst);
+ }
+
+ private:
+
+ MOZ_MUST_USE JmpSrc
+ twoByteRipOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID src0, XMMRegisterID dst)
+ {
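+        // The rip-relative disp32 is emitted as zero here and patched
+        // later; the JmpSrc records the end-of-instruction offset that the
+        // patching code uses to locate it.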
+ if (useLegacySSEEncoding(src0, dst)) {
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteRipOp(opcode, 0, dst);
+ JmpSrc label(m_formatter.size());
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_o32r "", legacySSEOpName(name), XMMRegName(dst), ADDR_o32r(label.offset()));
+ else
+ spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), ADDR_o32r(label.offset()), XMMRegName(dst));
+ return label;
+ }
+
+ m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
+ JmpSrc label(m_formatter.size());
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, " MEM_o32r "", name, XMMRegName(dst), ADDR_o32r(label.offset()));
+ else
+ spew("%-11s" MEM_o32r ", %s", name, ADDR_o32r(label.offset()), XMMRegName(dst));
+ } else {
+ spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst));
+ }
+ return label;
+ }
+
+ void twoByteOpInt64Simd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ RegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ if (useLegacySSEEncoding(src0, dst)) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), GPRegName(rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm), XMMRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp64(opcode, rm, dst);
+ return;
+ }
+
+ if (src0 == invalid_xmm) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm));
+ else
+ spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst));
+ } else {
+ spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0), XMMRegName(dst));
+ }
+ m_formatter.twoByteOpVex64(ty, opcode, rm, src0, dst);
+ }
+
+ void twoByteOpSimdInt64(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
+ XMMRegisterID rm, RegisterID dst)
+ {
+ if (useLegacySSEEncodingForOtherOutput()) {
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst), XMMRegName(rm));
+ else if (opcode == OP2_MOVD_EdVd)
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
+ else
+ spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), GPRegName(dst));
+ m_formatter.legacySSEPrefix(ty);
+ m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst);
+ return;
+ }
+
+ if (IsXMMReversedOperands(opcode))
+ spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm));
+ else if (opcode == OP2_MOVD_EdVd)
+ spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
+ else
+ spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst));
+ m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm, (XMMRegisterID)dst);
+ }
+};
+
+typedef BaseAssemblerX64 BaseAssemblerSpecific;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_BaseAssembler_x64_h */
diff --git a/js/src/jit/x64/BaselineCompiler-x64.cpp b/js/src/jit/x64/BaselineCompiler-x64.cpp
new file mode 100644
index 000000000..8735414be
--- /dev/null
+++ b/js/src/jit/x64/BaselineCompiler-x64.cpp
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/BaselineCompiler-x64.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerX64::BaselineCompilerX64(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerX86Shared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/x64/BaselineCompiler-x64.h b/js/src/jit/x64/BaselineCompiler-x64.h
new file mode 100644
index 000000000..ff75f9c09
--- /dev/null
+++ b/js/src/jit/x64/BaselineCompiler-x64.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_BaselineCompiler_x64_h
+#define jit_x64_BaselineCompiler_x64_h
+
+#include "jit/x86-shared/BaselineCompiler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerX64 : public BaselineCompilerX86Shared
+{
+ protected:
+ BaselineCompilerX64(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerX64 BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_BaselineCompiler_x64_h */
diff --git a/js/src/jit/x64/BaselineIC-x64.cpp b/js/src/jit/x64/BaselineIC-x64.cpp
new file mode 100644
index 000000000..f04052b0d
--- /dev/null
+++ b/js/src/jit/x64/BaselineIC-x64.cpp
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Directly compare the int32 payload of R0 and R1.
+ ScratchRegisterScope scratch(masm);
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
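+    // Pre-zero the whole scratch register, since setCC below writes only
+    // its low byte.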
+ masm.mov(ImmWord(0), scratch);
+ masm.cmp32(R0.valueReg(), R1.valueReg());
+ masm.setCC(cond, scratch);
+
+ // Box the result and return
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, scratch, R0.valueReg());
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
new file mode 100644
index 000000000..a5be15072
--- /dev/null
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -0,0 +1,880 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/CodeGenerator-x64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/IonCaches.h"
+#include "jit/MIR.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+CodeGeneratorX64::CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm)
+{
+}
+
+ValueOperand
+CodeGeneratorX64::ToValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand
+CodeGeneratorX64::ToOutValue(LInstruction* ins)
+{
+ return ValueOperand(ToRegister(ins->getDef(0)));
+}
+
+ValueOperand
+CodeGeneratorX64::ToTempValue(LInstruction* ins, size_t pos)
+{
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+Operand
+CodeGeneratorX64::ToOperand64(const LInt64Allocation& a64)
+{
+ const LAllocation& a = a64.value();
+ MOZ_ASSERT(!a.isFloatReg());
+ if (a.isGeneralReg())
+ return Operand(a.toGeneralReg()->reg());
+ return Operand(masm.getStackPointer(), ToStackOffset(a));
+}
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(0);
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_CRASH("x64 does not use frame size classes");
+}
+
+void
+CodeGeneratorX64::visitValue(LValue* value)
+{
+ LDefinition* reg = value->getDef(0);
+ masm.moveValue(value->value(), ToRegister(reg));
+}
+
+void
+CodeGeneratorX64::visitBox(LBox* box)
+{
+ const LAllocation* in = box->getOperand(0);
+ const LDefinition* result = box->getDef(0);
+
+ if (IsFloatingPointType(box->type())) {
+ ScratchDoubleScope scratch(masm);
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, scratch);
+ reg = scratch;
+ }
+ masm.vmovq(reg, ToRegister(result));
+ } else {
+ masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result));
+ }
+}
+
+void
+CodeGeneratorX64::visitUnbox(LUnbox* unbox)
+{
+ MUnbox* mir = unbox->mir();
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Assembler::Condition cond;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ cond = masm.testInt32(Assembler::NotEqual, value);
+ break;
+ case MIRType::Boolean:
+ cond = masm.testBoolean(Assembler::NotEqual, value);
+ break;
+ case MIRType::Object:
+ cond = masm.testObject(Assembler::NotEqual, value);
+ break;
+ case MIRType::String:
+ cond = masm.testString(Assembler::NotEqual, value);
+ break;
+ case MIRType::Symbol:
+ cond = masm.testSymbol(Assembler::NotEqual, value);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutIf(cond, unbox->snapshot());
+ }
+
+ Operand input = ToOperand(unbox->getOperand(LUnbox::Input));
+ Register result = ToRegister(unbox->output());
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(input, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(input, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(input, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(input, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(input, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void
+CodeGeneratorX64::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ // Load boxed boolean in ScratchReg.
+ ScratchRegisterScope scratch(masm);
+ if (rhs->isConstant())
+ masm.moveValue(rhs->toConstant()->toJSValue(), scratch);
+ else
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
+
+ // Perform the comparison.
+ masm.cmpPtr(lhs.valueReg(), scratch);
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+}
+
+void
+CodeGeneratorX64::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ // Load boxed boolean in ScratchReg.
+ ScratchRegisterScope scratch(masm);
+ if (rhs->isConstant())
+ masm.moveValue(rhs->toConstant()->toJSValue(), scratch);
+ else
+ masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
+
+ // Perform the comparison.
+ masm.cmpPtr(lhs.valueReg(), scratch);
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX64::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+}
+
+void
+CodeGeneratorX64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX64::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+
+ if (IsConstant(rhs))
+ masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
+ else
+ masm.cmpPtr(lhsReg, ToOperand64(rhs));
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
+}
+
+void
+CodeGeneratorX64::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+
+ if (IsConstant(rhs))
+ masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
+ else
+ masm.cmpPtr(lhsReg, ToOperand64(rhs));
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
+ MOZ_ASSERT(rhs != rdx);
+ MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
+ MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);
+
+ Label done;
+
+ // Put the lhs in rax.
+ if (lhs != rax)
+ masm.mov(lhs, rax);
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
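+    // (INT64_MIN / -1 would be 2^63, which is unrepresentable in a signed
+    // 64-bit result, so idivq faults; the corresponding remainder is 0.)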
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notmin);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notmin);
+ if (lir->mir()->isMod())
+ masm.xorl(output, output);
+ else
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ // Sign extend the lhs into rdx to make rdx:rax.
+ masm.cqo();
+ masm.idivq(rhs);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX64::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+
+ DebugOnly<Register> output = ToRegister(lir->output());
+ MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
+ MOZ_ASSERT(rhs != rdx);
+ MOZ_ASSERT_IF(output.value == rax, ToRegister(lir->remainder()) == rdx);
+ MOZ_ASSERT_IF(output.value == rdx, ToRegister(lir->remainder()) == rax);
+
+ // Put the lhs in rax.
+ if (lhs != rax)
+ masm.mov(lhs, rax);
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+    // Zero rdx so that (rdx:rax) holds the zero-extended lhs.
+ masm.xorl(rdx, rdx);
+ masm.udivq(rhs);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX64::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+
+ Operand falseExpr = ToOperandOrRegister64(lir->falseExpr());
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
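+    // test32 sets ZF iff the condition is zero, so cmovzq replaces the
+    // reused true value with falseExpr exactly when the condition fails.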
+ masm.test32(cond, cond);
+ masm.cmovzq(falseExpr, out.reg);
+}
+
+void
+CodeGeneratorX64::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.vmovq(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.vmovq(ToFloatRegister(lir->input()), ToRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorX64::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorX64::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+void
+CodeGeneratorX64::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+void
+CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value,
+ Operand dstAddr)
+{
+ if (value->isConstant()) {
+ MOZ_ASSERT(!access.isSimd());
+
+ masm.memoryBarrier(access.barrierBefore());
+
+ const MConstant* mir = value->toConstant();
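+        // An Int64 constant can reach here for a truncating store (e.g.
+        // i64.store32); narrowing it to Imm32 is then intentional.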
+ Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
+
+ size_t storeOffset = masm.size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ masm.movb(cst, dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.movw(cst, dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.movl(cst, dstAddr);
+ break;
+ case Scalar::Int64:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ masm.append(access, storeOffset, masm.framePushed());
+
+ masm.memoryBarrier(access.barrierAfter());
+ } else {
+ masm.wasmStore(access, ToAnyRegister(value), dstAddr);
+ }
+}
+
+template <typename T>
+void
+CodeGeneratorX64::emitWasmLoad(T* ins)
+{
+ const MWasmLoad* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
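+    // A bogus pointer operand means the pointer was folded into the
+    // constant offset during lowering, so HeapReg plus offset suffices.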
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(HeapReg, offset)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
+
+ if (mir->type() == MIRType::Int64)
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ else
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
+{
+ emitWasmLoad(ins);
+}
+
+void
+CodeGeneratorX64::visitWasmLoadI64(LWasmLoadI64* ins)
+{
+ emitWasmLoad(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorX64::emitWasmStore(T* ins)
+{
+ const MWasmStore* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* value = ins->getOperand(ins->ValueIndex);
+ const LAllocation* ptr = ins->ptr();
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(HeapReg, offset)
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
+
+ wasmStore(mir->access(), value, dstAddr);
+}
+
+void
+CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX64::visitWasmStoreI64(LWasmStoreI64* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* out = ins->output();
+
+ Scalar::Type accessType = mir->access().type();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(HeapReg, mir->offset())
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+
+ uint32_t before = masm.size();
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
+ uint32_t after = masm.size();
+ verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
+}
+
+void
+CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* value = ins->value();
+
+ Scalar::Type accessType = mir->access().type();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
+ canonicalizeIfDeterministic(accessType, value);
+
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(HeapReg, mir->offset())
+ : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+
+ uint32_t before = masm.size();
+ wasmStore(mir->access(), value, dstAddr);
+ uint32_t after = masm.size();
+ verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
+}
+
+void
+CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ srcAddr,
+ oldval,
+ newval,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Register ptr = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ MOZ_ASSERT(accessType <= Scalar::Uint32);
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ srcAddr,
+ value,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+ Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ AnyRegister output = ToAnyRegister(ins->output());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ if (accessType == Scalar::Uint32)
+ accessType = Scalar::Int32;
+
+ AtomicOp op = mir->operation();
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ if (value->isConstant()) {
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
+ output);
+ } else {
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
+ output);
+ }
+}
+
+void
+CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Scalar::Type accessType = mir->access().type();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
+ else
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
+}
+
+void
+CodeGeneratorX64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+
+ MIRType type = mir->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
+ break;
+ case MIRType::Float32:
+ label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Double:
+ label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));
+ break;
+      // Aligned access: the code is aligned on PageSize, and padding is
+      // inserted before the global data section.
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ label = masm.loadRipRelativeInt32x4(ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Float32x4:
+ label = masm.loadRipRelativeFloat32x4(ToFloatRegister(ins->output()));
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
+ }
+
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ CodeOffset label = masm.loadRipRelativeInt64(ToRegister(ins->output()));
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+
+ MIRType type = mir->value()->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
+ break;
+ case MIRType::Float32:
+ label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
+ break;
+ case MIRType::Double:
+ label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));
+ break;
+      // Aligned access: the code is aligned on PageSize, and padding is
+      // inserted before the global data section.
+ case MIRType::Int32x4:
+ case MIRType::Bool32x4:
+ label = masm.storeRipRelativeInt32x4(ToFloatRegister(ins->value()));
+ break;
+ case MIRType::Float32x4:
+ label = masm.storeRipRelativeFloat32x4(ToFloatRegister(ins->value()));
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
+ }
+
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+ MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register value = ToRegister(ins->getOperand(LWasmStoreGlobalVarI64::InputIndex));
+ CodeOffset label = masm.storeRipRelativeInt64(value);
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ // On x64, branchTruncateDouble uses vcvttsd2sq. Unlike the x86
+ // implementation, this should handle most doubles and we can just
+ // call a stub if it fails.
+ emitTruncateDouble(input, output, ins->mir());
+}
+
+void
+CodeGeneratorX64::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ // On x64, branchTruncateFloat32 uses vcvttss2sq. Unlike the x86
+ // implementation, this should handle most floats and we can just
+ // call a stub if it fails.
+ emitTruncateFloat32(input, output, ins->mir());
+}
+
+void
+CodeGeneratorX64::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
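+    // A 32-bit move implicitly truncates: writing the low 32 bits also
+    // zeroes the upper half of the destination register.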
+ if (lir->mir()->bottomHalf())
+ masm.movl(ToOperand(input), output);
+ else
+ MOZ_CRASH("Not implemented.");
+}
+
+void
+CodeGeneratorX64::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->isUnsigned())
+ masm.movl(ToOperand(input), output);
+ else
+ masm.movslq(ToOperand(input), output);
+}
+
+void
+CodeGeneratorX64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType inputType = mir->input()->type();
+
+ MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ FloatRegister temp = mir->isUnsigned() ? ToFloatRegister(lir->temp()) : InvalidFloatReg;
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ if (inputType == MIRType::Double) {
+ if (mir->isUnsigned())
+ masm.wasmTruncateDoubleToUInt64(input, output, oolEntry, oolRejoin, temp);
+ else
+ masm.wasmTruncateDoubleToInt64(input, output, oolEntry, oolRejoin, temp);
+ } else {
+ if (mir->isUnsigned())
+ masm.wasmTruncateFloat32ToUInt64(input, output, oolEntry, oolRejoin, temp);
+ else
+ masm.wasmTruncateFloat32ToInt64(input, output, oolEntry, oolRejoin, temp);
+ }
+}
+
+void
+CodeGeneratorX64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ else
+ masm.convertInt64ToDouble(input, output);
+ } else {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ else
+ masm.convertInt64ToFloat32(input, output);
+ }
+}
+
+void
+CodeGeneratorX64::visitNotI64(LNotI64* lir)
+{
+ masm.cmpq(Imm32(0), ToRegister(lir->input()));
+ masm.emitSet(Assembler::Equal, ToRegister(lir->output()));
+}
+
+void
+CodeGeneratorX64::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void
+CodeGeneratorX64::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void
+CodeGeneratorX64::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register input = ToRegister(lir->input());
+ masm.testq(input, input);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
diff --git a/js/src/jit/x64/CodeGenerator-x64.h b/js/src/jit/x64/CodeGenerator-x64.h
new file mode 100644
index 000000000..c0290608d
--- /dev/null
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_CodeGenerator_x64_h
+#define jit_x64_CodeGenerator_x64_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorX64 : public CodeGeneratorX86Shared
+{
+ CodeGeneratorX64* thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ Operand ToOperand64(const LInt64Allocation& a);
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ void storeUnboxedValue(const LAllocation* value, MIRType valueType,
+ Operand dest, MIRType slotType);
+
+ void wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value, Operand dstAddr);
+ template <typename T> void emitWasmLoad(T* ins);
+ template <typename T> void emitWasmStore(T* ins);
+
+ public:
+ CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitValue(LValue* value);
+ void visitBox(LBox* box);
+ void visitUnbox(LUnbox* unbox);
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitNotI64(LNotI64* lir);
+ void visitClzI64(LClzI64* lir);
+ void visitCtzI64(LCtzI64* lir);
+ void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ void visitTruncateFToInt32(LTruncateFToInt32* ins);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitWasmSelectI64(LWasmSelectI64* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+ void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+};
+
+typedef CodeGeneratorX64 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_CodeGenerator_x64_h */
diff --git a/js/src/jit/x64/LIR-x64.h b/js/src/jit/x64/LIR-x64.h
new file mode 100644
index 000000000..f812ac692
--- /dev/null
+++ b/js/src/jit/x64/LIR-x64.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_LIR_x64_h
+#define jit_x64_LIR_x64_h
+
+namespace js {
+namespace jit {
+
+// Given an untyped input, guards on whether it's a specific type and returns
+// the unboxed payload.
+class LUnboxBase : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ explicit LUnboxBase(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+};
+
+class LUnbox : public LUnboxBase {
+ public:
+ LIR_HEADER(Unbox)
+
+ explicit LUnbox(const LAllocation& input)
+ : LUnboxBase(input)
+ { }
+
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LUnboxBase {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint)
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnboxBase(input),
+ type_(type)
+ { }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
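+// On x64 this lowers to cqo + idivq: the quotient is produced in rax and
+// the remainder in rdx, which is why a fixed temp is reserved for the
+// remainder below.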
+class LDivOrModI64 : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+// This class performs a simple x86 'div', yielding either a quotient or
+// remainder depending on whether this instruction is defined to output
+// rax (quotient) or rdx (remainder).
+class LUDivOrModI64 : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() {
+ return getTemp(0);
+ }
+
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_LIR_x64_h */
diff --git a/js/src/jit/x64/LOpcodes-x64.h b/js/src/jit/x64/LOpcodes-x64.h
new file mode 100644
index 000000000..b61caeb26
--- /dev/null
+++ b/js/src/jit/x64/LOpcodes-x64.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_LOpcodes_x64_h
+#define jit_x64_LOpcodes_x64_h
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(DivOrModConstantI) \
+ _(DivOrModI64) \
+ _(UDivOrModI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint) \
+ _(SimdValueInt32x4) \
+ _(SimdValueFloat32x4) \
+ _(UDivOrMod) \
+ _(UDivOrModConstant)
+
+#endif /* jit_x64_LOpcodes_x64_h */
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
new file mode 100644
index 000000000..70801dc05
--- /dev/null
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -0,0 +1,495 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/Lowering-x64.h"
+
+#include "jit/MIR.h"
+#include "jit/x64/Assembler-x64.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation
+LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation
+LIRGeneratorX64::useByteOpRegister(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition
+LIRGeneratorX64::tempByteOpRegister()
+{
+ return temp();
+}
+
+LDefinition
+LIRGeneratorX64::tempToUnbox()
+{
+ return temp();
+}
+
+void
+LIRGeneratorX64::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
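+    // x64 ALU instructions are two-operand, so reuse the lhs register as the
+    // output. If rhs is the same definition as lhs, it must also be used
+    // at-start so the register allocator assigns both uses the same register.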
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+    // X64 doesn't need a temp for 64-bit multiplication.
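+    // imulq has a two-operand register form, so the product is computed in
+    // place in the reused lhs register.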
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX64::visitBox(MBox* box)
+{
+ MDefinition* opd = box->getOperand(0);
+
+    // If the operand is a constant, emit it at its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void
+LIRGeneratorX64::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* box = unbox->getOperand(0);
+
+ if (box->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnboxBase* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new(alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new(alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ define(lir, unbox);
+}
+
+void
+LIRGeneratorX64::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void
+LIRGeneratorX64::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ defineTypedPhi(phi, lirIndex);
+}
+
+void
+LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void
+LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ defineTypedPhi(phi, lirIndex);
+}
+
+void
+LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void
+LIRGeneratorX64::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitWasmLoad(MWasmLoad* ins)
+{
+ if (ins->type() != MIRType::Int64) {
+ lowerWasmLoad(ins);
+ return;
+ }
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+ defineInt64(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* value = ins->value();
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ valueAlloc = useRegisterOrConstantAtStart(value);
+ break;
+ case Scalar::Int64:
+        // There is no way to encode an int64 immediate-to-memory move on
+        // x64, so int64 values must be stored from a register; smaller
+        // integer constants can still be stored directly.
+ if (value->isConstant() && value->type() != MIRType::Int64)
+ valueAlloc = useOrConstantAtStart(value);
+ else
+ valueAlloc = useRegisterAtStart(value);
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ valueAlloc = useRegisterAtStart(value);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
+ auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ define(new(alloc()) LAsmJSLoadHeap(useRegisterOrZeroAtStart(base)), ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAsmJSStoreHeap* lir = nullptr; // initialize to silence GCC warning
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+ useRegisterOrConstantAtStart(ins->value()));
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+ useRegisterAtStart(ins->value()));
+ break;
+ case Scalar::Int64:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // The output may not be used but will be clobbered regardless, so
+ // pin the output to eax.
+ //
+ // The input values must both be in registers.
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval = useRegister(ins->newValue());
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base), oldval, newval);
+
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void
+LIRGeneratorX64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ // The output may not be used but will be clobbered regardless,
+ // so ignore the case where we're not using the value and just
+ // use the output register as a temp.
+
+ LAsmJSAtomicExchangeHeap* lir =
+ new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR.
+
+ if (!ins->hasUses()) {
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
+ useRegisterOrConstant(ins->value()));
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD with word and byte ops as
+ // appropriate. Any output register can be used and if value is a
+ // register it's best if it's the same as output:
+ //
+ // movl value, output ; if value != output
+ // lock xaddl output, mem
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
+ // always in rax:
+ //
+ // movl *mem, rax
+ // L: mov rax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads rax also
+ // jnz L
+ // ; result in rax
+ //
+ // Note the placement of L, cmpxchg will update rax with *mem if
+ // *mem does not have the expected value, so reloading it at the
+ // top of the loop would be redundant.
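+    //
+    // In C++ terms the loop computes roughly the following (illustration
+    // only; |mem|, |value| and |compareExchange| are stand-ins, not
+    // functions in this tree):
+    //
+    //   uint32_t old = *mem;
+    //   uint32_t next;
+    //   do {
+    //       next = old & value;                       // and/or/xor
+    //   } while (!compareExchange(mem, &old, next));  // lock cmpxchg
+    //   return old;                                   // ends up in rax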
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+ bool reuseInput = false;
+ LAllocation value;
+
+ if (bitOp || ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ } else {
+ reuseInput = true;
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
+ value,
+ bitOp ? temp() : LDefinition::BogusTemp());
+
+ if (reuseInput)
+ defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
+ else if (bitOp)
+ defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitSubstr(MSubstr* ins)
+{
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ temp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorX64::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorX64::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
+
+void
+LIRGeneratorX64::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()),
+ tempFixed(rdx));
+ defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
+}
+
+void
+LIRGeneratorX64::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ tempFixed(rax));
+ defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
+}
+
+void
+LIRGeneratorX64::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(div->lhs()),
+ useRegister(div->rhs()),
+ tempFixed(rdx));
+ defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
+}
+
+void
+LIRGeneratorX64::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()),
+ useRegister(mod->rhs()),
+ tempFixed(rax));
+ defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
+}
+
+void
+LIRGeneratorX64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition maybeTemp = ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
+ defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp), ins);
+}
+
+void
+LIRGeneratorX64::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorX64::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ defineInt64(new(alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
+}
diff --git a/js/src/jit/x64/Lowering-x64.h b/js/src/jit/x64/Lowering-x64.h
new file mode 100644
index 000000000..24321f97b
--- /dev/null
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_Lowering_x64_h
+#define jit_x64_Lowering_x64_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX64 : public LIRGeneratorX86Shared
+{
+ public:
+ LIRGeneratorX64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart = false);
+
+ // x86 has constraints on what registers can be formatted for 1-byte
+ // stores and loads; on x64 all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitRandom(MRandom* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+};
+
+typedef LIRGeneratorX64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_Lowering_x64_h */
diff --git a/js/src/jit/x64/MacroAssembler-x64-inl.h b/js/src/jit/x64/MacroAssembler-x64-inl.h
new file mode 100644
index 000000000..f7a70e68e
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -0,0 +1,897 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_MacroAssembler_x64_inl_h
+#define jit_x64_MacroAssembler_x64_inl_h
+
+#include "jit/x64/MacroAssembler-x64.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ movq(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ movq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ andq(src, dest);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ andq(imm, dest);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
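+    // x64 ALU immediates are sign-extended from 32 bits, so the immediate
+    // form is only usable when the value fits in int32; otherwise the
+    // constant must go through the scratch register. or64 and xor64 below
+    // follow the same pattern.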
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ andq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ andq(scratch, dest.reg);
+ }
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ orq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ orq(scratch, dest.reg);
+ }
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
+ xorq(Imm32(imm.value), dest.reg);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ movq(ImmWord(uintptr_t(imm.value)), scratch);
+ xorq(scratch, dest.reg);
+ }
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ orq(src, dest);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ orq(imm, dest);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ andq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ orq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ xorq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ xorq(src, dest);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ xorq(imm, dest);
+}
+
+void
+MacroAssembler::and64(const Operand& src, Register64 dest)
+{
+ andq(src, dest.reg);
+}
+
+void
+MacroAssembler::or64(const Operand& src, Register64 dest)
+{
+ orq(src, dest.reg);
+}
+
+void
+MacroAssembler::xor64(const Operand& src, Register64 dest)
+{
+ xorq(src, dest.reg);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ addq(src, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ addq(imm, dest);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(dest != scratch);
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ addq(Imm32((int32_t)imm.value), dest);
+ } else {
+ mov(imm, scratch);
+ addq(scratch, dest);
+ }
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ addq(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest)
+{
+ addq(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ addq(Operand(src), dest);
+}
+
+void
+MacroAssembler::add64(const Operand& src, Register64 dest)
+{
+ addq(src, dest.reg);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ addq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ addq(imm, dest.reg);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ addPtr(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ subq(src, dest);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ subq(src, Operand(dest));
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ subq(imm, dest);
+}
+
+void
+MacroAssembler::subPtr(ImmWord imm, Register dest)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(dest != scratch);
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ subq(Imm32((int32_t)imm.value), dest);
+ } else {
+ mov(imm, scratch);
+ subq(scratch, dest);
+ }
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ subq(Operand(addr), dest);
+}
+
+void
+MacroAssembler::sub64(const Operand& src, Register64 dest)
+{
+ subq(src, dest.reg);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ subq(src.reg, dest.reg);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ subPtr(ImmWord(imm.value), dest.reg);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(imm, dest);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ movq(ImmWord(uintptr_t(imm.value)), ScratchReg);
+ imulq(ScratchReg, dest.reg);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(Operand(src.reg), dest);
+}
+
+void
+MacroAssembler::mul64(const Operand& src, const Register64& dest)
+{
+ imulq(src, dest.reg);
+}
+
+void
+MacroAssembler::mul64(const Operand& src, const Register64& dest, const Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(src, dest);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ movq(imm, ScratchReg);
+ vmulsd(Operand(ScratchReg, 0), dest, dest);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
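+    // An absolute address can be encoded directly only when it fits in an
+    // instruction immediate; otherwise it must first be materialized in the
+    // scratch register. The branch functions below use the same pattern.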
+ if (X86Encoding::IsAddressImmediate(dest.addr)) {
+ addPtr(Imm32(1), dest);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(dest.addr), scratch);
+ addPtr(Imm32(1), Address(scratch, 0));
+ }
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
+ negq(reg.reg);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ shlq(imm, dest);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ lshiftPtr(imm, dest.reg);
+}
+
+void
+MacroAssembler::lshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == rcx);
+ shlq_cl(srcDest.reg);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ shrq(imm, dest);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ rshiftPtr(imm, dest.reg);
+}
+
+void
+MacroAssembler::rshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == rcx);
+ shrq_cl(srcDest.reg);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ sarq(imm, dest);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ rshiftPtrArithmetic(imm, dest.reg);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == rcx);
+ sarq_cl(srcDest.reg);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ rolq_cl(dest.reg);
+}
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateLeft64(count, src, dest);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ rorq_cl(dest.reg);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateRight64(count, src, dest);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ rolq(count, dest.reg);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateLeft64(count, src, dest);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ rorq(count, dest.reg);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ rotateRight64(count, src, dest);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ // On very recent chips (Haswell and newer) there is actually an
+ // LZCNT instruction that does all of this.
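+    //
+    // bsrq computes the index of the highest set bit, and x ^ 0x3F == 63 - x
+    // for 6-bit values, so the xorq below turns that index into the
+    // leading-zero count. For a zero input bsrq leaves the destination
+    // undefined, so that case loads 0x7F, which the xorq turns into 64.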
+
+ Label nonzero;
+ bsrq(src.reg, dest);
+ j(Assembler::NonZero, &nonzero);
+ movq(ImmWord(0x7F), dest);
+ bind(&nonzero);
+ xorq(Imm32(0x3F), dest);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
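+    // bsfq computes the index of the lowest set bit, which is exactly the
+    // trailing-zero count, but leaves the destination undefined for a zero
+    // input; that case explicitly loads 64.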
+ Label nonzero;
+ bsfq(src.reg, dest);
+ j(Assembler::NonZero, &nonzero);
+ movq(ImmWord(64), dest);
+ bind(&nonzero);
+}
+
+void
+MacroAssembler::popcnt64(Register64 src64, Register64 dest64, Register tmp)
+{
+ Register src = src64.reg;
+ Register dest = dest64.reg;
+
+ if (AssemblerX86Shared::HasPOPCNT()) {
+ MOZ_ASSERT(tmp == InvalidReg);
+ popcntq(src, dest);
+ return;
+ }
+
+ if (src != dest)
+ movq(src, dest);
+
+ MOZ_ASSERT(tmp != dest);
+
+ ScratchRegisterScope scratch(*this);
+
+ // Equivalent to mozilla::CountPopulation32, adapted for 64 bits.
+ // x -= (x >> 1) & m1;
+ movq(src, tmp);
+ movq(ImmWord(0x5555555555555555), scratch);
+ shrq(Imm32(1), tmp);
+ andq(scratch, tmp);
+ subq(tmp, dest);
+
+ // x = (x & m2) + ((x >> 2) & m2);
+ movq(dest, tmp);
+ movq(ImmWord(0x3333333333333333), scratch);
+ andq(scratch, dest);
+ shrq(Imm32(2), tmp);
+ andq(scratch, tmp);
+ addq(tmp, dest);
+
+ // x = (x + (x >> 4)) & m4;
+ movq(dest, tmp);
+ movq(ImmWord(0x0f0f0f0f0f0f0f0f), scratch);
+ shrq(Imm32(4), tmp);
+ addq(tmp, dest);
+ andq(scratch, dest);
+
+ // (x * h01) >> 56
+ movq(ImmWord(0x0101010101010101), scratch);
+ imulq(scratch, dest);
+ shrq(Imm32(56), dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branch32(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+ }
+}
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branch32(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+ }
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ mov(lhs, scratch);
+ branch32(cond, Address(scratch, 0), rhs, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail)
+ jump(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail)
+ jump(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(rhs != scratch);
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branchPtrImpl(cond, Operand(lhs), rhs, label);
+ } else {
+ mov(ImmPtr(lhs.addr), scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+ }
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ branchPtrImpl(cond, Operand(lhs), rhs, label);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+ }
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(rhs != scratch);
+ mov(lhs, scratch);
+ branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ if (rhs != scratch)
+ movePtr(rhs, scratch);
+ // Instead of unboxing lhs, box rhs and do direct comparison with lhs.
+ rshiftPtr(Imm32(1), scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttss2sq(src, dest);
+
+    // Same trick as for doubles; see branchTruncateDoubleToPtr below.
+ cmpPtr(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToPtr(src, dest, fail);
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToPtr(src, dest, fail);
+    // Check that the truncated result fits in int32: sign-extend the low 32
+    // bits and fail if that differs from the full 64-bit result.
+    ScratchRegisterScope scratch(*this);
+    movslq(dest, scratch);
+    cmpPtr(dest, scratch);
+    j(Assembler::NotEqual, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttsd2sq(src, dest);
+
+ // vcvttsd2sq returns 0x8000000000000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this avoids the need to
+ // materialize that value in a register).
+ cmpPtr(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToPtr(src, dest, fail);
+ movl(dest, dest); // Zero upper 32-bits.
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToPtr(src, dest, fail);
+    // Check that the truncated result fits in int32: sign-extend the low 32
+    // bits and fail if that differs from the full 64-bit result.
+    ScratchRegisterScope scratch(*this);
+    movslq(dest, scratch);
+    cmpPtr(dest, scratch);
+    j(Assembler::NotEqual, fail);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ if (X86Encoding::IsAddressImmediate(lhs.addr)) {
+ test32(Operand(lhs), rhs);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ mov(ImmPtr(lhs.addr), scratch);
+ test32(Operand(scratch, 0), rhs);
+ }
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ test32(value.valueReg(), value.valueReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(valaddr, ImmWord(magic));
+ j(cond, label);
+}
+// ========================================================================
+// Truncate floating point.
+
+void
+MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+    // For unsigned conversion the (INT64_MAX, UINT64_MAX] range needs to be
+    // handled separately: the signed truncation above yields a negative
+    // result for those inputs.
+ loadPtr(dest, temp);
+ branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+    // Move the value into int64 range by subtracting 2^63, truncate it,
+    // then set the sign bit in the result to add 2^63 back.
+    storeFloat32(floatTemp, dest);
+    loadConstantFloat32(float(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
+ loadPtr(dest, temp);
+ or64(Imm64(0x8000000000000000), Register64(temp));
+ storePtr(temp, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::truncateDoubleToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+    // For unsigned conversion the (INT64_MAX, UINT64_MAX] range needs to be
+    // handled separately: the signed truncation above yields a negative
+    // result for those inputs.
+ loadPtr(dest, temp);
+ branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+    // Move the value into int64 range by subtracting 2^63, truncate it,
+    // then set the sign bit in the result to add 2^63 back.
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
+ loadPtr(dest, temp);
+ or64(Imm64(0x8000000000000000), Register64(temp));
+ storePtr(temp, dest);
+
+ bind(&done);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
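+    // On x64 the wasm heap is allocated with a large guard region and
+    // out-of-bounds accesses are caught via signal handling, so explicit
+    // bounds checks are never emitted.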
+ MOZ_CRASH("x64 should never emit a bounds check");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ MOZ_CRASH("x64 should never emit a bounds check");
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssemblerX64::incrementInt32Value(const Address& addr)
+{
+ asMasm().addPtr(Imm32(1), addr);
+}
+
+void
+MacroAssemblerX64::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr());
+ }
+}
+
+template <typename T>
+void
+MacroAssemblerX64::loadInt32OrDouble(const T& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src, dest);
+ jump(&end);
+ bind(&notInt32);
+ loadDouble(src, dest);
+ bind(&end);
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void
+MacroAssemblerX64::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
+{
+ Label isDouble, done;
+ Register tag = splitTagForTest(source);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+
+ ScratchRegisterScope scratch(asMasm());
+ unboxInt32(source, scratch);
+ convertInt32ToDouble(scratch, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_MacroAssembler_x64_inl_h */
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
new file mode 100644
index 000000000..9d8287824
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -0,0 +1,859 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x64/MacroAssembler-x64.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MacroAssemblerX64::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ if (maybeInlineDouble(d, dest))
+ return;
+ Double* dbl = getDouble(d);
+ if (!dbl)
+ return;
+ // The constants will be stored in a pool appended to the text (see
+ // finish()), so they will always be a fixed distance from the
+ // instructions which reference them. This allows the instructions to use
+ // PC-relative addressing. Use "jump" label support code, because we need
+ // the same PC-relative address patching that jumps use.
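+    // Each use is recorded here and patched by bindOffsets(), called from
+    // finish() once the pool has been emitted.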
+ JmpSrc j = masm.vmovsd_ripr(dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ if (maybeInlineFloat(f, dest))
+ return;
+ Float* flt = getFloat(f);
+ if (!flt)
+ return;
+    // See the comment in loadConstantDouble.
+ JmpSrc j = masm.vmovss_ripr(dest.encoding());
+ propagateOOM(flt->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Int(v, dest))
+ return;
+ SimdData* val = getSimdData(v);
+ if (!val)
+ return;
+ JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
+{
+ loadConstantFloat32(wasm::RawF32(f), dest);
+}
+
+void
+MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
+{
+ loadConstantDouble(wasm::RawF64(d), dest);
+}
+
+void
+MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Float(v, dest))
+ return;
+ SimdData* val = getSimdData(v);
+ if (!val)
+ return;
+ JmpSrc j = masm.vmovaps_ripr(dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(j.offset())));
+}
+
+void
+MacroAssemblerX64::convertInt64ToDouble(Register64 input, FloatRegister output)
+{
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ vcvtsq2sd(input.reg, output, output);
+}
+
+void
+MacroAssemblerX64::convertInt64ToFloat32(Register64 input, FloatRegister output)
+{
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroFloat32(output);
+
+ vcvtsq2ss(input.reg, output, output);
+}
+
+bool
+MacroAssemblerX64::convertUInt64ToDoubleNeedsTemp()
+{
+ return false;
+}
+
+void
+MacroAssemblerX64::convertUInt64ToDouble(Register64 input, FloatRegister output, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ // If the input's sign bit is not set we use vcvtsq2sd directly.
+ // Else, we divide by 2, convert to double, and multiply the result by 2.
+ Label done;
+ Label isSigned;
+
+ testq(input.reg, input.reg);
+ j(Assembler::Signed, &isSigned);
+ vcvtsq2sd(input.reg, output, output);
+ jump(&done);
+
+ bind(&isSigned);
+
+ ScratchRegisterScope scratch(asMasm());
+ mov(input.reg, scratch);
+ shrq(Imm32(1), scratch);
+ vcvtsq2sd(scratch, output, output);
+ vaddsd(output, output, output);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerX64::convertUInt64ToFloat32(Register64 input, FloatRegister output, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroFloat32(output);
+
+ // If the input's sign bit is not set we use vcvtsq2ss directly.
+ // Else, we divide by 2, convert to float, and multiply the result by 2.
+ Label done;
+ Label isSigned;
+
+ testq(input.reg, input.reg);
+ j(Assembler::Signed, &isSigned);
+ vcvtsq2ss(input.reg, output, output);
+ jump(&done);
+
+ bind(&isSigned);
+
+ ScratchRegisterScope scratch(asMasm());
+ mov(input.reg, scratch);
+ shrq(Imm32(1), scratch);
+ vcvtsq2ss(scratch, output, output);
+ vaddss(output, output, output);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerX64::wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ vcvttsd2sq(input, output.reg);
+ cmpq(Imm32(1), output.reg);
+ j(Assembler::Overflow, oolEntry);
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ vcvttss2sq(input, output.reg);
+ cmpq(Imm32(1), output.reg);
+ j(Assembler::Overflow, oolEntry);
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+    // If the input is smaller than 2^63, vcvttsd2sq will do the right
+    // thing, so use it directly. Otherwise subtract 2^63, convert to
+    // int64, and set the sign bit in the result to add 2^63 back.
+
+ Label isLarge;
+
+ ScratchDoubleScope scratch(asMasm());
+ loadConstantDouble(double(0x8000000000000000), scratch);
+ asMasm().branchDouble(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
+ vcvttsd2sq(input, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ jump(oolRejoin);
+
+ bind(&isLarge);
+
+ moveDouble(input, tempReg);
+ vsubsd(scratch, tempReg, tempReg);
+ vcvttsd2sq(tempReg, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ asMasm().or64(Imm64(0x8000000000000000), output);
+
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+    // If the input is smaller than 2^63, vcvttss2sq will do the right
+    // thing, so use it directly. Otherwise subtract 2^63, convert to
+    // int64, and set the sign bit in the result to add 2^63 back.
+
+ Label isLarge;
+
+ ScratchFloat32Scope scratch(asMasm());
+ loadConstantFloat32(float(0x8000000000000000), scratch);
+ asMasm().branchFloat(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
+ vcvttss2sq(input, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ jump(oolRejoin);
+
+ bind(&isLarge);
+
+ moveFloat32(input, tempReg);
+ vsubss(scratch, tempReg, tempReg);
+ vcvttss2sq(tempReg, output.reg);
+ testq(output.reg, output.reg);
+ j(Assembler::Signed, oolEntry);
+ asMasm().or64(Imm64(0x8000000000000000), output);
+
+ bind(oolRejoin);
+}
+
+void
+MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
+{
+ for (CodeOffset use : uses) {
+ JmpDst dst(currentOffset());
+ JmpSrc src(use.offset());
+        // Using linkJump here is safe, as explained in the comment in
+        // loadConstantDouble.
+ masm.linkJump(src, dst);
+ }
+}
+
+void
+MacroAssemblerX64::finish()
+{
+ if (!doubles_.empty())
+ masm.haltingAlign(sizeof(double));
+ for (const Double& d : doubles_) {
+ bindOffsets(d.uses);
+ masm.int64Constant(d.value);
+ }
+
+ if (!floats_.empty())
+ masm.haltingAlign(sizeof(float));
+ for (const Float& f : floats_) {
+ bindOffsets(f.uses);
+ masm.int32Constant(f.value);
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty())
+ masm.haltingAlign(SimdMemoryAlignment);
+ for (const SimdData& v : simds_) {
+ bindOffsets(v.uses);
+ masm.simd128Constant(v.value.bytes());
+ }
+
+ MacroAssemblerX86Shared::finish();
+}
+
+void
+MacroAssemblerX64::boxValue(JSValueType type, Register src, Register dest)
+{
+ MOZ_ASSERT(src != dest);
+
+ JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+#ifdef DEBUG
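+    // In debug builds, check that the upper 32 bits of |src| are zero: the
+    // shifted tag is OR'ed into them below, so a dirty payload would
+    // produce a corrupt Value.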
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ Label upper32BitsZeroed;
+ movePtr(ImmWord(UINT32_MAX), dest);
+ asMasm().branchPtr(Assembler::BelowOrEqual, src, dest, &upper32BitsZeroed);
+ breakpoint();
+ bind(&upper32BitsZeroed);
+ }
+#endif
+ mov(ImmShiftedTag(tag), dest);
+ orq(src, dest);
+}
+
+void
+MacroAssemblerX64::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ subq(Imm32(sizeof(ResumeFromException)), rsp);
+ movq(rsp, rax);
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(rcx);
+ asMasm().passABIArg(rax);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ loadPtr(Address(rsp, offsetof(ResumeFromException, kind)), rax);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
+ asMasm().branch32(Assembler::Equal, rax, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+ jmp(Operand(rax));
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(rcx);
+    loadValue(Address(rsp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jmp(Operand(rax));
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+ bind(&return_);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
+ loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
+ loadValue(Address(rbp, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
+ movq(rbp, rsp);
+ pop(rbp);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to caller
+ // frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+    loadPtr(Address(rsp, offsetof(ResumeFromException, bailoutInfo)), r9);
+ mov(ImmWord(BAILOUT_RETURN_OK), rax);
+ jmp(Operand(rsp, offsetof(ResumeFromException, target)));
+}
+
+void
+MacroAssemblerX64::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerX64::profilerExitFrame()
+{
+ jmp(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler&
+MacroAssemblerX64::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerX64::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value) {
+        // On Windows, we cannot skip very far down the stack without touching the
+        // memory pages in-between. This is corner-case code for situations where
+        // the Ion frame data for a piece of code is very large. To handle this
+        // special case, for frames over one page (4096 bytes) in size we allocate
+        // stack memory incrementally, touching each page as we go.
+ uint32_t amountLeft = imm32.value;
+ while (amountLeft > 4096) {
+ subq(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subq(Imm32(amountLeft), StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ movq(rsp, scratch);
+ andq(Imm32(~(ABIStackAlignment - 1)), rsp);
+ push(scratch);
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ static_assert(sizeof(wasm::Frame) % ABIStackAlignment == 0,
+ "wasm::Frame should be part of the stack alignment.");
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed(),
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ freeStack(stackAdjust);
+ if (dynamicAlignment_)
+ pop(rsp);
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+static bool
+IsIntArgReg(Register reg)
+{
+ for (uint32_t i = 0; i < NumIntArgRegs; i++) {
+ if (IntArgRegs[i] == reg)
+ return true;
+ }
+
+ return false;
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ if (IsIntArgReg(fun)) {
+ // Callee register may be clobbered for an argument. Move the callee to
+ // r10, a volatile, non-argument register.
+ propagateOOM(moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10),
+ MoveOp::GENERAL));
+ fun = r10;
+ }
+
+ MOZ_ASSERT(!IsIntArgReg(fun));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ Address safeFun = fun;
+ if (IsIntArgReg(safeFun.base)) {
+ // Callee register may be clobbered for an argument. Move the callee to
+ // r10, a volatile, non-argument register.
+ propagateOOM(moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10),
+ MoveOp::GENERAL));
+ safeFun.base = r10;
+ }
+
+ MOZ_ASSERT(!IsIntArgReg(safeFun.base));
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(safeFun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != scratch);
+
+ movePtr(ptr, scratch);
+ orPtr(Imm32(gc::ChunkMask), scratch);
+ branch32(cond, Address(scratch, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, address, temp, label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
+ Label* label)
+{
+ branchValueIsNurseryObjectImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ Label done;
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+
+ extractObject(value, temp);
+ orPtr(Imm32(gc::ChunkMask), temp);
+ branch32(cond, Address(temp, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, scratch);
+ cmpPtr(lhs.valueReg(), scratch);
+ j(cond, label);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // For known integers and booleans, we can just store the unboxed value if
+ // the slot has the same type.
+ if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
+ if (value.constant()) {
+ Value val = value.value();
+ if (valueType == MIRType::Int32)
+ store32(Imm32(val.toInt32()), dest);
+ else
+ store32(Imm32(val.toBoolean() ? 1 : 0), dest);
+ } else {
+ store32(value.reg().typedReg().gpr(), dest);
+ }
+ return;
+ }
+
+ if (value.constant())
+ storeValue(value.value(), dest);
+ else
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
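+// Hypothetical call site, for illustration only: storing a known int32 into a
+// slot already typed as int32 emits a single 32-bit store rather than a full
+// boxed-Value write:
+//   masm.storeUnboxedValue(ConstantOrRegister(Int32Value(7)), MIRType::Int32,
+//                          Address(obj, offset), MIRType::Int32);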
+
+// ========================================================================
+// wasm support
+
+void
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ movzbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Int16:
+ movswl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ movzwl(srcAddr, out.gpr());
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ loadFloat32(srcAddr, out.fpu());
+ break;
+ case Scalar::Float64:
+ loadDouble(srcAddr, out.fpu());
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movss zeroes out the high lanes.
+ case 1: loadFloat32(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movsd.
+ case 2: loadDouble(srcAddr, out.fpu()); break;
+ case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movd zeroes out the high lanes.
+ case 1: vmovd(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movq.
+ case 2: vmovq(srcAddr, out.fpu()); break;
+ case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
+ loadUnalignedSimd128Int(srcAddr, out.fpu());
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
+ loadUnalignedSimd128Int(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("int64 loads must use load64");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ append(access, loadOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
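+// The size() offset captured before the load, together with framePushed(), is
+// what append() records for this access, presumably so a faulting instruction
+// can be mapped back to its wasm::MemoryAccessDesc (e.g. by signal-handler
+// based bounds-check machinery).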
+
+void
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
+{
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(!access.isSimd());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsbq(srcAddr, out.reg);
+ break;
+ case Scalar::Uint8:
+ movzbq(srcAddr, out.reg);
+ break;
+ case Scalar::Int16:
+ movswq(srcAddr, out.reg);
+ break;
+ case Scalar::Uint16:
+ movzwq(srcAddr, out.reg);
+ break;
+ case Scalar::Int32:
+ movslq(srcAddr, out.reg);
+ break;
+      case Scalar::Uint32:
+        // movl zero-extends into the 64-bit destination, which is exactly
+        // the uint32 -> int64 conversion we want here.
+        movl(srcAddr, out.reg);
+ break;
+ case Scalar::Int64:
+ movq(srcAddr, out.reg);
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ MOZ_CRASH("non-int64 loads should use load()");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ append(access, loadOffset, framePushed());
+}
+
+void
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t storeOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ movb(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ movw(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movl(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int64:
+ movq(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ storeUncanonicalizedFloat32(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ storeUncanonicalizedDouble(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+          // In register-to-memory mode, movss writes only the low lane.
+ case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
+ // See comment above, which also applies to movsd.
+ case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
+ case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
+          default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+          // In register-to-memory mode, movd writes only the low lane.
+ case 1: vmovd(value.fpu(), dstAddr); break;
+ // See comment above, which also applies to movq.
+ case 2: vmovq(value.fpu(), dstAddr); break;
+ case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
+          default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
+ storeUnalignedSimd128Int(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
+ storeUnalignedSimd128Int(value.fpu(), dstAddr);
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ append(access, storeOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ vcvttsd2sq(input, output);
+
+ // Check that the result is in the uint32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32(Imm32(0xffffffff), scratch);
+ cmpq(scratch, output);
+ j(Assembler::Above, oolEntry);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ vcvttss2sq(input, output);
+
+ // Check that the result is in the uint32_t range.
+ ScratchRegisterScope scratch(*this);
+ move32(Imm32(0xffffffff), scratch);
+ cmpq(scratch, output);
+ j(Assembler::Above, oolEntry);
+}
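+// Both truncations go through a signed 64-bit conversion: inputs in
+// [0, UINT32_MAX] pass the unsigned compare against 0xffffffff, while
+// negative inputs, values of 2^32 or more, and NaN (which cvttsd2sq /
+// cvttss2sq turn into 0x8000000000000000) all compare Above and branch to
+// the ool entry.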
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x64/MacroAssembler-x64.h b/js/src/jit/x64/MacroAssembler-x64.h
new file mode 100644
index 000000000..cb81bd7c1
--- /dev/null
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -0,0 +1,966 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_MacroAssembler_x64_h
+#define jit_x64_MacroAssembler_x64_h
+
+#include "jit/JitFrames.h"
+#include "jit/MoveResolver.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+struct ImmShiftedTag : public ImmWord
+{
+ explicit ImmShiftedTag(JSValueShiftedTag shtag)
+ : ImmWord((uintptr_t)shtag)
+ { }
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
+ { }
+};
+
+struct ImmTag : public Imm32
+{
+ explicit ImmTag(JSValueTag tag)
+ : Imm32(tag)
+ { }
+};
+
+class MacroAssemblerX64 : public MacroAssemblerX86Shared
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ void bindOffsets(const MacroAssemblerX86Shared::UsesVector&);
+
+ public:
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store32;
+ using MacroAssemblerX86Shared::store16;
+
+ MacroAssemblerX64()
+ {
+ }
+
+    // The buffer is about to be linked; make sure any constant pools or
+    // excess bookkeeping have been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X64 helpers.
+ /////////////////////////////////////////////////////////////////
+ void writeDataRelocation(const Value& val) {
+ if (val.isMarkable()) {
+ gc::Cell* cell = val.toMarkablePointer();
+ if (cell && gc::IsInsideNursery(cell))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(masm.currentOffset());
+ }
+ }
+
+ // Refers to the upper 32 bits of a 64-bit Value operand.
+ // On x86_64, the upper 32 bits do not necessarily only contain the type.
+ Operand ToUpper32(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()), base.disp() + 4);
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()), Register::FromCode(base.index()),
+ base.scale(), base.disp() + 4);
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ static inline Operand ToUpper32(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ static inline Operand ToUpper32(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset + 4);
+ }
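+    // For example, ToUpper32(Address(rbx, 8)) yields Operand(rbx, 12): the
+    // upper 32-bit half of the slot, which holds the tag bits on
+    // little-endian x64.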
+
+ uint32_t Upper32Of(JSValueShiftedTag tag) {
+ union { // Implemented in this way to appease MSVC++.
+ uint64_t tag;
+ struct {
+ uint32_t lo32;
+ uint32_t hi32;
+ } s;
+ } e;
+ e.tag = tag;
+ return e.s.hi32;
+ }
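+    // The union read above relies on little-endian layout, which holds on
+    // x64: hi32 is the half at byte offset 4.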
+
+ JSValueShiftedTag GetShiftedTag(JSValueType type) {
+ return (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+ Address ToPayload(Address value) {
+ return value;
+ }
+
+ void storeValue(ValueOperand val, Operand dest) {
+ movq(val.valueReg(), dest);
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ // Value types with 32-bit payloads can be emitted as two 32-bit moves.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movl(reg, Operand(dest));
+ movl(Imm32(Upper32Of(GetShiftedTag(type))), ToUpper32(Operand(dest)));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ boxValue(type, reg, scratch);
+ movq(scratch, Operand(dest));
+ }
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ if (val.isMarkable()) {
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ writeDataRelocation(val);
+ } else {
+ mov(ImmWord(val.asRawBits()), scratch);
+ }
+ movq(scratch, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+ void loadValue(Operand src, ValueOperand val) {
+ movq(src, val.valueReg());
+ }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.valueReg() != scratch);
+ if (payload != dest.valueReg())
+ movq(payload, dest.valueReg());
+ mov(ImmShiftedTag(type), scratch);
+ orq(scratch, dest.valueReg());
+ }
+ void pushValue(ValueOperand val) {
+ push(val.valueReg());
+ }
+ void popValue(ValueOperand val) {
+ pop(val.valueReg());
+ }
+ void pushValue(const Value& val) {
+ if (val.isMarkable()) {
+ ScratchRegisterScope scratch(asMasm());
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ writeDataRelocation(val);
+ push(scratch);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ ScratchRegisterScope scratch(asMasm());
+ boxValue(type, reg, scratch);
+ push(scratch);
+ }
+ void pushValue(const Address& addr) {
+ push(Operand(addr));
+ }
+
+ void moveValue(const Value& val, Register dest) {
+ movWithPatch(ImmWord(val.asRawBits()), dest);
+ writeDataRelocation(val);
+ }
+ void moveValue(const Value& src, const ValueOperand& dest) {
+ moveValue(src, dest.valueReg());
+ }
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ if (src.valueReg() != dest.valueReg())
+ movq(src.valueReg(), dest.valueReg());
+ }
+ void boxValue(JSValueType type, Register src, Register dest);
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, Imm32(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testNull(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testString(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testObject(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testPrimitive(cond, scratch);
+ }
+
+
+ Condition testUndefined(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_UNDEFINED))));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_INT32))));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_BOOLEAN))));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testNumber(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNumber(cond, scratch);
+ }
+ Condition testNull(Condition cond, const Address& src) {
+ cmp32(ToUpper32(src), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_NULL))));
+ return cond;
+ }
+ Condition testString(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testObject(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testPrimitive(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testPrimitive(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const Address& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+
+
+ Condition testUndefined(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testUndefined(cond, scratch);
+ }
+ Condition testNull(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testNull(cond, scratch);
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testBoolean(cond, scratch);
+ }
+ Condition testString(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testString(cond, scratch);
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testSymbol(cond, scratch);
+ }
+ Condition testInt32(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testInt32(cond, scratch);
+ }
+ Condition testObject(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testObject(cond, scratch);
+ }
+ Condition testDouble(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testDouble(cond, scratch);
+ }
+ Condition testMagic(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testGCThing(cond, scratch);
+ }
+
+ Condition isMagic(Condition cond, const ValueOperand& src, JSWhyMagic why) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ cmpPtr(src.valueReg(), ImmWord(magic));
+ return cond;
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ if (intptr_t(rhs.value) <= INT32_MAX && intptr_t(rhs.value) >= INT32_MIN) {
+ cmpPtr(lhs, Imm32(int32_t(rhs.value)));
+ } else {
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ }
+ void cmpPtr(Register lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ void cmpPtr(Register lhs, const Imm32 rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(!lhs.containsReg(scratch));
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ if ((intptr_t)rhs.value <= INT32_MAX && (intptr_t)rhs.value >= INT32_MIN) {
+ cmpPtr(lhs, Imm32((int32_t)rhs.value));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(rhs, scratch);
+ cmpPtr(lhs, scratch);
+ }
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Operand& lhs, Register rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(Register lhs, const Operand& rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(const Operand& lhs, const Imm32 rhs) {
+ cmpq(rhs, lhs);
+ }
+ void cmpPtr(const Address& lhs, Register rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(Register lhs, Register rhs) {
+ cmpq(rhs, lhs);
+ }
+ void testPtr(Register lhs, Register rhs) {
+ testq(rhs, lhs);
+ }
+ void testPtr(Register lhs, Imm32 rhs) {
+ testq(rhs, lhs);
+ }
+ void testPtr(const Operand& lhs, Imm32 rhs) {
+ testq(rhs, lhs);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
+ JmpSrc src = jmpSrc(label);
+ return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond,
+ Label* documentation = nullptr)
+ {
+ JmpSrc src = jSrc(cond, label);
+ return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
+ }
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
+ return jumpWithPatch(label);
+ }
+
+ void movePtr(Register src, Register dest) {
+ movq(src, dest);
+ }
+ void movePtr(Register src, const Operand& dest) {
+ movq(src, dest);
+ }
+ void movePtr(ImmWord imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(ImmPtr imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(ImmGCPtr imm, Register dest) {
+ movq(imm, dest);
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movq(Operand(address), dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ loadPtr(Address(scratch, 0x0), dest);
+ }
+ }
+ void loadPtr(const Address& address, Register dest) {
+ movq(Operand(address), dest);
+ }
+ void loadPtr(const Operand& src, Register dest) {
+ movq(src, dest);
+ }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ movq(Operand(src), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ shlq(Imm32(1), dest);
+ }
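+    // Private pointers are stored shifted right by one bit, keeping the word
+    // clear of the tag bits; the shlq above recovers the original (even)
+    // pointer.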
+ void load32(AbsoluteAddress address, Register dest) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movl(Operand(address), dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ load32(Address(scratch, 0x0), dest);
+ }
+ }
+ void load64(const Address& address, Register64 dest) {
+ movq(Operand(address), dest.reg);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
+ movq(Imm32((int32_t)imm.value), Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(imm, scratch);
+ movq(scratch, Operand(address));
+ }
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ ScratchRegisterScope scratch(asMasm());
+ movq(imm, scratch);
+ movq(scratch, Operand(address));
+ }
+ void storePtr(Register src, const Address& address) {
+ movq(src, Operand(address));
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ movq(src, Operand(address));
+ }
+ void storePtr(Register src, const Operand& dest) {
+ movq(src, dest);
+ }
+ void storePtr(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movq(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ storePtr(src, Address(scratch, 0x0));
+ }
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movl(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ store32(src, Address(scratch, 0x0));
+ }
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ if (X86Encoding::IsAddressImmediate(address.addr)) {
+ movw(src, Operand(address));
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmPtr(address.addr), scratch);
+ store16(src, Address(scratch, 0x0));
+ }
+ }
+ void store64(Register64 src, Address address) {
+ storePtr(src.reg, address);
+ }
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void splitTag(Register src, Register dest) {
+ if (src != dest)
+ movq(src, dest);
+ shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+ }
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+ void splitTag(const Operand& operand, Register dest) {
+ movq(operand, dest);
+ shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+ }
+ void splitTag(const Address& operand, Register dest) {
+ splitTag(Operand(operand), dest);
+ }
+ void splitTag(const BaseIndex& operand, Register dest) {
+ splitTag(Operand(operand), dest);
+ }
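+    // With JSVAL_TAG_SHIFT == 47, the shift leaves just the 17-bit tag in
+    // |dest|, which the testXXX helpers above compare against the
+    // JSVAL_TAG_* constants.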
+
+ // Extracts the tag of a value and places it in ScratchReg.
+ Register splitTagForTest(const ValueOperand& value) {
+ splitTag(value, ScratchReg);
+ return ScratchReg;
+ }
+ void cmpTag(const ValueOperand& operand, ImmTag tag) {
+ Register reg = splitTagForTest(operand);
+ cmp32(reg, tag);
+ }
+
+ Condition testMagic(Condition cond, const ValueOperand& src) {
+ ScratchRegisterScope scratch(asMasm());
+ splitTag(src, scratch);
+ return testMagic(cond, scratch);
+ }
+ Condition testError(Condition cond, const ValueOperand& src) {
+ return testMagic(cond, src);
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void boxDouble(FloatRegister src, const ValueOperand& dest) {
+ vmovq(src, dest.valueReg());
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+ }
+
+    // Note that the |dest| register here may be ScratchReg, so we must not
+    // use ScratchReg internally in these unboxers.
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+ void unboxInt32(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void unboxInt32(const Address& src, Register dest) {
+ unboxInt32(Operand(src), dest);
+ }
+ void unboxDouble(const Address& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
+
+ void unboxArgObjMagic(const ValueOperand& src, Register dest) {
+ unboxArgObjMagic(Operand(src.valueReg()), dest);
+ }
+ void unboxArgObjMagic(const Operand& src, Register dest) {
+ mov(ImmWord(0), dest);
+ }
+ void unboxArgObjMagic(const Address& src, Register dest) {
+ unboxArgObjMagic(Operand(src), dest);
+ }
+
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+ void unboxBoolean(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void unboxBoolean(const Address& src, Register dest) {
+ unboxBoolean(Operand(src), dest);
+ }
+
+ void unboxMagic(const ValueOperand& src, Register dest) {
+ movl(src.valueReg(), dest);
+ }
+
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ vmovq(src.valueReg(), dest);
+ }
+ void unboxPrivate(const ValueOperand& src, const Register dest) {
+ movq(src.valueReg(), dest);
+ shlq(Imm32(1), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ xorq(Imm32(1), val.valueReg());
+ }
+
+ // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean
+ // instead if the source type is known.
+ void unboxNonDouble(const ValueOperand& src, Register dest) {
+ if (src.valueReg() == dest) {
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), scratch);
+ andq(scratch, dest);
+ } else {
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), dest);
+ andq(src.valueReg(), dest);
+ }
+ }
+ void unboxNonDouble(const Operand& src, Register dest) {
+ // Explicitly permits |dest| to be used in |src|.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest != scratch);
+ if (src.containsReg(dest)) {
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), scratch);
+ // If src is already a register, then src and dest are the same
+ // thing and we don't need to move anything into dest.
+ if (src.kind() != Operand::REG)
+ movq(src, dest);
+ andq(scratch, dest);
+ } else {
+ mov(ImmWord(JSVAL_PAYLOAD_MASK), dest);
+ andq(src, dest);
+ }
+ }
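+    // In both paths the andq with JSVAL_PAYLOAD_MASK clears the upper 17 tag
+    // bits, leaving the 47-bit payload (a pointer or a zero-extended 32-bit
+    // value).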
+
+ void unboxString(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
+
+ void unboxSymbol(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
+
+ void unboxObject(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Address& src, Register dest) { unboxNonDouble(Operand(src), dest); }
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(Operand(src), dest); }
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxObject(address, scratch);
+ return scratch;
+ }
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ Register extractTag(const Address& address, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ loadPtr(address, scratch);
+ splitTag(scratch, scratch);
+ return scratch;
+ }
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ MOZ_ASSERT(scratch != ScratchReg);
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest);
+
+    // These two functions use the low 32 bits of the full value register.
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+ }
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+
+ void convertInt64ToDouble(Register64 input, FloatRegister output);
+ void convertInt64ToFloat32(Register64 input, FloatRegister output);
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 input, FloatRegister output, Register temp);
+ void convertUInt64ToFloat32(Register64 input, FloatRegister output, Register temp);
+
+ void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+
+ void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ CodeOffset label = loadRipRelativeInt64(dest);
+ append(wasm::GlobalAccess(label, globalDataOffset));
+ }
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ }
+
+ public:
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.valueReg(), operand.valueReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value) {
+ ScratchRegisterScope scratch(asMasm());
+ unboxString(value, scratch);
+ cmp32(Operand(scratch, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+ }
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(src, dest.fpu());
+ else if (type == MIRType::Int32 || type == MIRType::Boolean)
+ movl(Operand(src), dest.gpr());
+ else
+ unboxNonDouble(Operand(src), dest.gpr());
+ }
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 8: {
+ ScratchRegisterScope scratch(asMasm());
+ unboxNonDouble(value, scratch);
+ storePtr(scratch, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void loadInstructionPointerAfterCall(Register dest) {
+ loadPtr(Address(StackPointer, 0x0), dest);
+ }
+
+ void convertUInt32ToDouble(Register src, FloatRegister dest) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ vcvtsq2sd(src, dest, dest);
+ }
+
+ void convertUInt32ToFloat32(Register src, FloatRegister dest) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ vcvtsq2ss(src, dest, dest);
+ }
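+    // Both conversions rely on the uint32 source being zero-extended in its
+    // 64-bit register (the usual x64 state after a 32-bit write): a
+    // zero-extended uint32 is a non-negative int64, so the signed
+    // vcvtsq2sd/vcvtsq2ss conversion is exact over the whole uint32 range.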
+
+ inline void incrementInt32Value(const Address& addr);
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ public:
+ void handleFailureWithHandlerTail(void* handler);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerX64 MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_MacroAssembler_x64_h */
diff --git a/js/src/jit/x64/SharedIC-x64.cpp b/js/src/jit/x64/SharedIC-x64.cpp
new file mode 100644
index 000000000..9e5898c3d
--- /dev/null
+++ b/js/src/jit/x64/SharedIC-x64.cpp
@@ -0,0 +1,234 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // The scratch register is only used in the case of JSOP_URSH.
+ mozilla::Maybe<ScratchRegisterScope> scratch;
+
+ Label revertRegister, maybeNegZero;
+    switch (op_) {
+ case JSOP_ADD:
+ masm.unboxInt32(R0, ExtractTemp0);
+        // Jump straight to failure on overflow: R0 and R1 are preserved, so
+        // we can simply jump to the next stub.
+ masm.addl(R1.valueReg(), ExtractTemp0);
+ masm.j(Assembler::Overflow, &failure);
+
+ // Box the result
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_SUB:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.subl(R1.valueReg(), ExtractTemp0);
+ masm.j(Assembler::Overflow, &failure);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_MUL:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.imull(R1.valueReg(), ExtractTemp0);
+ masm.j(Assembler::Overflow, &failure);
+
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &maybeNegZero);
+
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_DIV:
+ {
+ MOZ_ASSERT(R2.scratchReg() == rax);
+ MOZ_ASSERT(R0.valueReg() != rdx);
+ MOZ_ASSERT(R1.valueReg() != rdx);
+ masm.unboxInt32(R0, eax);
+ masm.unboxInt32(R1, ExtractTemp0);
+
+ // Prevent division by 0.
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &failure);
+
+ // Prevent negative 0 and -2147483648 / -1.
+ masm.branch32(Assembler::Equal, eax, Imm32(INT32_MIN), &failure);
+
+ Label notZero;
+ masm.branch32(Assembler::NotEqual, eax, Imm32(0), &notZero);
+ masm.branchTest32(Assembler::Signed, ExtractTemp0, ExtractTemp0, &failure);
+ masm.bind(&notZero);
+
+        // Sign-extend eax into edx to form (edx:eax), since idiv divides the
+        // 64-bit value in edx:eax.
+ masm.cdq();
+ masm.idiv(ExtractTemp0);
+
+ // A remainder implies a double result.
+ masm.branchTest32(Assembler::NonZero, edx, edx, &failure);
+
+ masm.boxValue(JSVAL_TYPE_INT32, eax, R0.valueReg());
+ break;
+ }
+ case JSOP_MOD:
+ {
+ MOZ_ASSERT(R2.scratchReg() == rax);
+ MOZ_ASSERT(R0.valueReg() != rdx);
+ MOZ_ASSERT(R1.valueReg() != rdx);
+ masm.unboxInt32(R0, eax);
+ masm.unboxInt32(R1, ExtractTemp0);
+
+ // x % 0 always results in NaN.
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &failure);
+
+ // Prevent negative 0 and -2147483648 % -1.
+ masm.branchTest32(Assembler::Zero, eax, Imm32(0x7fffffff), &failure);
+
+        // Sign-extend eax into edx to form (edx:eax), since idiv divides the
+        // 64-bit value in edx:eax.
+ masm.cdq();
+ masm.idiv(ExtractTemp0);
+
+ // Fail when we would need a negative remainder.
+ Label done;
+ masm.branchTest32(Assembler::NonZero, edx, edx, &done);
+ masm.orl(ExtractTemp0, eax);
+ masm.branchTest32(Assembler::Signed, eax, eax, &failure);
+
+ masm.bind(&done);
+ masm.boxValue(JSVAL_TYPE_INT32, edx, R0.valueReg());
+ break;
+ }
+ case JSOP_BITOR:
+        // We can overwrite R0, because the instruction is infallible.
+ // Because the tag bits are the same, we don't need to retag.
+ masm.orq(R1.valueReg(), R0.valueReg());
+ break;
+ case JSOP_BITXOR:
+ masm.xorl(R1.valueReg(), R0.valueReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0);
+ break;
+ case JSOP_BITAND:
+ masm.andq(R1.valueReg(), R0.valueReg());
+ break;
+ case JSOP_LSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+        masm.unboxInt32(R1, ecx); // Unboxing R1 into ecx clobbers R0 (rcx).
+ masm.shll_cl(ExtractTemp0);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_RSH:
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ecx);
+ masm.sarl_cl(ExtractTemp0);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ break;
+ case JSOP_URSH:
+ if (!allowDouble_) {
+ scratch.emplace(masm);
+ masm.movq(R0.valueReg(), *scratch);
+ }
+
+ masm.unboxInt32(R0, ExtractTemp0);
+ masm.unboxInt32(R1, ecx); // This clobbers R0
+
+ masm.shrl_cl(ExtractTemp0);
+ masm.test32(ExtractTemp0, ExtractTemp0);
+ if (allowDouble_) {
+ Label toUint;
+ masm.j(Assembler::Signed, &toUint);
+
+ // Box and return.
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ ScratchDoubleScope scratchDouble(masm);
+ masm.convertUInt32ToDouble(ExtractTemp0, scratchDouble);
+ masm.boxDouble(scratchDouble, R0);
+ } else {
+ masm.j(Assembler::Signed, &revertRegister);
+ masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op in BinaryArith_Int32");
+ }
+
+ // Return from stub.
+ EmitReturnFromIC(masm);
+
+ if (op_ == JSOP_MUL) {
+ masm.bind(&maybeNegZero);
+
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ {
+ ScratchRegisterScope scratch(masm);
+ masm.movl(R0.valueReg(), scratch);
+ masm.orl(R1.valueReg(), scratch);
+ masm.j(Assembler::Signed, &failure);
+ }
+
+ // Result is +0.
+ masm.moveValue(Int32Value(0), R0);
+ EmitReturnFromIC(masm);
+ }
+
+ // Revert the content of R0 in the fallible >>> case.
+ if (op_ == JSOP_URSH && !allowDouble_) {
+ // Scope continuation from JSOP_URSH case above.
+ masm.bind(&revertRegister);
+ // Restore tag and payload.
+ masm.movq(*scratch, R0.valueReg());
+ // Fall through to failure.
+ }
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.notl(R0.valueReg());
+ break;
+ case JSOP_NEG:
+        // Guard against 0 and INT32_MIN, both of which result in a double.
+ masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure);
+ masm.negl(R0.valueReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0);
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x64/SharedICHelpers-x64.h b/js/src/jit/x64/SharedICHelpers-x64.h
new file mode 100644
index 000000000..b59d05ddc
--- /dev/null
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -0,0 +1,352 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICHelpers_x64_h
+#define jit_x64_SharedICHelpers_x64_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from the stack top to the top Value inside an IC stub (this is the return address).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ masm.Pop(ICTailCallReg);
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ masm.Push(ICTailCallReg);
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
+ ICStubReg);
+
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) monitorStubOffset), ICStubReg);
+
+ // Jump to the stubcode.
+ masm.jmp(Operand(ICStubReg, (int32_t) ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.ret();
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.storePtr(reg, Address(StackPointer, 0));
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ ScratchRegisterScope scratch(masm);
+
+    // We can assume here that R0 and R1 have been pushed.
+ masm.movq(BaselineFrameReg, scratch);
+ masm.addq(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subq(BaselineStackReg, scratch);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.movq(scratch, rdx);
+ masm.subq(Imm32(argSize), rdx);
+ masm.store32(rdx, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ // For tail calls, find the already pushed JitFrame_IonJS signifying the
+ // end of the Ion frame. Retrieve the length of the frame and repush
+    // JitFrame_IonJS with the extra stack size, rendering the original
+ // JitFrame_IonJS obsolete.
+
+ ScratchRegisterScope scratch(masm);
+
+ masm.loadPtr(Address(esp, stackSize), scratch);
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addq(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch);
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and previous
+ // frame pointer pushed by EmitEnterStubFrame.
+ masm.movq(BaselineFrameReg, reg);
+ masm.addq(Imm32(sizeof(void*) * 2), reg);
+ masm.subq(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
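+// makeFrameDescriptor packs the computed size together with the frame type,
+// roughly (size << FRAMESIZE_SHIFT) | JitFrame_BaselineStub, so the unwinder
+// can recover both from the single pushed word (a sketch; see
+// makeFrameDescriptor for the exact encoding).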
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ ScratchRegisterScope scratch(masm);
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+    // Stubs often use the return address, which is actually accounted for by
+    // the caller of the stub. In the stub code, however, we pretend it is
+    // part of the stub frame so that it can be popped. Fix that up here by
+    // subtracting it, or it would be counted twice.
+    uint32_t framePushed = masm.framePushed() - sizeof(void*);
+
+ uint32_t descriptor = MakeFrameDescriptor(framePushed, JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.call(target);
+
+    // Remove the rest of the frame left on the stack, excluding the return
+    // address, which is implicitly popped when returning.
+    size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
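+// For example, with stackSlots == 2 the implicitPop removes 2 * 8 bytes of
+// arguments plus sizeof(ExitFrameLayout) - 8 bytes of frame, leaving only the
+// return address for the stub's eventual ret.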
+
+// Size of values pushed by EmitEnterStubFrame.
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register)
+{
+ EmitRestoreTailCallReg(masm);
+
+ ScratchRegisterScope scratch(masm);
+
+ // Compute frame size.
+ masm.movq(BaselineFrameReg, scratch);
+ masm.addq(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subq(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update STUB_FRAME_SIZE
+ // if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.Push(scratch);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(ICStubReg);
+ masm.Push(BaselineFrameReg);
+ masm.mov(BaselineStackReg, BaselineFrameReg);
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register)
+{
+ masm.loadPtr(Address(masm.getStackPointer(), 0), ICTailCallReg);
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ ScratchRegisterScope scratch(masm);
+ masm.Pop(scratch);
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addq(scratch, BaselineStackReg);
+ } else {
+ masm.mov(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.Pop(BaselineFrameReg);
+ masm.Pop(ICStubReg);
+
+ // Pop return address.
+ masm.Pop(ICTailCallReg);
+
+ // Overwrite frame descriptor with return address, so that the stack matches
+ // the state before entering the stub frame.
+ masm.storePtr(ICTailCallReg, Address(BaselineStackReg, 0));
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+    switch (values) {
+ case 1:
+ // Stow R0
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Stow R0 and R1
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.push(ICTailCallReg);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+    switch (values) {
+ case 1:
+ // Unstow R0
+ masm.pop(ICTailCallReg);
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Unstow R0 and R1
+ masm.pop(ICTailCallReg);
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ masm.push(ICTailCallReg);
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from stack top, excluding the return address.
+
+ // Save the current ICStubReg to stack
+ masm.push(ICStubReg);
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+
+ // Restore the old stub reg.
+ masm.pop(ICStubReg);
+
+ // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
+ // value in R0 type-checked properly or not.
+ Label success;
+ masm.cmp32(R1.scratchReg(), Imm32(1));
+ masm.j(Assembler::Equal, &success);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ masm.patchableCallPreBarrier(addr, type);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in the
+ // same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICHelpers_x64_h */
diff --git a/js/src/jit/x64/SharedICRegisters-x64.h b/js/src/jit/x64/SharedICRegisters-x64.h
new file mode 100644
index 000000000..75f62ccb1
--- /dev/null
+++ b/js/src/jit/x64/SharedICRegisters-x64.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x64_SharedICRegisters_x64_h
+#define jit_x64_SharedICRegisters_x64_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = rbp;
+static constexpr Register BaselineStackReg = rsp;
+
+static constexpr ValueOperand R0(rcx);
+static constexpr ValueOperand R1(rbx);
+static constexpr ValueOperand R2(rax);
+
+static constexpr Register ICTailCallReg = rsi;
+static constexpr Register ICStubReg = rdi;
+
+static constexpr Register ExtractTemp0 = r14;
+static constexpr Register ExtractTemp1 = r15;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x64_SharedICRegisters_x64_h */
diff --git a/js/src/jit/x64/Trampoline-x64.cpp b/js/src/jit/x64/Trampoline-x64.cpp
new file mode 100644
index 000000000..aebacdd1c
--- /dev/null
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -0,0 +1,1303 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "jit/x64/SharedICHelpers-x64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::IsPowerOfTwo;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard x64 fastcall
+// calling convention.
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ MacroAssembler masm(cx);
+ masm.assertStackAlignment(ABIStackAlignment, -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ MOZ_ASSERT(OsrFrameReg == IntArgReg3);
+
+#if defined(_WIN64)
+ const Address token = Address(rbp, 16 + ShadowStackSpace);
+ const Operand scopeChain = Operand(rbp, 24 + ShadowStackSpace);
+ const Operand numStackValuesAddr = Operand(rbp, 32 + ShadowStackSpace);
+ const Operand result = Operand(rbp, 40 + ShadowStackSpace);
+#else
+ const Register token = IntArgReg4;
+ const Register scopeChain = IntArgReg5;
+ const Operand numStackValuesAddr = Operand(rbp, 16 + ShadowStackSpace);
+ const Operand result = Operand(rbp, 24 + ShadowStackSpace);
+#endif
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(rbp);
+ masm.mov(rsp, rbp);
+
+ // Save non-volatile registers. These must be saved by the trampoline, rather
+ // than by the JIT'd code, because they are scanned by the conservative scanner.
+ masm.push(rbx);
+ masm.push(r12);
+ masm.push(r13);
+ masm.push(r14);
+ masm.push(r15);
+#if defined(_WIN64)
+ masm.push(rdi);
+ masm.push(rsi);
+
+ // 16-byte alignment for vmovdqa.
+ masm.subq(Imm32(16 * 10 + 8), rsp);
+
+ masm.vmovdqa(xmm6, Operand(rsp, 16 * 0));
+ masm.vmovdqa(xmm7, Operand(rsp, 16 * 1));
+ masm.vmovdqa(xmm8, Operand(rsp, 16 * 2));
+ masm.vmovdqa(xmm9, Operand(rsp, 16 * 3));
+ masm.vmovdqa(xmm10, Operand(rsp, 16 * 4));
+ masm.vmovdqa(xmm11, Operand(rsp, 16 * 5));
+ masm.vmovdqa(xmm12, Operand(rsp, 16 * 6));
+ masm.vmovdqa(xmm13, Operand(rsp, 16 * 7));
+ masm.vmovdqa(xmm14, Operand(rsp, 16 * 8));
+ masm.vmovdqa(xmm15, Operand(rsp, 16 * 9));
+#endif
+
+ // Save arguments passed in registers needed after function call.
+ masm.push(result);
+
+ // Remember stack depth without padding and arguments.
+ masm.mov(rsp, r14);
+
+ // Remember the number of bytes occupied by the argument vector.
+ masm.mov(reg_argc, r13);
+
+ // If we are constructing, the count also needs to include newTarget.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, token, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.addq(Imm32(1), r13);
+
+ masm.bind(&noNewTarget);
+ }
+
+ masm.shll(Imm32(3), r13); // r13 = argc * sizeof(Value)
+ static_assert(sizeof(Value) == 1 << 3, "Constant is baked in assembly code");
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
+ masm.mov(rsp, r12);
+ masm.subq(r13, r12);
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ masm.andl(Imm32(JitStackAlignment - 1), r12);
+ masm.subq(r12, rsp);
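+ // Worked example of the alignment math (assuming a JitStackAlignment of
+ // 16 bytes): if rsp == ...0x38 and r13 == 0x10 (two Values), then
+ // rsp - r13 == ...0x28, so r12 == 8 and rsp is lowered by 8; pushing the
+ // 0x10 bytes of arguments afterwards leaves rsp 16-byte aligned.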
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // r13 still stores the number of bytes in the argument vector.
+ masm.addq(reg_argv, r13); // r13 points above last argument or newTarget
+
+ // While r13 > reg_argv, push arguments.
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmpPtr(r13, reg_argv);
+ masm.j(AssemblerX86Shared::BelowOrEqual, &footer);
+
+ masm.subq(Imm32(8), r13);
+ masm.push(Operand(r13, 0));
+ masm.jmp(&header);
+
+ masm.bind(&footer);
+ }
+
+ // Push the number of actual arguments. |result| is used to store the
+ // actual number of arguments without adding an extra argument to the enter
+ // JIT.
+ masm.movq(result, reg_argc);
+ masm.unboxInt32(Operand(reg_argc, 0), reg_argc);
+ masm.push(reg_argc);
+
+ // Push the callee token.
+ masm.push(token);
+
+ /*****************************************************************
+ Push the number of bytes we've pushed so far on the stack and call
+ *****************************************************************/
+ masm.subq(rsp, r14);
+
+ // Create a frame descriptor.
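+ // The descriptor packs the frame size into the upper bits
+ // (size << FRAMESIZE_SHIFT) alongside the frame type; the epilogue below
+ // recovers the size with shrq(FRAMESIZE_SHIFT).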
+ masm.makeFrameDescriptor(r14, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(r14);
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.takeUnchecked(OsrFrameReg);
+ regs.take(rbp);
+ regs.take(reg_code);
+
+ // Ensure that |scratch| does not end up being JSReturnOperand.
+ // Do takeUnchecked because on Win64/x64, reg_code (IntArgReg0) and JSReturnOperand are
+ // the same (rcx). See bug 849398.
+ regs.takeUnchecked(JSReturnOperand);
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.movq(numStackValuesAddr, numStackValues);
+
+ // Push the return address.
+ masm.mov(returnLabel.patchAt(), scratch);
+ masm.push(scratch);
+
+ // Push previous frame pointer.
+ masm.push(rbp);
+
+ // Reserve frame.
+ Register framePtr = rbp;
+ masm.subPtr(Imm32(BaselineFrame::Size()), rsp);
+ masm.mov(rsp, framePtr);
+
+#ifdef XP_WIN
+ // Can't push large frames blindly on Windows. Touch frame memory incrementally.
+ masm.mov(numStackValues, scratch);
+ masm.lshiftPtr(Imm32(3), scratch);
+ masm.subPtr(scratch, framePtr);
+ {
+ masm.movePtr(rsp, scratch);
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+
+ Label touchFrameLoop;
+ Label touchFrameLoopEnd;
+ masm.bind(&touchFrameLoop);
+ masm.branchPtr(Assembler::Below, scratch, framePtr, &touchFrameLoopEnd);
+ masm.store32(Imm32(0), Address(scratch, 0));
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+ masm.jump(&touchFrameLoop);
+ masm.bind(&touchFrameLoopEnd);
+ }
+ masm.mov(rsp, framePtr);
+#endif
+
+ // Reserve space for locals and stack values.
+ Register valuesSize = regs.takeAny();
+ masm.mov(numStackValues, valuesSize);
+ masm.shll(Imm32(3), valuesSize);
+ masm.subPtr(valuesSize, rsp);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), valuesSize);
+ masm.makeFrameDescriptor(valuesSize, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(valuesSize);
+ masm.push(Imm32(0)); // Fake return address.
+ // No GC things to mark, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ regs.add(valuesSize);
+
+ masm.push(framePtr);
+ masm.push(reg_code);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtr); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
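+ // InitBaselineFrameForOsr reports success as a bool in ReturnReg; it is
+ // tested below once reg_code and framePtr have been restored.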
+
+ masm.pop(reg_code);
+ masm.pop(framePtr);
+
+ MOZ_ASSERT(reg_code != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), rsp);
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.lea(Operand(framePtr, sizeof(void*)), realFramePtr);
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(reg_code);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.mov(framePtr, rsp);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), rsp);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.mov(oomReturnLabel.patchAt(), scratch);
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ masm.movq(scopeChain, R1.scratchReg());
+ }
+
+ // The call will push the return address on the stack; thus we check that
+ // the stack will be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call function.
+ masm.callJitNoProfiler(reg_code);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.use(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.use(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments and padding from stack.
+ masm.pop(r14); // Pop and decode descriptor.
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), r14);
+ masm.addq(r14, rsp); // Remove arguments.
+
+ /*****************************************************************
+ Place return value where it belongs, pop all saved registers
+ *****************************************************************/
+ masm.pop(r12); // vp
+ masm.storeValue(JSReturnOperand, Operand(r12, 0));
+
+ // Restore non-volatile registers.
+#if defined(_WIN64)
+ masm.vmovdqa(Operand(rsp, 16 * 0), xmm6);
+ masm.vmovdqa(Operand(rsp, 16 * 1), xmm7);
+ masm.vmovdqa(Operand(rsp, 16 * 2), xmm8);
+ masm.vmovdqa(Operand(rsp, 16 * 3), xmm9);
+ masm.vmovdqa(Operand(rsp, 16 * 4), xmm10);
+ masm.vmovdqa(Operand(rsp, 16 * 5), xmm11);
+ masm.vmovdqa(Operand(rsp, 16 * 6), xmm12);
+ masm.vmovdqa(Operand(rsp, 16 * 7), xmm13);
+ masm.vmovdqa(Operand(rsp, 16 * 8), xmm14);
+ masm.vmovdqa(Operand(rsp, 16 * 9), xmm15);
+
+ masm.addq(Imm32(16 * 10 + 8), rsp);
+
+ masm.pop(rsi);
+ masm.pop(rdi);
+#endif
+ masm.pop(r15);
+ masm.pop(r14);
+ masm.pop(r13);
+ masm.pop(r12);
+ masm.pop(rbx);
+
+ // Restore frame pointer and return.
+ masm.pop(rbp);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ AutoJitContextAlloc ajca(cx);
+ MacroAssembler masm(cx);
+
+ // See explanatory comment in x86's JitRuntime::generateInvalidator.
+
+ masm.addq(Imm32(sizeof(uintptr_t)), rsp);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ masm.movq(rsp, rax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's frameSize outparam.
+ masm.reserveStack(sizeof(size_t));
+ masm.movq(rsp, rbx);
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movq(rsp, r9);
+
+ masm.setupUnalignedABICall(rdx);
+ masm.passABIArg(rax);
+ masm.passABIArg(rbx);
+ masm.passABIArg(r9);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.pop(r9); // Get the bailoutInfo outparam.
+ masm.pop(rbx); // Get the frameSize outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.lea(Operand(rsp, rbx, TimesOne, sizeof(InvalidationBailoutStack)), rsp);
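+ // rbx holds the frameSize outparam, so this lea discards both the
+ // InvalidationBailoutStack and the invalidated frame in one step.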
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ // Do not erase the frame pointer in this function.
+
+ MacroAssembler masm(cx);
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- rsp
+ // '--- #r8 ---'
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
+ // Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == r8);
+
+ // Add |this| to the count of known arguments.
+ masm.addl(Imm32(1), r8);
+
+ // Load |nformals| into %rcx.
+ masm.loadPtr(Address(rsp, RectifierFrameLayout::offsetOfCalleeToken()), rax);
+ masm.mov(rax, rcx);
+ masm.andq(Imm32(uint32_t(CalleeTokenMask)), rcx);
+ masm.movzwl(Operand(rcx, JSFunction::offsetOfNargs()), rcx);
+
+ // Stash another copy in r11, since we are going to do destructive operations
+ // on rcx.
+ masm.mov(rcx, r11);
+
+ static_assert(CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(rax, rdx);
+ masm.andq(Imm32(uint32_t(CalleeToken_FunctionConstructing)), rdx);
+
+ // Including |this| and |new.target|, there are (|nformals| + 1 + isConstructing)
+ // arguments to push to the stack. Then we push a JitFrameLayout. We
+ // compute the padding expressed in the number of extra |undefined| values
+ // to push on the stack.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */), rcx);
+ masm.addl(rdx, rcx);
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), rcx);
+
+ // Load the number of |undefined|s to push into %rcx.
+ masm.subq(r8, rcx);
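+ // Worked example (assuming JitStackValueAlignment == 2, i.e. a 16-byte
+ // JitStackAlignment with 8-byte Values): with nformals == 3 and not
+ // constructing, rcx = 3 + (2 - 1) + 1 = 5, masked down to 4; with one
+ // actual argument (r8 == 2 including |this|) we push 4 - 2 == 2
+ // undefined values.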
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- rsp <- r9
+ // '------ #r8 -------'
+ //
+ // Rectifier frame:
+ // [undef] [undef] [undef] [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ // '------- #rcx --------' '------ #r8 -------'
+
+ // Copy the number of actual arguments
+ masm.loadPtr(Address(rsp, RectifierFrameLayout::offsetOfNumActualArgs()), rdx);
+
+ masm.moveValue(UndefinedValue(), r10);
+
+ masm.movq(rsp, r9); // Save %rsp.
+
+ // Push undefined values (including the padding).
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(r10);
+ masm.subl(Imm32(1), rcx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ // Get the topmost argument.
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // The |- sizeof(Value)| term positions rcx so that we can read the last
+ // argument, and not the value just past it.
+ BaseIndex b = BaseIndex(r9, r8, TimesEight, sizeof(RectifierFrameLayout) - sizeof(Value));
+ masm.lea(Operand(b), rcx);
+
+ // Copy & Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.push(Operand(rcx, 0x0));
+ masm.subq(Imm32(sizeof(Value)), rcx);
+ masm.subl(Imm32(1), r8);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+ // If constructing, copy newTarget.
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, rax, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(r10);
+
+ // +1 for |this|. We want vp[argc], so don't subtract 1
+ BaseIndex newTargetSrc(r9, rdx, TimesEight, sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(rsp, r11, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- r9
+ //
+ //
+ // Rectifier frame:
+ // [undef] [undef] [undef] [arg2] [arg1] [this] <- rsp [[argc] [callee] [descr] [raddr]]
+ //
+
+ // Construct descriptor.
+ masm.subq(rsp, r9);
+ masm.makeFrameDescriptor(r9, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.push(rdx); // numActualArgs
+ masm.push(rax); // callee token
+ masm.push(r9); // descriptor
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.andq(Imm32(uint32_t(CalleeTokenMask)), rax);
+ masm.loadPtr(Address(rax, JSFunction::offsetOfNativeOrScript()), rax);
+ masm.loadBaselineOrIonRaw(rax, rax, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(rax);
+
+ // Remove the rectifier frame.
+ masm.pop(r9); // r9 <- descriptor with FrameType.
+ masm.shrq(Imm32(FRAMESIZE_SHIFT), r9);
+ masm.pop(r11); // Discard calleeToken.
+ masm.pop(r11); // Discard numActualArgs.
+ masm.addq(r9, rsp); // Discard pushed arguments.
+
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*)(code->raw() + returnOffset);
+ return code;
+}
+
+static void
+PushBailoutFrame(MacroAssembler& masm, Register spArg)
+{
+ // Push registers such that we can access them from [base + code].
+ if (JitSupportsSimd()) {
+ masm.PushRegsInMask(AllRegs);
+ } else {
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more(); ++iter)
+ masm.Push(*iter);
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+ }
+
+ // Get the stack pointer into a register, pre-alignment.
+ masm.movq(rsp, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, r8);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movq(rsp, r9);
+
+ // Call the bailout function.
+ masm.setupUnalignedABICall(rax);
+ masm.passABIArg(r8);
+ masm.passABIArg(r9);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ masm.pop(r9); // Get the bailoutInfo outparam.
+
+ // Stack is:
+ // [frame]
+ // snapshotOffset
+ // frameSize
+ // [bailoutFrame]
+ //
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ static const uint32_t BailoutDataSize = sizeof(RegisterDump);
+ masm.addq(Imm32(BailoutDataSize), rsp);
+ masm.pop(rcx);
+ masm.lea(Operand(rsp, rcx, TimesOne, sizeof(void*)), rsp);
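+ // rcx now holds the frame size, so this lea skips the snapshotOffset
+ // word plus the Ion frame itself, leaving rsp at the caller's frame.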
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in r9.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MOZ_CRASH("x64 does not use bailout tables");
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ // Generate separate code for the wrapper.
+ MacroAssembler masm;
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ // The Wrapper register set is a superset of the Volatile register set.
+ JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
+
+ // The context is the first argument.
+ Register cxreg = IntArgReg0;
+ regs.take(cxreg);
+
+ // Stack is:
+ // ... frame ...
+ // +12 [args]
+ // +8 descriptor
+ // +0 returnAddress
+ //
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = r10;
+ regs.take(argsBase);
+ masm.lea(Operand(rsp, ExitFrameLayout::SizeWithFooter()), argsBase);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int32_t));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movq(esp, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movq(esp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg))
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ else
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ case VMFunction::DoubleByRef:
+ MOZ_CRASH("NYI: x64 callVM should not be used with 128bits values.");
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, rax, rax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(rax, rax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(esp, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ MOZ_ASSERT(cx->runtime()->jitSupportsFloatingPoint);
+ masm.loadDouble(Address(esp, 0), ReturnDoubleReg);
+ masm.freeStack(sizeof(double));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(esp, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
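+ // retn pops the return address and then the given byte count, so the
+ // exit frame and the VM call's stack arguments are released on the way
+ // back to the caller.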
+
+ Linker linker(masm);
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_, so we have to
+ // use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm;
+
+ LiveRegisterSet regs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.PushRegsInMask(regs);
+
+ MOZ_ASSERT(PreBarrierReg == rdx);
+ masm.mov(ImmPtr(cx->runtime()), rcx);
+
+ masm.setupUnalignedABICall(rax);
+ masm.passABIArg(rcx);
+ masm.passABIArg(rdx);
+ masm.callWithABI(IonMarkFunction(type));
+
+ masm.PopRegsInMask(regs);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+#ifndef JS_USE_LINK_REGISTER
+ // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.setFramePushed(sizeof(intptr_t));
+#endif
+
+ Register scratch1 = rax;
+ Register scratch2 = rcx;
+ Register scratch3 = rdx;
+
+ // Load the return address in scratch1.
+ masm.loadPtr(Address(rsp, 0), scratch1);
+
+ // Load BaselineFrame pointer in scratch2.
+ masm.mov(rbp, scratch2);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
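+ // The BaselineFrame is allocated directly below the frame pointer, so
+ // rbp - BaselineFrame::Size() yields the BaselineFrame*.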
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is marked
+ // during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch3);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.push(scratch1);
+ masm.push(scratch2);
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+ masm.ret();
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.mov(rbp, rsp);
+ masm.pop(rbp);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(rdx, r9);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = r8;
+ Register scratch2 = r9;
+ Register scratch3 = r10;
+ Register scratch4 = r11;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, figure out what to do depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.movePtr(scratch1, scratch2);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch2);
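+ // That is: size = descriptor >> FRAMESIZE_SHIFT and
+ // type = descriptor & ((1 << FRAMETYPE_BITS) - 1), undoing the packing
+ // performed by makeFrameDescriptor.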
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
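+ // BaselineJS frames share the IonJS handler: both are JitFrameLayout
+ // based, so the same descriptor/return-address math applies.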
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // returning directly to an IonJS frame. Store return addr to frame
+ // in lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ BaseIndex stubFrameReturnAddr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch1, TimesOne, RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch3);
+ masm.addPtr(Imm32(sizeof(void*)), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch3, TimesOne, IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}