path: root/js/src/jit/x86
author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /js/src/jit/x86
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/x86')
-rw-r--r--  js/src/jit/x86/Assembler-x86.cpp           106
-rw-r--r--  js/src/jit/x86/Assembler-x86.h             991
-rw-r--r--  js/src/jit/x86/Bailouts-x86.cpp            115
-rw-r--r--  js/src/jit/x86/BaseAssembler-x86.h         203
-rw-r--r--  js/src/jit/x86/BaselineCompiler-x86.cpp     15
-rw-r--r--  js/src/jit/x86/BaselineCompiler-x86.h       26
-rw-r--r--  js/src/jit/x86/BaselineIC-x86.cpp           48
-rw-r--r--  js/src/jit/x86/CodeGenerator-x86.cpp      1298
-rw-r--r--  js/src/jit/x86/CodeGenerator-x86.h          98
-rw-r--r--  js/src/jit/x86/LIR-x86.h                   207
-rw-r--r--  js/src/jit/x86/LOpcodes-x86.h               24
-rw-r--r--  js/src/jit/x86/Lowering-x86.cpp            658
-rw-r--r--  js/src/jit/x86/Lowering-x86.h               96
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86-inl.h   1075
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86.cpp     1028
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86.h        870
-rw-r--r--  js/src/jit/x86/SharedIC-x86.cpp            242
-rw-r--r--  js/src/jit/x86/SharedICHelpers-x86.h       353
-rw-r--r--  js/src/jit/x86/SharedICRegisters-x86.h      38
-rw-r--r--  js/src/jit/x86/Trampoline-x86.cpp         1336
20 files changed, 8827 insertions, 0 deletions
diff --git a/js/src/jit/x86/Assembler-x86.cpp b/js/src/jit/x86/Assembler-x86.cpp
new file mode 100644
index 000000000..7fca29434
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -0,0 +1,106 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Assembler-x86.h"
+
+#include "gc/Marking.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : stackOffset_(0),
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Pointer:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ case MIRType::Double:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ case MIRType::Int64:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ // SIMD values aren't passed in or out of C++, so we can make up
+ // whatever internal ABI we like. visitWasmStackArg assumes
+ // SimdMemoryAlignment.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
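+// For example, the signature (Int32, Double, Int32x4) is laid out at stack
+// offsets 0, 4 and 16: the 32-bit and 64-bit slots are packed at 4-byte
+// granularity, while the SIMD slot is first rounded up to
+// SimdMemoryAlignment, leaving stackBytesConsumedSoFar() == 32.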
+
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ AssemblerX86Shared::executableCopy(buffer);
+
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ X86Encoding::SetRel32(buffer + rp.offset, rp.target);
+ }
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ uint32_t offset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader)
+ : reader_(reader)
+ { }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+};
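+// The jump relocation table consumed here is just a CompactBuffer of code
+// offsets, one per Relocation::JITCODE jump recorded through
+// Assembler::writeRelocation(); each offset identifies a patched rel32 whose
+// target lives in another JitCode allocation.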
+
+static inline JitCode*
+CodeFromJump(uint8_t* jump)
+{
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code->raw() + iter.offset()));
+ }
+}
diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
new file mode 100644
index 000000000..3fb5efaff
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -0,0 +1,991 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Assembler_x86_h
+#define jit_x86_Assembler_x86_h
+
+#include "mozilla/ArrayUtils.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register eax = { X86Encoding::rax };
+static constexpr Register ecx = { X86Encoding::rcx };
+static constexpr Register edx = { X86Encoding::rdx };
+static constexpr Register ebx = { X86Encoding::rbx };
+static constexpr Register esp = { X86Encoding::rsp };
+static constexpr Register ebp = { X86Encoding::rbp };
+static constexpr Register esi = { X86Encoding::rsi };
+static constexpr Register edi = { X86Encoding::rdi };
+
+static constexpr FloatRegister xmm0 = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 = FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 = FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 = FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 = FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 = FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 = FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+
+static constexpr Register InvalidReg = { X86Encoding::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register JSReturnReg_Type = ecx;
+static constexpr Register JSReturnReg_Data = edx;
+static constexpr Register StackPointer = esp;
+static constexpr Register FramePointer = ebp;
+static constexpr Register ReturnReg = eax;
+static constexpr Register64 ReturnReg64(edi, eax);
+static constexpr FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
+
+// Avoid ebp, which is the FramePointer and is unavailable in some modes.
+static constexpr Register ArgumentsRectifierReg = esi;
+static constexpr Register CallTempReg0 = edi;
+static constexpr Register CallTempReg1 = eax;
+static constexpr Register CallTempReg2 = ebx;
+static constexpr Register CallTempReg3 = ecx;
+static constexpr Register CallTempReg4 = esi;
+static constexpr Register CallTempReg5 = edx;
+
+// We have no arg regs, so our NonArgRegs are just our CallTempRegs.
+// Use "const" instead of constexpr here to work around a bug in
+// VS2015 Update 1. See bug 1229604.
+static const Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
+static const uint32_t NumCallTempNonArgRegs =
+ mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+
+};
+
+static constexpr Register ABINonArgReg0 = eax;
+static constexpr Register ABINonArgReg1 = ebx;
+static constexpr Register ABINonArgReg2 = ecx;
+
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = ecx;
+static constexpr Register ABINonArgReturnReg1 = edx;
+static constexpr Register ABINonVolatileReg = ebx;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = esi;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register OsrFrameReg = edx;
+static constexpr Register PreBarrierReg = edx;
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = ecx;
+static constexpr Register WasmIonExitRegE0 = edi;
+static constexpr Register WasmIonExitRegE1 = eax;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+static constexpr Register WasmIonExitRegReturnData = edx;
+static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitRegD0 = edi;
+static constexpr Register WasmIonExitRegD1 = eax;
+static constexpr Register WasmIonExitRegD2 = esi;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
+static constexpr Register RegExpTesterStringReg = CallTempReg2;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
+
+// The GCC stack is aligned on 16 bytes. Ion does not maintain this for
+// internal calls; wasm code does.
+#if defined(__GNUC__)
+static constexpr uint32_t ABIStackAlignment = 16;
+#else
+static constexpr uint32_t ABIStackAlignment = 4;
+#endif
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
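+// With the 8-byte Value representation used on 32-bit targets, this works out
+// to JitStackValueAlignment == 2, i.e. the JIT stack stays aligned to pairs
+// of Values.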
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture or not. Rather than being a method on the LIRGenerator,
+// it lives here so that it is accessible from the entire codebase. Once full
+// support for SIMD is reached on all tier-1 platforms, this constant can be
+// deleted.
+static constexpr bool SupportsSimd = true;
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments which are used for "
+ "the constant sections of the code buffer. Thus it should be larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments which are used for "
+ "spilled values. Thus it should be larger than the alignment for SIMD accesses.");
+
+static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+struct ImmTag : public Imm32
+{
+ explicit ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+struct ImmType : public ImmTag
+{
+ explicit ImmType(JSValueType type)
+ : ImmTag(JSVAL_TYPE_TO_TAG(type))
+ { }
+};
+
+static const Scale ScalePointer = TimesFour;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static inline void
+PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
+{
+#ifdef DEBUG
+ // Assert that we're overwriting a jump instruction, either:
+ // 0F 80+cc <imm32>, or
+ // E9 <imm32>
+ unsigned char* x = (unsigned char*)jump.raw() - 5;
+ MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
+ (*x == 0xE9));
+#endif
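+    // jump.raw() points just past the jump instruction: the direct form
+    // (E9 <imm32>) is 5 bytes and the conditional form (0F 80+cc <imm32>) is
+    // 6 bytes, which is what the assertion above relies on. SetRel32 rewrites
+    // the trailing imm32 so that the jump lands on 'label'.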
+ MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+ X86Encoding::SetRel32(jump.raw(), label.raw());
+}
+static inline void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
+{
+ PatchJump(jump_, label);
+}
+
+// Return operand from a JS -> JS call.
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+
+class Assembler : public AssemblerX86Shared
+{
+ void writeRelocation(JmpSrc src) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ }
+ void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
+ if (kind == Relocation::JITCODE)
+ writeRelocation(src);
+ }
+
+ public:
+ using AssemblerX86Shared::movl;
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::vmovsd;
+ using AssemblerX86Shared::vmovss;
+ using AssemblerX86Shared::retarget;
+ using AssemblerX86Shared::cmpl;
+ using AssemblerX86Shared::call;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::pop;
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ // Actual assembly emitting functions.
+
+ void push(ImmGCPtr ptr) {
+ masm.push_i32(int32_t(ptr.value));
+ writeDataRelocation(ptr);
+ }
+ void push(const ImmWord imm) {
+ push(Imm32(imm.value));
+ }
+ void push(const ImmPtr imm) {
+ push(ImmWord(uintptr_t(imm.value)));
+ }
+ void push(FloatRegister src) {
+ subl(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+
+ CodeOffset pushWithPatch(ImmWord word) {
+ masm.push_i32(int32_t(word.value));
+ return CodeOffset(masm.currentOffset());
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addl(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ movl(Imm32(word.value), dest);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void movl(ImmGCPtr ptr, Register dest) {
+ masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movl(ImmGCPtr ptr, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(), dest.index(), dest.scale());
+ writeDataRelocation(ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(ImmWord imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ }
+ void movl(ImmPtr imm, Register dest) {
+ movl(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(ImmWord imm, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though.
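+        // The xor form is also shorter (two bytes instead of five), so e.g.
+        // mov(ImmWord(0), eax) assembles to a single "xorl %eax, %eax".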
+ if (imm.value == 0)
+ xorl(dest, dest);
+ else
+ movl(imm, dest);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movl_i32r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void mov(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+ void mov(Imm32 imm, const Operand& dest) {
+ movl(imm, dest);
+ }
+ void mov(CodeOffset* label, Register dest) {
+ // Put a placeholder value in the instruction stream.
+ masm.movl_i32r(0, dest.encoding());
+ label->bind(masm.size());
+ }
+ void mov(Register src, Register dest) {
+ movl(src, dest);
+ }
+ void xchg(Register src, Register dest) {
+ xchgl(src, dest);
+ }
+ void lea(const Operand& src, Register dest) {
+ return leal(src, dest);
+ }
+
+ void fstp32(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp32_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void faddp() {
+ masm.faddp();
+ }
+
+ void cmpl(ImmWord rhs, Register lhs) {
+ masm.cmpl_ir(rhs.value, lhs.encoding());
+ }
+ void cmpl(ImmPtr rhs, Register lhs) {
+ cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
+ }
+ void cmpl(ImmGCPtr rhs, Register lhs) {
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
+ writeDataRelocation(rhs);
+ }
+ void cmpl(Register rhs, Register lhs) {
+ masm.cmpl_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpl(ImmGCPtr rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
+ writeDataRelocation(rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
+ masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
+ }
+ void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
+ JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
+ }
+
+ void adcl(Imm32 imm, Register dest) {
+ masm.adcl_ir(imm.value, dest.encoding());
+ }
+ void adcl(Register src, Register dest) {
+ masm.adcl_rr(src.encoding(), dest.encoding());
+ }
+
+ void sbbl(Imm32 imm, Register dest) {
+ masm.sbbl_ir(imm.value, dest.encoding());
+ }
+ void sbbl(Register src, Register dest) {
+ masm.sbbl_rr(src.encoding(), dest.encoding());
+ }
+
+ void mull(Register multiplier) {
+ masm.mull_r(multiplier.encoding());
+ }
+
+ void shldl(const Imm32 imm, Register src, Register dest) {
+ masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+ void shrdl(const Imm32 imm, Register src, Register dest) {
+ masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+
+ void vhaddpd(FloatRegister src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE3());
+ MOZ_ASSERT(src.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vhaddpd_rr(src.encoding(), dest.encoding());
+ }
+ void vsubpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vsubpd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vsubpd_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void vpunpckldq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(src1.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ masm.vpunpckldq_rr(src1.encoding(), src0.encoding(), dest.encoding());
+ }
+ void vpunpckldq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ MOZ_ASSERT(src0.size() == 16);
+ MOZ_ASSERT(dest.size() == 16);
+ switch (src1.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vpunpckldq_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vpunpckldq_mr(src1.address(), src0.encoding(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void fild(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fild_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ Relocation::Kind reloc = Relocation::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ }
+ void call(ImmWord target) {
+ call(ImmPtr((void*)target.value));
+ }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, Relocation::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
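+    // Both forms are five bytes on x86 (E8 <rel32> for the call, 3D <imm32>
+    // for the cmp-eax nop), so the patch can be applied in place without
+    // moving any other code.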
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
+ return 5;
+ }
+
+ // Re-routes pending jumps to an external target, flushing the label in the
+ // process.
+ void retarget(Label* label, ImmPtr target, Relocation::Kind reloc) {
+ if (label->used()) {
+ bool more;
+ X86Encoding::JmpSrc jmp(label->offset());
+ do {
+ X86Encoding::JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ addPendingJump(jmp, target, reloc);
+ jmp = next;
+ } while (more);
+ }
+ label->reset();
+ }
+
+ // Move a 32-bit immediate into a register where the immediate can be
+ // patched.
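+    // The returned CodeOffset records the end of the instruction; since the
+    // imm32 is the final four bytes of the encoding, that is enough for the
+    // patching code to locate and rewrite it later.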
+ CodeOffset movlWithPatch(Imm32 imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(base + disp32) where disp32 can be patched.
+ CodeOffset movsblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movsbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movswl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzwl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *(base + disp32) where disp32 can be patched.
+ CodeOffset movbWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movb_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movw_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ Address addr = dest.toAddress();
+ Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
+ return movlWithPatch(regLow, low);
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand low(PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
+ return movlWithPatch(regLow, low);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ Address addr = dest.toAddress();
+ Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
+ return movlWithPatch(regHigh, high);
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64HIGH_OFFSET));
+ return movlWithPatch(regHigh, high);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(addr + index*scale) where addr can be patched.
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
+ Register dest)
+ {
+ masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *src where src can be patched.
+ CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movsbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movswl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzwl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *dest where dest can be patched.
+ CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movb_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movw_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movl_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+
+ static bool canUseInSingleByteInstruction(Register reg) {
+ return X86Encoding::HasSubregL(reg.encoding());
+ }
+};
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, if we have no
+// more actual argument registers to use, we will fall back on the
+// CallTempRegs that don't overlap the argument registers, and only fail once
+// those run out too.
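+// (Since this ABI passes all integer arguments on the stack, usedFloatArgs is
+// ignored and the answer is always simply the next CallTempNonArgReg.)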
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Assembler_x86_h */
diff --git a/js/src/jit/x86/Bailouts-x86.cpp b/js/src/jit/x86/Bailouts-x86.cpp
new file mode 100644
index 000000000..42dc1468c
--- /dev/null
+++ b/js/src/jit/x86/Bailouts-x86.cpp
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+#if defined(_WIN32)
+# pragma pack(push, 1)
+#endif
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ uintptr_t frameClassId_;
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+ uintptr_t snapshotOffset_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None())
+ return frameSize_;
+ return frameClass().frameSize();
+ }
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
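+    // A snapshot offset is only pushed for frames without a fixed size class;
+    // table-based bailouts record a table offset instead and recover the
+    // snapshot offset later, so the parent frame's data starts either after
+    // the whole struct or just before the snapshotOffset_ slot.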
+ uint8_t* parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None())
+ return (uint8_t*)this + sizeof(BailoutStack);
+ return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#if defined(_WIN32)
+# pragma pack(pop)
+#endif
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ JitActivation* activation = activations.activation()->asJit();
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
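+    // The recorded table offset is the return address pushed by the call in
+    // the bailout table entry, so it points at the start of the *next* entry;
+    // hence the - 1 when recovering the bailout ID below.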
+ JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
+ JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());
+
+ MOZ_ASSERT(tableOffset >= tableStart &&
+ tableOffset < tableStart + code->instructionsSize());
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
+ uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/x86/BaseAssembler-x86.h b/js/src/jit/x86/BaseAssembler-x86.h
new file mode 100644
index 000000000..5b16311d0
--- /dev/null
+++ b/js/src/jit/x86/BaseAssembler-x86.h
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_BaseAssembler_x86_h
+#define jit_x86_BaseAssembler_x86_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX86 : public BaseAssembler
+{
+ public:
+
+ // Arithmetic operations:
+
+ void adcl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("adcl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_im(int32_t imm, const void* addr)
+ {
+ spew("adcl %d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("adcl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, src, dst);
+ }
+
+ void sbbl_ir(int32_t imm, RegisterID dst)
+ {
+ spew("sbbl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SBB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SBB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sbbl_rr(RegisterID src, RegisterID dst)
+ {
+ spew("sbbl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, src, dst);
+ }
+
+ using BaseAssembler::andl_im;
+ void andl_im(int32_t imm, const void* addr)
+ {
+ spew("andl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::orl_im;
+ void orl_im(int32_t imm, const void* addr)
+ {
+ spew("orl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::subl_im;
+ void subl_im(int32_t imm, const void* addr)
+ {
+ spew("subl $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void shldl_irr(int32_t imm, RegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("shldl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHLD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
+
+ void shrdl_irr(int32_t imm, RegisterID src, RegisterID dst)
+ {
+ MOZ_ASSERT(imm < 32);
+ spew("shrdl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHRD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
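+
+    // Together with adcl/sbbl above, these double-precision shifts are the
+    // building blocks the 32-bit macro-assembler uses to synthesize 64-bit
+    // shifts and add/sub sequences.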
+
+ // SSE operations:
+
+ using BaseAssembler::vcvtsi2sd_mr;
+ void vcvtsi2sd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, address, src0, dst);
+ }
+
+ using BaseAssembler::vmovaps_mr;
+ void vmovaps_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, address, invalid_xmm, dst);
+ }
+
+ using BaseAssembler::vmovdqa_mr;
+ void vmovdqa_mr(const void* address, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, address, invalid_xmm, dst);
+ }
+
+ void vhaddpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ twoByteOpSimdFlags("vhaddpd", VEX_PD, OP2_HADDPD, src, dst);
+ }
+
+ void vsubpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, src1, src0, dst);
+ }
+ void vsubpd_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, offset, base, src0, dst);
+ }
+ void vsubpd_mr(const void* address, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, address, src0, dst);
+ }
+
+ void vpunpckldq_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ, src1, src0, dst);
+ }
+ void vpunpckldq_mr(int32_t offset, RegisterID base, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ, offset, base, src0, dst);
+ }
+ void vpunpckldq_mr(const void* addr, XMMRegisterID src0, XMMRegisterID dst)
+ {
+ twoByteOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ, addr, src0, dst);
+ }
+
+ void fild_m(int32_t offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_FILD, offset, base, FILD_OP_64);
+ }
+
+ // Misc instructions:
+
+ void pusha()
+ {
+ spew("pusha");
+ m_formatter.oneByteOp(OP_PUSHA);
+ }
+
+ void popa()
+ {
+ spew("popa");
+ m_formatter.oneByteOp(OP_POPA);
+ }
+};
+
+typedef BaseAssemblerX86 BaseAssemblerSpecific;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_BaseAssembler_x86_h */
diff --git a/js/src/jit/x86/BaselineCompiler-x86.cpp b/js/src/jit/x86/BaselineCompiler-x86.cpp
new file mode 100644
index 000000000..8520fd8c7
--- /dev/null
+++ b/js/src/jit/x86/BaselineCompiler-x86.cpp
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/BaselineCompiler-x86.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerX86::BaselineCompilerX86(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : BaselineCompilerX86Shared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/x86/BaselineCompiler-x86.h b/js/src/jit/x86/BaselineCompiler-x86.h
new file mode 100644
index 000000000..a0311bc55
--- /dev/null
+++ b/js/src/jit/x86/BaselineCompiler-x86.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_BaselineCompiler_x86_h
+#define jit_x86_BaselineCompiler_x86_h
+
+#include "jit/x86-shared/BaselineCompiler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerX86 : public BaselineCompilerX86Shared
+{
+ protected:
+ BaselineCompilerX86(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerX86 BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_BaselineCompiler_x86_h */
diff --git a/js/src/jit/x86/BaselineIC-x86.cpp b/js/src/jit/x86/BaselineIC-x86.cpp
new file mode 100644
index 000000000..a2227ab0a
--- /dev/null
+++ b/js/src/jit/x86/BaselineIC-x86.cpp
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.cmp32(R0.payloadReg(), R1.payloadReg());
+ masm.setCC(cond, R0.payloadReg());
+ masm.movzbl(R0.payloadReg(), R0.payloadReg());
+
+ // Box the result and return
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
new file mode 100644
index 000000000..1fb431894
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -0,0 +1,1298 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/CodeGenerator-x86.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+
+#include "jsnum.h"
+
+#include "jit/IonCaches.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using JS::GenericNaN;
+
+CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm)
+{
+}
+
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
+ if (frameDepth < FrameSizes[i])
+ return FrameSizeClass(i);
+ }
+
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+ MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
+
+ return FrameSizes[class_];
+}
+
+ValueOperand
+CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorX86::ToOutValue(LInstruction* ins)
+{
+ Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void
+CodeGeneratorX86::visitValue(LValue* value)
+{
+ const ValueOperand out = ToOutValue(value);
+ masm.moveValue(value->value(), out);
+}
+
+void
+CodeGeneratorX86::visitBox(LBox* box)
+{
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ DebugOnly<const LAllocation*> a = box->getOperand(0);
+ MOZ_ASSERT(!a->isConstant());
+
+ // On x86, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void
+CodeGeneratorX86::visitBoxFloatingPoint(LBoxFloatingPoint* box)
+{
+ const LAllocation* in = box->getOperand(0);
+ const ValueOperand out = ToOutValue(box);
+
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, ScratchFloat32Reg);
+ reg = ScratchFloat32Reg;
+ }
+ masm.boxDouble(reg, out);
+}
+
+void
+CodeGeneratorX86::visitUnbox(LUnbox* unbox)
+{
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+
+ if (mir->fallible()) {
+ masm.cmp32(ToOperand(unbox->type()), Imm32(MIRTypeToTag(mir->type())));
+ bailoutIf(Assembler::NotEqual, unbox->snapshot());
+ }
+}
+
+void
+CodeGeneratorX86::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ Label notBoolean, done;
+ masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
+ {
+ if (rhs->isConstant())
+ masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
+ else
+ masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
+ masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
+ masm.jump(&done);
+ }
+ masm.bind(&notBoolean);
+ {
+ masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
+ jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);
+
+ if (rhs->isConstant())
+ masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
+ else
+ masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
+ emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX86::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ Label notEqual, done;
+ masm.cmp32(lhs.typeReg(), rhs.typeReg());
+ masm.j(Assembler::NotEqual, &notEqual);
+ {
+ masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
+ masm.emitSet(cond, output);
+ masm.jump(&done);
+ }
+ masm.bind(&notEqual);
+ {
+ masm.move32(Imm32(cond == Assembler::NotEqual), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
+
+ masm.cmp32(lhs.typeReg(), rhs.typeReg());
+ jumpToBlock(notEqual, Assembler::NotEqual);
+ masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
+ emitBranch(cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorX86::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+
+ if (input != temp)
+ masm.mov(input, temp);
+
+ // Beware: convertUInt32ToDouble clobbers input.
+ masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorX86::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ if (input != temp)
+ masm.mov(input, temp);
+
+ // Beware: convertUInt32ToFloat32 clobbers input.
+ masm.convertUInt32ToFloat32(temp, output);
+}
+
+void
+CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ const MLoadTypedArrayElementStatic* mir = ins->mir();
+ Scalar::Type accessType = mir->accessType();
+ MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32);
+
+ Register ptr = ToRegister(ins->ptr());
+ AnyRegister out = ToAnyRegister(ins->output());
+ OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+ uint32_t offset = mir->offset();
+
+ if (mir->needsBoundsCheck()) {
+ MOZ_ASSERT(offset == 0);
+ if (!mir->fallible()) {
+ ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+ addOutOfLineCode(ool, ins->mir());
+ }
+
+ masm.cmpPtr(ptr, ImmWord(mir->length()));
+ if (ool)
+ masm.j(Assembler::AboveOrEqual, ool->entry());
+ else
+ bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
+ }
+
+ Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
+ switch (accessType) {
+ case Scalar::Int8: masm.movsblWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8: masm.movzblWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Int16: masm.movswlWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Int32:
+ case Scalar::Uint32: masm.movlWithPatch(srcAddr, out.gpr()); break;
+ case Scalar::Float32: masm.vmovssWithPatch(srcAddr, out.fpu()); break;
+ case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("Unexpected type");
+ }
+
+ if (accessType == Scalar::Float64)
+ masm.canonicalizeDouble(out.fpu());
+ if (accessType == Scalar::Float32)
+ masm.canonicalizeFloat(out.fpu());
+
+ if (ool)
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::emitWasmCall(LWasmCallBase* ins)
+{
+ MWasmCall* mir = ins->mir();
+
+ emitWasmCallBase(ins);
+
+ if (IsFloatingPointType(mir->type()) && mir->callee().which() == wasm::CalleeDesc::Builtin) {
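+ // Builtin callees follow the native ABI and return floating-point
+ // results in x87 st(0); spill st(0) to the stack and reload it into
+ // the SSE return register that wasm code expects.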
+ if (mir->type() == MIRType::Float32) {
+ masm.reserveStack(sizeof(float));
+ Operand op(esp, 0);
+ masm.fstp32(op);
+ masm.loadFloat32(op, ReturnFloat32Reg);
+ masm.freeStack(sizeof(float));
+ } else {
+ MOZ_ASSERT(mir->type() == MIRType::Double);
+ masm.reserveStack(sizeof(double));
+ Operand op(esp, 0);
+ masm.fstp(op);
+ masm.loadDouble(op, ReturnDoubleReg);
+ masm.freeStack(sizeof(double));
+ }
+ }
+}
+
+void
+CodeGeneratorX86::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCall(ins);
+}
+
+void
+CodeGeneratorX86::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCall(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorX86::emitWasmLoad(T* ins)
+{
+ const MWasmLoad* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+
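+ // A bogus pointer means the base was a known constant (zero), so the
+ // access is emitted against an absolute address that is later patched
+ // with the heap base.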
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress(offset))
+ : Operand(ToRegister(ptr), offset);
+
+ if (mir->type() == MIRType::Int64)
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ else
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins)
+{
+ emitWasmLoad(ins);
+}
+
+void
+CodeGeneratorX86::visitWasmLoadI64(LWasmLoadI64* ins)
+{
+ emitWasmLoad(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorX86::emitWasmStore(T* ins)
+{
+ const MWasmStore* mir = ins->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ const LAllocation* ptr = ins->ptr();
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress(offset))
+ : Operand(ToRegister(ptr), offset);
+
+ if (mir->access().type() == Scalar::Int64) {
+ Register64 value = ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
+ masm.wasmStoreI64(mir->access(), value, dstAddr);
+ } else {
+ AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
+ masm.wasmStore(mir->access(), value, dstAddr);
+ }
+}
+
+void
+CodeGeneratorX86::visitWasmStore(LWasmStore* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX86::visitWasmStoreI64(LWasmStoreI64* ins)
+{
+ emitWasmStore(ins);
+}
+
+void
+CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+ AnyRegister out = ToAnyRegister(ins->output());
+
+ Scalar::Type accessType = mir->accessType();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
+ OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+ if (mir->needsBoundsCheck()) {
+ ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+ addOutOfLineCode(ool, mir);
+
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ool->entry());
+ }
+
+ Operand srcAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress())
+ : Operand(ToRegister(ptr), 0);
+
+ masm.wasmLoad(mir->access(), srcAddr, out);
+
+ if (ool)
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MStoreTypedArrayElementStatic* mir = ins->mir();
+ Scalar::Type accessType = mir->accessType();
+ Register ptr = ToRegister(ins->ptr());
+ const LAllocation* value = ins->value();
+
+ canonicalizeIfDeterministic(accessType, value);
+
+ uint32_t offset = mir->offset();
+ MOZ_ASSERT_IF(mir->needsBoundsCheck(), offset == 0);
+
+ Label rejoin;
+ if (mir->needsBoundsCheck()) {
+ MOZ_ASSERT(offset == 0);
+ masm.cmpPtr(ptr, ImmWord(mir->length()));
+ masm.j(Assembler::AboveOrEqual, &rejoin);
+ }
+
+ Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
+ switch (accessType) {
+ case Scalar::Int8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ masm.movbWithPatch(ToRegister(value), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.movwWithPatch(ToRegister(value), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.movlWithPatch(ToRegister(value), dstAddr);
+ break;
+ case Scalar::Float32:
+ masm.vmovssWithPatch(ToFloatRegister(value), dstAddr);
+ break;
+ case Scalar::Float64:
+ masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr);
+ break;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+
+ if (rejoin.used())
+ masm.bind(&rejoin);
+}
+
+void
+CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->offset() == 0);
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* value = ins->value();
+
+ Scalar::Type accessType = mir->accessType();
+ MOZ_ASSERT(!Scalar::isSimdType(accessType));
+ canonicalizeIfDeterministic(accessType, value);
+
+ Operand dstAddr = ptr->isBogus()
+ ? Operand(PatchedAbsoluteAddress())
+ : Operand(ToRegister(ptr), 0);
+
+ Label rejoin;
+ if (mir->needsBoundsCheck())
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), &rejoin);
+
+ masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
+
+ if (rejoin.used())
+ masm.bind(&rejoin);
+}
+
+// Set up the heap address in addrTemp for an atomic access: copy the pointer
+// and add in the (patched) heap base, so the operation can use a plain
+// Address.
+
+void
+CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg)
+{
+ // Add in the actual heap pointer explicitly, to avoid opening up
+ // the abstraction that is atomicBinopToTypedIntArray at this time.
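+ // The zero immediate below is only a placeholder; the MemoryPatch entry
+ // records the patch site so the real heap base can be written in later.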
+ masm.movl(ptrReg, addrTemp);
+ masm.addlWithPatch(Imm32(0), addrTemp);
+ masm.append(wasm::MemoryPatch(masm.size()));
+}
+
+void
+CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register addrTemp = ToRegister(ins->addrTemp());
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ memAddr,
+ oldval,
+ newval,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register addrTemp = ToRegister(ins->addrTemp());
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ memAddr,
+ value,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant()) {
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ Imm32(ToInt32(value)),
+ memAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+ } else {
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ ToRegister(value),
+ memAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
+ }
+}
+
+void
+CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(mir->access().offset() == 0);
+ MOZ_ASSERT(!mir->hasUses());
+
+ Scalar::Type accessType = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ asmJSAtomicComputeAddress(addrTemp, ptrReg);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
+ else
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
+}
+
+void
+CodeGeneratorX86::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+ MIRType type = mir->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
+ break;
+ case MIRType::Float32:
+ label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Double:
+ label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ // Aligned access: code is aligned on PageSize and there is padding
+ // before the global data section.
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ label = masm.vmovdqaWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ case MIRType::Float32x4:
+ label = masm.vmovapsWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
+ }
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX86::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ MWasmLoadGlobalVar* mir = ins->mir();
+
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ Register64 output = ToOutRegister64(ins);
+
+ CodeOffset labelLow = masm.movlWithPatch(PatchedAbsoluteAddress(), output.low);
+ masm.append(wasm::GlobalAccess(labelLow, mir->globalDataOffset() + INT64LOW_OFFSET));
+ CodeOffset labelHigh = masm.movlWithPatch(PatchedAbsoluteAddress(), output.high);
+ masm.append(wasm::GlobalAccess(labelHigh, mir->globalDataOffset() + INT64HIGH_OFFSET));
+}
+
+void
+CodeGeneratorX86::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+
+ MIRType type = mir->value()->type();
+ MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+
+ CodeOffset label;
+ switch (type) {
+ case MIRType::Int32:
+ label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ case MIRType::Float32:
+ label = masm.vmovssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ case MIRType::Double:
+ label = masm.vmovsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ // Aligned access: code is aligned on PageSize and there is padding
+ // before the global data section.
+ case MIRType::Int32x4:
+ case MIRType::Bool32x4:
+ label = masm.vmovdqaWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ case MIRType::Float32x4:
+ label = masm.vmovapsWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
+ break;
+ default:
+ MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
+ }
+ masm.append(wasm::GlobalAccess(label, mir->globalDataOffset()));
+}
+
+void
+CodeGeneratorX86::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ MWasmStoreGlobalVar* mir = ins->mir();
+
+ MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(ins->value());
+
+ CodeOffset labelLow = masm.movlWithPatch(input.low, PatchedAbsoluteAddress());
+ masm.append(wasm::GlobalAccess(labelLow, mir->globalDataOffset() + INT64LOW_OFFSET));
+ CodeOffset labelHigh = masm.movlWithPatch(input.high, PatchedAbsoluteAddress());
+ masm.append(wasm::GlobalAccess(labelHigh, mir->globalDataOffset() + INT64HIGH_OFFSET));
+}
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86>
+{
+ LTruncateDToInt32* ins_;
+
+ public:
+ explicit OutOfLineTruncate(LTruncateDToInt32* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGeneratorX86* codegen) {
+ codegen->visitOutOfLineTruncate(this);
+ }
+ LTruncateDToInt32* ins() const {
+ return ins_;
+ }
+};
+
+class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86>
+{
+ LTruncateFToInt32* ins_;
+
+ public:
+ explicit OutOfLineTruncateFloat32(LTruncateFToInt32* ins)
+ : ins_(ins)
+ { }
+
+ void accept(CodeGeneratorX86* codegen) {
+ codegen->visitOutOfLineTruncateFloat32(this);
+ }
+ LTruncateFToInt32* ins() const {
+ return ins_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+void
+CodeGeneratorX86::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncate* ool = new(alloc()) OutOfLineTruncate(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncateFloat32* ool = new(alloc()) OutOfLineTruncateFloat32(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool)
+{
+ LTruncateDToInt32* ins = ool->ins();
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
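+ // SSE3 provides fisttp, which truncates toward zero regardless of the
+ // x87 rounding mode, so the 64-bit truncation can go through the x87
+ // stack without touching the control word.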
+ Label failPopDouble;
+ // Push double.
+ masm.subl(Imm32(sizeof(double)), esp);
+ masm.storeDouble(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);
+
+ // Load double, perform 64-bit truncation.
+ masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);
+
+ // Load low word, pop double and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopDouble);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ins->tempFloat());
+
+ // Try to convert doubles representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
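+ // For example, 4294967298.0 (2^32 + 2) has 2^32 subtracted, giving 2.0,
+ // which truncates exactly to 2 and matches the modulo-2^32 result for
+ // the original value.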
+ masm.zeroDouble(ScratchDoubleReg);
+ masm.vucomisd(ScratchDoubleReg, input);
+ masm.j(Assembler::Parity, &fail);
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantDouble(4294967296.0, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantDouble(-4294967296.0, temp);
+ masm.bind(&skip);
+ }
+
+ masm.addDouble(input, temp);
+ masm.vcvttsd2si(temp, output);
+ masm.vcvtsi2sd(output, ScratchDoubleReg, ScratchDoubleReg);
+
+ masm.vucomisd(ScratchDoubleReg, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ saveVolatile(output);
+
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(input, MoveOp::DOUBLE);
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::ToInt32);
+ else
+ masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+ masm.storeCallInt32Result(output);
+
+ restoreVolatile(output);
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool)
+{
+ LTruncateFToInt32* ins = ool->ins();
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopFloat;
+
+ // Push the float32, but reserve 64 bits of stack space so that the value popped by fisttp fits.
+ masm.subl(Imm32(sizeof(uint64_t)), esp);
+ masm.storeFloat32(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopFloat);
+
+ // Load the float and perform a 64-bit truncation.
+ masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);
+
+ // Load the low word, pop the 64 bits and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopFloat);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ins->tempFloat());
+
+ // Try to convert float32 values representing integers within 2^32 of a
+ // signed integer, by adding/subtracting 2^32 and then trying to convert to
+ // int32. This has to be an exact conversion, as otherwise the truncation
+ // works incorrectly on the modified value.
+ masm.zeroFloat32(ScratchFloat32Reg);
+ masm.vucomiss(ScratchFloat32Reg, input);
+ masm.j(Assembler::Parity, &fail);
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantFloat32(4294967296.f, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantFloat32(-4294967296.f, temp);
+ masm.bind(&skip);
+ }
+
+ masm.addFloat32(input, temp);
+ masm.vcvttss2si(temp, output);
+ masm.vcvtsi2ss(output, ScratchFloat32Reg, ScratchFloat32Reg);
+
+ masm.vucomiss(ScratchFloat32Reg, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ saveVolatile(output);
+
+ masm.push(input);
+ masm.setupUnalignedABICall(output);
+ masm.vcvtss2sd(input, input, input);
+ masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);
+
+ if (gen->compilingWasm())
+ masm.callWithABI(wasm::SymbolicAddress::ToInt32);
+ else
+ masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
+
+ masm.storeCallInt32Result(output);
+ masm.pop(input);
+
+ restoreVolatile(output);
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void
+CodeGeneratorX86::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
+ masm.move32(Imm32(1), output);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.xorl(output, output);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void
+CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // We are free to clobber all registers, since this is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (lir->mir()->isMod())
+ masm.xor64(output, output);
+ else
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::ModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::DivI64);
+
+ // The result is in edx:eax; move it into the output register.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // We are free to clobber all registers, since this is a call instruction.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::UModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::UDivI64);
+
+ // The result is in edx:eax; move it into the output register.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+}
+
+void
+CodeGeneratorX86::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ Register64 falseExpr = ToRegister64(lir->falseExpr());
+ Register64 out = ToOutRegister64(lir);
+
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
+ Label done;
+ masm.branchTest32(Assembler::NonZero, cond, cond, &done);
+ masm.movl(falseExpr.low, out.low);
+ masm.movl(falseExpr.high, out.high);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
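+ // Push the high half first so the low half ends up at the lower address
+ // (little endian), then reload the 64-bit pattern into an XMM register.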
+ masm.Push(input.high);
+ masm.Push(input.low);
+ masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
+ masm.freeStack(sizeof(uint64_t));
+}
+
+void
+CodeGeneratorX86::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ Register64 output = ToOutRegister64(lir);
+
+ masm.reserveStack(sizeof(uint64_t));
+ masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
+ masm.Pop(output.low);
+ masm.Pop(output.high);
+}
+
+void
+CodeGeneratorX86::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ Register64 output = ToOutRegister64(lir);
+ Register input = ToRegister(lir->input());
+
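+ // The unsigned case just zeroes the high word; the signed case uses cdq,
+ // which sign-extends eax into edx.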
+ if (lir->mir()->isUnsigned()) {
+ if (output.low != input)
+ masm.movl(input, output.low);
+ masm.xorl(output.high, output.high);
+ } else {
+ MOZ_ASSERT(output.low == input);
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(output.high == edx);
+ masm.cdq();
+ }
+}
+
+void
+CodeGeneratorX86::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf())
+ masm.movl(ToRegister(input.low()), output);
+ else
+ masm.movl(ToRegister(input.high()), output);
+}
+
+void
+CodeGeneratorX86::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.clz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void
+CodeGeneratorX86::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ctz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void
+CodeGeneratorX86::visitNotI64(LNotI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
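+ // A 64-bit value is zero exactly when the OR of its two halves is zero.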
+ if (input.high == output) {
+ masm.orl(input.low, output);
+ } else if (input.low == output) {
+ masm.orl(input.high, output);
+ } else {
+ masm.movl(input.high, output);
+ masm.orl(input.low, output);
+ }
+
+ masm.cmpl(Imm32(0), output);
+ masm.emitSet(Assembler::Equal, output);
+}
+
+void
+CodeGeneratorX86::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ FloatRegister floatTemp = ToFloatRegister(lir->temp());
+
+ Label fail, convert;
+
+ MOZ_ASSERT(mir->input()->type() == MIRType::Double || mir->input()->type() == MIRType::Float32);
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (mir->input()->type() == MIRType::Float32) {
+ if (mir->isUnsigned())
+ masm.wasmTruncateFloat32ToUInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ else
+ masm.wasmTruncateFloat32ToInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ } else {
+ if (mir->isUnsigned())
+ masm.wasmTruncateDoubleToUInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ else
+ masm.wasmTruncateDoubleToInt64(input, output, ool->entry(), ool->rejoin(), floatTemp);
+ }
+}
+
+void
+CodeGeneratorX86::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToDouble(input, output, temp);
+ else
+ masm.convertInt64ToDouble(input, output);
+ } else {
+ if (lir->mir()->isUnsigned())
+ masm.convertUInt64ToFloat32(input, output, temp);
+ else
+ masm.convertInt64ToFloat32(input, output);
+ }
+}
+
+void
+CodeGeneratorX86::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
+ masm.testl(input.high, input.high);
+ jumpToBlock(lir->ifTrue(), Assembler::NonZero);
+ masm.testl(input.low, input.low);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
diff --git a/js/src/jit/x86/CodeGenerator-x86.h b/js/src/jit/x86/CodeGenerator-x86.h
new file mode 100644
index 000000000..1cc8e183a
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_CodeGenerator_x86_h
+#define jit_x86_CodeGenerator_x86_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+#include "jit/x86/Assembler-x86.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate;
+class OutOfLineTruncateFloat32;
+
+class CodeGeneratorX86 : public CodeGeneratorX86Shared
+{
+ private:
+ CodeGeneratorX86* thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ template <typename T> void emitWasmLoad(T* ins);
+ template <typename T> void emitWasmStore(T* ins);
+
+ public:
+ CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitBox(LBox* box);
+ void visitBoxFloatingPoint(LBoxFloatingPoint* box);
+ void visitUnbox(LUnbox* unbox);
+ void visitValue(LValue* value);
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ void visitTruncateFToInt32(LTruncateFToInt32* ins);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void emitWasmCall(LWasmCallBase* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+
+ void visitOutOfLineTruncate(OutOfLineTruncate* ool);
+ void visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool);
+
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitWasmSelectI64(LWasmSelectI64* lir);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitClzI64(LClzI64* lir);
+ void visitCtzI64(LCtzI64* lir);
+ void visitNotI64(LNotI64* lir);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+
+ private:
+ void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg);
+};
+
+typedef CodeGeneratorX86 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_CodeGenerator_x86_h */
diff --git a/js/src/jit/x86/LIR-x86.h b/js/src/jit/x86/LIR-x86.h
new file mode 100644
index 000000000..f49ec7b87
--- /dev/null
+++ b/js/src/jit/x86/LIR-x86.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_LIR_x86_h
+#define jit_x86_LIR_x86_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp, MIRType type)
+ : type_(type)
+ {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* payload() {
+ return getOperand(0);
+ }
+ const LAllocation* type() {
+ return getOperand(1);
+ }
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : type_(type)
+ {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<INT64_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp)
+ {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_LIR_x86_h */
diff --git a/js/src/jit/x86/LOpcodes-x86.h b/js/src/jit/x86/LOpcodes-x86.h
new file mode 100644
index 000000000..70c2ff384
--- /dev/null
+++ b/js/src/jit/x86/LOpcodes-x86.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_LOpcodes_x86_h
+#define jit_x86_LOpcodes_x86_h
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(BoxFloatingPoint) \
+ _(DivOrModConstantI) \
+ _(SimdValueInt32x4) \
+ _(SimdValueFloat32x4) \
+ _(UDivOrMod) \
+ _(UDivOrModConstant) \
+ _(UDivOrModI64) \
+ _(DivOrModI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint)
+
+#endif /* jit_x86_LOpcodes_x86_h */
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
new file mode 100644
index 000000000..5dbaefe5b
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -0,0 +1,658 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Lowering-x86.h"
+
+#include "jit/MIR.h"
+#include "jit/x86/Assembler-x86.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation
+LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
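+// x86 byte operations can only use the low bytes of eax, ebx, ecx and edx,
+// so the byte-op helpers below simply pin their value (or temp) to eax.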
+LAllocation
+LIRGeneratorX86::useByteOpRegister(MDefinition* mir)
+{
+ return useFixed(mir, eax);
+}
+
+LAllocation
+LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useFixedAtStart(mir, eax);
+}
+
+LAllocation
+LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useFixed(mir, eax);
+}
+
+LDefinition
+LIRGeneratorX86::tempByteOpRegister()
+{
+ return tempFixed(eax);
+}
+
+void
+LIRGeneratorX86::visitBox(MBox* box)
+{
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wraps a floating-point value, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner), tempCopy(inner, 0),
+ inner->type()), box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new(alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new(alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorX86::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* inner = unbox->getOperand(0);
+
+ if (inner->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+ // An unbox on x86 reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir = new(alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload register.
+ LUnbox* lir = new(alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::ANY));
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
+
+void
+LIRGeneratorX86::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void
+LIRGeneratorX86::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+
+void
+LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void
+LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void
+LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void
+LIRGeneratorX86::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ bool needsTemp = true;
+
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorX86Shared::visitMulI64.
+ if (constant >= -1 && constant <= 2)
+ needsTemp = false;
+ if (int64_t(1) << shift == constant)
+ needsTemp = false;
+ }
+
+ // MulI64 on x86 needs the output to be in edx:eax.
+ ins->setInt64Operand(0, useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ if (needsTemp)
+ ins->setTemp(0, temp());
+
+ defineInt64Fixed(ins, mir, LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+}
+
+void
+LIRGeneratorX86::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
+}
+
+void
+LIRGeneratorX86::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/ true);
+}
+
+void
+LIRGeneratorX86::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
+}
+
+void
+LIRGeneratorX86::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitWasmLoad(MWasmLoad* ins)
+{
+ if (ins->type() != MIRType::Int64) {
+ lowerWasmLoad(ins);
+ return;
+ }
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+
+ Scalar::Type accessType = ins->access().type();
+ if (accessType == Scalar::Int8 || accessType == Scalar::Int16 || accessType == Scalar::Int32) {
+ // We use cdq to sign-extend the result and cdq demands these registers.
+ defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ defineInt64(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
+
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8: case Scalar::Uint8:
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ valueAlloc = useFixed(ins->value(), eax);
+ break;
+ case Scalar::Int16: case Scalar::Uint16:
+ case Scalar::Int32: case Scalar::Uint32:
+ case Scalar::Float32: case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ // For now, don't allow constant values. The immediate operand affects
+ // instruction layout which affects patching.
+ valueAlloc = useRegisterAtStart(ins->value());
+ break;
+ case Scalar::Int64: {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
+ auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ add(lir, ins);
+ return;
+ }
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // For simplicity, require a register if we're going to emit a bounds-check
+ // branch, so that we don't have special cases for constants.
+ LAllocation baseAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(base)
+ : useRegisterOrZeroAtStart(base);
+
+ define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ // For simplicity, require a register if we're going to emit a bounds-check
+ // branch, so that we don't have special cases for constants.
+ LAllocation baseAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(base)
+ : useRegisterOrZeroAtStart(base);
+
+ LAsmJSStoreHeap* lir = nullptr;
+ switch (ins->access().type()) {
+ case Scalar::Int8: case Scalar::Uint8:
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax));
+ break;
+ case Scalar::Int16: case Scalar::Uint16:
+ case Scalar::Int32: case Scalar::Uint32:
+ case Scalar::Float32: case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ // For now, don't allow constant values. The immediate operand affects
+ // instruction layout which affects patching.
+ lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("NYI");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ // The code generated for StoreTypedArrayElementStatic is identical to that
+ // for AsmJSStoreHeap, and the same concerns apply.
+ LStoreTypedArrayElementStatic* lir;
+ switch (ins->accessType()) {
+ case Scalar::Int8: case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ lir = new(alloc()) LStoreTypedArrayElementStatic(useRegister(ins->ptr()),
+ useFixed(ins->value(), eax));
+ break;
+ case Scalar::Int16: case Scalar::Uint16:
+ case Scalar::Int32: case Scalar::Uint32:
+ case Scalar::Float32: case Scalar::Float64:
+ lir = new(alloc()) LStoreTypedArrayElementStatic(useRegisterAtStart(ins->ptr()),
+ useRegisterAtStart(ins->value()));
+ break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ add(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Register allocation:
+ //
+ // The output may not be used, but eax will be clobbered regardless
+ // so pin the output to eax.
+ //
+ // oldval must be in a register.
+ //
+ // newval must be in a register. If the source is a byte array
+ // then newval must be a register that has a byte size: this must
+ // be ebx, ecx, or edx (eax is taken).
+ //
+ // Bug #1077036 describes some optimization opportunities.
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base), oldval, newval);
+
+ lir->setAddrTemp(temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void
+LIRGeneratorX86::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ LAsmJSAtomicExchangeHeap* lir =
+ new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
+
+ lir->setAddrTemp(temp());
+ if (byteSize(ins->access().type()) == 1)
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else
+ define(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR. These can all take an immediate.
+
+ if (!ins->hasUses()) {
+ LAllocation value;
+ if (byteArray && !ins->value()->isConstant())
+ value = useFixed(ins->value(), ebx);
+ else
+ value = useRegisterOrConstant(ins->value());
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base), value);
+ lir->setAddrTemp(temp());
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD:
+ //
+ // movl value, output
+ // lock xaddl output, mem
+ //
+ // For the 8-bit variants XADD needs a byte register for the
+ // output only; we can still set up with movl, just pin the output
+ // to eax (or ebx / ecx / edx).
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop:
+ //
+ // movl *mem, eax
+ // L: mov eax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads eax also
+ // jnz L
+ // ; result in eax
+ //
+ // Note the placement of L: cmpxchg will update eax with *mem if
+ // *mem does not have the expected value, so reloading it at the
+ // top of the loop would be redundant.
+ //
+ // We want to fix eax as the output. We also need a temp for
+ // the intermediate value.
+ //
+ // For the 8-bit variants the temp must have a byte register.
+ //
+ // There are optimization opportunities:
+ // - better 8-bit register allocation and instruction selection, Bug #1077036.
+
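+ // In the cases below, bitOp is true for AND/OR/XOR (the CMPXCHG-loop
+ // cases) and false for ADD/SUB (the XADD cases).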
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LAllocation value;
+
+ if (byteArray) {
+ value = useFixed(ins->value(), ebx);
+ if (bitOp)
+ tempDef = tempFixed(ecx);
+ } else if (bitOp || ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ if (bitOp)
+ tempDef = temp();
+ } else {
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base), value, tempDef);
+
+ lir->setAddrTemp(temp());
+ if (byteArray || bitOp)
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ else if (ins->value()->isConstant())
+ define(lir, ins);
+ else
+ defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
+}
+
+void
+LIRGeneratorX86::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorX86::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorX86::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorX86::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorX86::visitSubstr(MSubstr* ins)
+{
+ // Due to the lack of registers on x86, we reuse the string register as a
+ // temporary. As a result we only need two temporary registers and pass a
+ // bogus temporary as the fifth argument.
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ LDefinition::BogusTemp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorX86::visitRandom(MRandom* ins)
+{
+ LRandom *lir = new(alloc()) LRandom(temp(),
+ temp(),
+ temp(),
+ temp(),
+ temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
+
+void
+LIRGeneratorX86::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition temp = tempDouble();
+ defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
+}
+
+void
+LIRGeneratorX86::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ LDefinition maybeTemp =
+ (ins->isUnsigned() && AssemblerX86Shared::HasSSE3()) ? temp() : LDefinition::BogusTemp();
+
+ define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp), ins);
+}
+
+void
+LIRGeneratorX86::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ if (ins->isUnsigned()) {
+ defineInt64(new(alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+ } else {
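+ // The signed case pins the input to eax and the result to edx:eax,
+ // which lets code generation sign-extend with a single cdq
+ // (cdq widens eax into edx:eax).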
+ LExtendInt32ToInt64* lir =
+ new(alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
+ defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ }
+}
diff --git a/js/src/jit/x86/Lowering-x86.h b/js/src/jit/x86/Lowering-x86.h
new file mode 100644
index 000000000..af823bad2
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Lowering_x86_h
+#define jit_x86_Lowering_x86_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX86 : public LIRGeneratorX86Shared
+{
+ public:
+ LIRGeneratorX86(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ // It's a trap! On x86, the 1-byte store can only use one of
+ // {al,bl,cl,dl,ah,bh,ch,dh}. That means if the register allocator
+ // gives us one of {edi,esi,ebp,esp}, we're out of luck. (The formatter
+ // will assert on us.) Ideally, we'd just ask the register allocator to
+ // give us one of {al,bl,cl,dl}. For now, just useFixed(al).
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ inline LDefinition tempToUnbox() {
+ return LDefinition::BogusTemp();
+ }
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitRandom(MRandom* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+ void lowerPhi(MPhi* phi);
+
+ static bool allowTypedElementHoleCheck() {
+ return true;
+ }
+
+ static bool allowStaticTypedArrayAccesses() {
+ return true;
+ }
+};
+
+typedef LIRGeneratorX86 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Lowering_x86_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86-inl.h b/js/src/jit/x86/MacroAssembler-x86-inl.h
new file mode 100644
index 000000000..11520c78f
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -0,0 +1,1075 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_inl_h
+#define jit_x86_MacroAssembler_x86_inl_h
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ movl(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ movl(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ movl(src.low, dest.low);
+ movl(src.high, dest.high);
+}
+
+// ===============================================================
+// Logical functions
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ andl(src, dest);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ andl(imm, dest);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != int32_t(0xFFFFFFFF))
+ andl(imm.low(), dest.low);
+ if (imm.hi().value != int32_t(0xFFFFFFFF))
+ andl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != 0)
+ orl(imm.low(), dest.low);
+ if (imm.hi().value != 0)
+ orl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != 0)
+ xorl(imm.low(), dest.low);
+ if (imm.hi().value != 0)
+ xorl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ orl(src, dest);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ orl(imm, dest);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ andl(src.low, dest.low);
+ andl(src.high, dest.high);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ orl(src.low, dest.low);
+ orl(src.high, dest.high);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ xorl(src.low, dest.low);
+ xorl(src.high, dest.high);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ xorl(src, dest);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ xorl(imm, dest);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ addl(src, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ addl(imm, dest);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ addl(Imm32(imm.value), dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ addl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest)
+{
+ addl(imm, Operand(dest));
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ addl(Operand(src), dest);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
+ addl(src.low, dest.low);
+ adcl(src.high, dest.high);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ addl(imm, dest.low);
+ adcl(Imm32(0), dest.high);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value == 0) {
+ addl(imm.hi(), dest.high);
+ return;
+ }
+ addl(imm.low(), dest.low);
+ adcl(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::addConstantDouble(double d, FloatRegister dest)
+{
+ Double* dbl = getDouble(wasm::RawF64(d));
+ if (!dbl)
+ return;
+ masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ subl(src, dest);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ subl(src, Operand(dest));
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ subl(imm, dest);
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ subl(Operand(addr), dest);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
+ subl(src.low, dest.low);
+ sbbl(src.high, dest.high);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value == 0) {
+ subl(imm.hi(), dest.high);
+ return;
+ }
+ subl(imm.low(), dest.low);
+ sbbl(imm.hi(), dest.high);
+}
+
+// Note: this function clobbers eax and edx.
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
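+ //
+ // Equivalently, writing dest = 2^32*H + L and imm = 2^32*h + l:
+ // dest*imm mod 2^64 = 2^32*((H*l + L*h + HIGH(L*l)) mod 2^32) + LOW(L*l)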
+
+ MOZ_ASSERT(dest.low != eax && dest.low != edx);
+ MOZ_ASSERT(dest.high != eax && dest.high != edx);
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ imull(edx, dest.high);
+
+ // edx:eax = LOW(dest) * LOW(imm);
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ movl(dest.low, eax);
+ mull(edx);
+
+ // HIGH(dest) += edx;
+ addl(edx, dest.high);
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ if (((imm.value >> 32) & 0xFFFFFFFFL) == 5)
+ leal(Operand(dest.low, dest.low, TimesFour), edx);
+ else
+ MOZ_CRASH("Unsupported imm");
+ addl(edx, dest.high);
+
+ // LOW(dest) = eax;
+ movl(eax, dest.low);
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(temp != edx && temp != eax);
+
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(imm.low(), dest.high); // (2)
+ imull(imm.hi(), temp); // (3)
+ addl(dest.high, temp);
+ movl(imm.low(), dest.high);
+ mull(dest.high/*, dest.low*/); // (4) + (1) output in edx:eax (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(src != Register64(edx, eax) && src != Register64(eax, edx));
+
+ // Make sure the rhs.high isn't the dest.high register anymore.
+ // This saves us from doing other register moves.
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(src.low, dest.high); // (2)
+ imull(src.high, temp); // (3)
+ addl(dest.high, temp);
+ movl(src.low, dest.high);
+ mull(dest.high/*, dest.low*/); // (4) + (1) output in edx:eax (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ movl(imm, temp);
+ vmulsd(Operand(temp, 0), dest, dest);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
+ addl(Imm32(1), Operand(dest));
+ Label noOverflow;
+ j(NonZero, &noOverflow);
+ addl(Imm32(1), Operand(dest.offset(4)));
+ bind(&noOverflow);
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
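+ // Negate the 64-bit pair: -(2^32*high + low) is computed as
+ // low = -low (setting CF when low != 0), then high = -(high + CF).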
+ negl(reg.low);
+ adcl(Imm32(0), reg.high);
+ negl(reg.high);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shll(imm, dest);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shldl(imm, dest.low, dest.high);
+ shll(imm, dest.low);
+ return;
+ }
+
+ mov(dest.low, dest.high);
+ shll(Imm32(imm.value & 0x1f), dest.high);
+ xorl(dest.low, dest.low);
+}
+
+void
+MacroAssembler::lshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shldl_cl(srcDest.low, srcDest.high);
+ shll_cl(srcDest.low);
+
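+ // Hardware masks the count to 5 bits for 32-bit shld/shl, so the
+ // sequence above handles counts 0-31; for counts 32-63 (bit 5 of ecx
+ // set) the halves must additionally be moved and the low word cleared.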
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.low, srcDest.high);
+ xorl(srcDest.low, srcDest.low);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shrl(imm, dest);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ shrl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ shrl(Imm32(imm.value & 0x1f), dest.low);
+ xorl(dest.high, dest.high);
+}
+
+void
+MacroAssembler::rshift64(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ shrl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ xorl(srcDest.high, srcDest.high);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ sarl(imm, dest);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ sarl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ sarl(Imm32(imm.value & 0x1f), dest.low);
+ sarl(Imm32(0x1f), dest.high);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest)
+{
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ sarl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ sarl(Imm32(0x1f), srcDest.high);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
+ movl(dest.high, temp);
+ shldl_cl(dest.low, dest.high);
+ shldl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
+ movl(dest.high, temp);
+ shrdl_cl(dest.low, dest.high);
+ shrdl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shldl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shldl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20))
+ xchgl(dest.high, dest.low);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shrdl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shrdl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20))
+ xchgl(dest.high, dest.low);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
+ Label nonzero, zero;
+
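+ // bsr returns the index of the highest set bit, so for a nonzero word
+ // the leading-zero count is 63 - index, computed below as xorl(0x3F).
+ // The 0x7F constant makes an all-zero input yield 0x7F ^ 0x3F = 64.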
+ bsrl(src.high, dest);
+ j(Assembler::Zero, &zero);
+ orl(Imm32(32), dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ bsrl(src.low, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(0x7F), dest);
+
+ bind(&nonzero);
+ xorl(Imm32(0x3F), dest);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ Label done, nonzero;
+
+ bsfl(src.low, dest);
+ j(Assembler::NonZero, &done);
+ bsfl(src.high, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(64), dest);
+ jump(&done);
+
+ bind(&nonzero);
+ orl(Imm32(32), dest);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp)
+{
+ // The tmp register is only needed if there is no native POPCNT.
+
+ MOZ_ASSERT(src.low != tmp && src.high != tmp);
+ MOZ_ASSERT(dest.low != tmp && dest.high != tmp);
+
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
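+ // The 64-bit population count is the sum of the two 32-bit counts; it
+ // fits in 32 bits, so the high word of the result is simply zeroed.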
+ addl(dest.high, dest.low);
+ xorl(dest.high, dest.high);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
+{
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
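+ // Lexicographic comparison: a strict inequality on the high words
+ // decides the result; only when they are equal does the (unsigned)
+ // comparison of the low words matter.
+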
+ cmp32(lhs.high, val.hi());
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, val.low());
+ j(cond3, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, rhs.high);
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, rhs.low);
+ j(cond3, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal)
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
+ else
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), val.secondHalf(), label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ Label done;
+
+ load32(rhs, scratch);
+ if (cond == Assembler::Equal)
+ branch32(Assembler::NotEqual, lhs, scratch, &done);
+ else
+ branch32(Assembler::NotEqual, lhs, scratch, label);
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttss2si(src, dest);
+
+ // vcvttss2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
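+ // (INT32_MIN is the only value for which subtracting 1 overflows, so
+ // the Overflow flag is set exactly in the failure case.)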
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
+{
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ // TODO: x64 supports integers up to 64 bits; here we only support 32 bits
+ // before failing. Implementing this for x86 might give an x86 Kraken win.
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ vcvttsd2si(src, dest);
+
+ // vcvttsd2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ test32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ if (cond == Assembler::Zero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ movl(lhs.low, temp);
+ orl(lhs.high, temp);
+ branchTestPtr(cond, temp, temp, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label)
+{
+ test32(value.payloadReg(), value.payloadReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ branchTestMagic(cond, valaddr, label);
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void
+MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+ // For unsigned conversion, values above INT64_MAX need to be handled separately.
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
+ storeFloat32(floatTemp, dest);
+ loadConstantFloat32(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, Address(dest.base, dest.offset + INT64HIGH_OFFSET));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::truncateDoubleToUInt64(Address src, Address dest, Register temp,
+ FloatRegister floatTemp)
+{
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+ // For unsigned conversion, values above INT64_MAX need to be handled separately.
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
+ load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, Address(dest.base, dest.offset + INT64HIGH_OFFSET));
+
+ bind(&done);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ CodeOffset off = cmp32WithPatch(index, Imm32(0));
+ append(wasm::BoundsCheck(off.offset()));
+
+ j(cond, label);
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
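+ // patchAt is expected to point just past the 4-byte immediate of the
+ // bounds-check cmp (see wasmBoundsCheck above), so writing at index -1
+ // rewrites that immediate with the heap length limit.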
+ reinterpret_cast<uint32_t*>(patchAt)[-1] = limit;
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// Note: this function clobbers the source register.
+void
+MacroAssemblerX86::convertUInt32ToDouble(Register src, FloatRegister dest)
+{
+ // src is [0, 2^32-1]
+ subl(Imm32(0x80000000), src);
+
+ // Now src is [-2^31, 2^31-1] - int range, but not the same value.
+ convertInt32ToDouble(src, dest);
+
+ // dest is now a double with the int range.
+ // correct the double value by adding 0x80000000.
+ asMasm().addConstantDouble(2147483648.0, dest);
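+
+ // Illustrative example: src = 5 wraps to 0x80000005 after the
+ // subtraction, i.e. -2147483643 as a signed int; converting yields
+ // -2147483643.0, and adding 2^31 recovers 5.0.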
+}
+
+// Note: this function clobbers the source register.
+void
+MacroAssemblerX86::convertUInt32ToFloat32(Register src, FloatRegister dest)
+{
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+}
+
+void
+MacroAssemblerX86::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ if (src.payloadReg() != dest.gpr())
+ movl(src.payloadReg(), dest.gpr());
+ }
+}
+
+template <typename T>
+void
+MacroAssemblerX86::loadInt32OrDouble(const T& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(ToPayload(src), dest);
+ jump(&end);
+ bind(&notInt32);
+ loadDouble(src, dest);
+ bind(&end);
+}
+
+template <typename T>
+void
+MacroAssemblerX86::loadUnboxedValue(const T& src, MIRType type, AnyRegister dest)
+{
+ if (dest.isFloat())
+ loadInt32OrDouble(src, dest.fpu());
+ else
+ movl(Operand(src), dest.gpr());
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void
+MacroAssemblerX86::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
+{
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_inl_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
new file mode 100644
index 000000000..754b29c2d
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -0,0 +1,1028 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Casting.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+
+#include "jsscriptinlines.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// vpunpckldq requires its memory operand to be 16-byte aligned.
+// See convertUInt64ToDouble for the details.
+MOZ_ALIGNED_DECL(static const uint64_t, 16) TO_DOUBLE[4] = {
+ 0x4530000043300000LL,
+ 0x0LL,
+ 0x4330000000000000LL,
+ 0x4530000000000000LL
+};
+
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+bool
+MacroAssemblerX86::convertUInt64ToDoubleNeedsTemp()
+{
+ return HasSSE3();
+}
+
+void
+MacroAssemblerX86::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp)
+{
+ // SUBPD needs SSE2, HADDPD needs SSE3.
+ if (!HasSSE3()) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the dest register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ asMasm().Push(src.high);
+ asMasm().Push(src.low);
+ fild(Operand(esp, 0));
+
+ Label notNegative;
+ asMasm().branch32(Assembler::NotSigned, src.high, Imm32(0), &notNegative);
+ double add_constant = 18446744073709551616.0; // 2^64
+ store64(Imm64(mozilla::BitwiseCast<uint64_t>(add_constant)), Address(esp, 0));
+ fld(Operand(esp, 0));
+ faddp();
+ bind(&notNegative);
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), dest);
+ asMasm().freeStack(2*sizeof(intptr_t));
+ return;
+ }
+
+ // The following operation uses the entire 128 bits of the dest XMM register.
+ // Currently the upper 64 bits are free whenever we have access to the lower 64 bits.
+ MOZ_ASSERT(dest.size() == 8);
+ FloatRegister dest128 = FloatRegister(dest.encoding(), FloatRegisters::Simd128);
+
+ // Assume that src is represented as follows:
+ // src = 0x HHHHHHHH LLLLLLLL
+
+ // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
+ // dest = 0x 00000000 00000000 00000000 LLLLLLLL
+ // scratch = 0x 00000000 00000000 00000000 HHHHHHHH
+ vmovd(src.low, dest128);
+ vmovd(src.high, ScratchSimd128Reg);
+
+ // Unpack and interleave dest and scratch to dest:
+ // dest = 0x 00000000 00000000 HHHHHHHH LLLLLLLL
+ vpunpckldq(ScratchSimd128Reg, dest128, dest128);
+
+ // Unpack and interleave dest and a constant C1 to dest:
+ // C1 = 0x 00000000 00000000 45300000 43300000
+ // dest = 0x 45300000 HHHHHHHH 43300000 LLLLLLLL
+ // here, each 64-bit part of dest represents the following double:
+ // HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
+ // LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
+ movePtr(ImmWord((uintptr_t)TO_DOUBLE), temp);
+ vpunpckldq(Operand(temp, 0), dest128, dest128);
+
+ // Subtract a constant C2 from dest, for each 64-bit part:
+ // C2 = 0x 45300000 00000000 43300000 00000000
+ // here, each 64-bit part of C2 represents the following double:
+ // HI(C2) = 0x 1.0000000000000 * 2**84 == 2**84
+ // LO(C2) = 0x 1.0000000000000 * 2**52 == 2**52
+ // after the operation each 64-bit part of dest represents the following:
+ // HI(dest) = double(0x HHHHHHHH 00000000)
+ // LO(dest) = double(0x 00000000 LLLLLLLL)
+ vsubpd(Operand(temp, sizeof(uint64_t) * 2), dest128, dest128);
+
+ // Add HI(dest) and LO(dest) in double and store it into LO(dest),
+ // LO(dest) = double(0x HHHHHHHH 00000000) + double(0x 00000000 LLLLLLLL)
+ // = double(0x HHHHHHHH LLLLLLLL)
+ // = double(src)
+ vhaddpd(dest128, dest128);
+}
+
+void
+MacroAssemblerX86::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ if (maybeInlineDouble(d, dest))
+ return;
+ Double* dbl = getDouble(d);
+ if (!dbl)
+ return;
+ masm.vmovsd_mr(nullptr, dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
+{
+ loadConstantDouble(wasm::RawF64(d), dest);
+}
+
+void
+MacroAssemblerX86::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ if (maybeInlineFloat(f, dest))
+ return;
+ Float* flt = getFloat(f);
+ if (!flt)
+ return;
+ masm.vmovss_mr(nullptr, dest.encoding());
+ propagateOOM(flt->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
+{
+ loadConstantFloat32(wasm::RawF32(f), dest);
+}
+
+void
+MacroAssemblerX86::loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Int(v, dest))
+ return;
+ SimdData* i4 = getSimdData(v);
+ if (!i4)
+ return;
+ masm.vmovdqa_mr(nullptr, dest.encoding());
+ propagateOOM(i4->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
+{
+ if (maybeInlineSimd128Float(v, dest))
+ return;
+ SimdData* f4 = getSimdData(v);
+ if (!f4)
+ return;
+ masm.vmovaps_mr(nullptr, dest.encoding());
+ propagateOOM(f4->uses.append(CodeOffset(masm.size())));
+}
+
+void
+MacroAssemblerX86::finish()
+{
+ if (!doubles_.empty())
+ masm.haltingAlign(sizeof(double));
+ for (const Double& d : doubles_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : d.uses)
+ addCodeLabel(CodeLabel(use, cst));
+ masm.int64Constant(d.value);
+ if (!enoughMemory_)
+ return;
+ }
+
+ if (!floats_.empty())
+ masm.haltingAlign(sizeof(float));
+ for (const Float& f : floats_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : f.uses)
+ addCodeLabel(CodeLabel(use, cst));
+ masm.int32Constant(f.value);
+ if (!enoughMemory_)
+ return;
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty())
+ masm.haltingAlign(SimdMemoryAlignment);
+ for (const SimdData& v : simds_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : v.uses)
+ addCodeLabel(CodeLabel(use, cst));
+ masm.simd128Constant(v.value.bytes());
+ if (!enoughMemory_)
+ return;
+ }
+}
+
+void
+MacroAssemblerX86::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
+ subl(Imm32(sizeof(ResumeFromException)), esp);
+ movl(esp, eax);
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(ecx);
+ asMasm().passABIArg(eax);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ loadPtr(Address(esp, offsetof(ResumeFromException, kind)), eax);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(esp, offsetof(ResumeFromException, target)), eax);
+ loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+ jmp(Operand(eax));
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(ecx, edx);
+ loadValue(Address(esp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(esp, offsetof(ResumeFromException, target)), eax);
+ loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jmp(Operand(eax));
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+ bind(&return_);
+ loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
+ loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
+ loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
+ movl(ebp, esp);
+ pop(ebp);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to caller
+ // frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ loadPtr(Address(esp, offsetof(ResumeFromException, bailoutInfo)), ecx);
+ movl(Imm32(BAILOUT_RETURN_OK), eax);
+ jmp(Operand(esp, offsetof(ResumeFromException, target)));
+}
+
+void
+MacroAssemblerX86::profilerEnterFrame(Register framePtr, Register scratch)
+{
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerX86::profilerExitFrame()
+{
+ jmp(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler&
+MacroAssemblerX86::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerX86::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value) {
+ // On Windows, we cannot skip very far down the stack without touching the
+ // memory pages in between. This is corner-case code for situations where the
+ // Ion frame data for a piece of code is very large. To handle this special case,
+ // we allocate large frames incrementally, one page (4K) at a time, touching
+ // the memory as we go.
+ uint32_t amountLeft = imm32.value;
+ while (amountLeft > 4096) {
+ subl(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subl(Imm32(amountLeft), StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ movl(esp, scratch);
+ andl(Imm32(~(ABIStackAlignment - 1)), esp);
+ push(scratch);
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ freeStack(stackAdjust);
+ if (result == MoveOp::DOUBLE) {
+ reserveStack(sizeof(double));
+ fstp(Operand(esp, 0));
+ loadDouble(Operand(esp, 0), ReturnDoubleReg);
+ freeStack(sizeof(double));
+ } else if (result == MoveOp::FLOAT32) {
+ reserveStack(sizeof(float));
+ fstp32(Operand(esp, 0));
+ loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
+ freeStack(sizeof(float));
+ }
+ if (dynamicAlignment_)
+ pop(esp);
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ MOZ_ASSERT(ptr != temp);
+ movePtr(ptr, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ loadPtr(address, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void
+MacroAssembler::branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ orPtr(Imm32(gc::ChunkMask), ptr);
+ branch32(cond, Address(ptr, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, address, cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, address, temp, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ if (rhs.isMarkable())
+ cmpPtr(lhs.payloadReg(), ImmGCPtr(rhs.toMarkablePointer()));
+ else
+ cmpPtr(lhs.payloadReg(), ImmWord(rhs.toNunboxPayload()));
+
+ if (cond == Equal) {
+ Label done;
+ j(NotEqual, &done);
+ {
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(Equal, label);
+ }
+ bind(&done);
+ } else {
+ j(NotEqual, label);
+
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(NotEqual, label);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag if needed.
+ if (valueType != slotType)
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(dest));
+
+ // Store the payload.
+ if (value.constant())
+ storePayload(value.value(), Operand(dest));
+ else
+ storePayload(value.reg().typedReg().gpr(), Operand(dest));
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+// wasm specific methods, used in both the wasm baseline compiler and ion.
+
+void
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ movsblWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ movzblWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Int16:
+ movswlWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ movzwlWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movlWithPatch(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ vmovssWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Float64:
+ vmovsdWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movss zeroes out the high lanes.
+ case 1: vmovssWithPatch(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movsd.
+ case 2: vmovsdWithPatch(srcAddr, out.fpu()); break;
+ case 4: vmovupsWithPatch(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+ // In memory-to-register mode, movd zeroes out the high lanes.
+ case 1: vmovdWithPatch(srcAddr, out.fpu()); break;
+ // See comment above, which also applies to movq.
+ case 2: vmovqWithPatch(srcAddr, out.fpu()); break;
+ case 4: vmovdquWithPatch(srcAddr, out.fpu()); break;
+ default: MOZ_CRASH("unexpected size for partial load");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
+ vmovdquWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
+ vmovdquWithPatch(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected type");
+ }
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
+{
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(!access.isSimd());
+
+ size_t loadOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movsblWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ cdq();
+ break;
+ case Scalar::Uint8:
+ movzblWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int16:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movswlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ cdq();
+ break;
+ case Scalar::Uint16:
+ movzwlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int32:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ movlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ cdq();
+ break;
+ case Scalar::Uint32:
+ movlWithPatch(srcAddr, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int64:
+ if (srcAddr.kind() == Operand::MEM_ADDRESS32) {
+ Operand low(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64LOW_OFFSET));
+ Operand high(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64HIGH_OFFSET));
+
+ movlWithPatch(low, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ loadOffset = size();
+ movlWithPatch(high, out.high);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+ } else {
+ MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP);
+ Address addr = srcAddr.toAddress();
+ Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
+ Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
+
+ if (addr.base != out.low) {
+ movlWithPatch(low, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ loadOffset = size();
+ movlWithPatch(high, out.high);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+ } else {
+ MOZ_ASSERT(addr.base != out.high);
+ movlWithPatch(high, out.high);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+
+ loadOffset = size();
+ movlWithPatch(low, out.low);
+ append(wasm::MemoryPatch(size()));
+ append(access, loadOffset, framePushed());
+ }
+ }
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ MOZ_CRASH("non-int64 loads should use load()");
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+}
+
+void
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
+{
+ memoryBarrier(access.barrierBefore());
+
+ size_t storeOffset = size();
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ movbWithPatch(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ movwWithPatch(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ movlWithPatch(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ vmovssWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ vmovsdWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float32x4:
+ switch (access.numSimdElems()) {
+ // In register-to-memory mode, movss stores only the low lane.
+ case 1: vmovssWithPatch(value.fpu(), dstAddr); break;
+ // See comment above, which also applies to movsd.
+ case 2: vmovsdWithPatch(value.fpu(), dstAddr); break;
+ case 4: vmovupsWithPatch(value.fpu(), dstAddr); break;
+ default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int32x4:
+ switch (access.numSimdElems()) {
+ // In register-to-memory mode, movd stores only the low lane.
+ case 1: vmovdWithPatch(value.fpu(), dstAddr); break;
+ // See comment above, which also applies to movq.
+ case 2: vmovqWithPatch(value.fpu(), dstAddr); break;
+ case 4: vmovdquWithPatch(value.fpu(), dstAddr); break;
+ default: MOZ_CRASH("unexpected size for partial store");
+ }
+ break;
+ case Scalar::Int8x16:
+ MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
+ vmovdquWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int16x8:
+ MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
+ vmovdquWithPatch(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("Should be handled in storeI64.");
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected type");
+ }
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr)
+{
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(!access.isSimd());
+
+ size_t storeOffset = size();
+ if (dstAddr.kind() == Operand::MEM_ADDRESS32) {
+ Operand low(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64LOW_OFFSET));
+ Operand high(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64HIGH_OFFSET));
+
+ movlWithPatch(value.low, low);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ storeOffset = size();
+ movlWithPatch(value.high, high);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+ } else {
+ MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP);
+ Address addr = dstAddr.toAddress();
+ Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
+ Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
+
+ if (addr.base != value.low) {
+ movlWithPatch(value.low, low);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ storeOffset = size();
+ movlWithPatch(value.high, high);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+ } else {
+ MOZ_ASSERT(addr.base != value.high);
+
+ movlWithPatch(value.high, high);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+
+ storeOffset = size();
+ movlWithPatch(value.low, low);
+ append(wasm::MemoryPatch(size()));
+ append(access, storeOffset, framePushed());
+ }
+ }
+}
+
+void
+MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ Label done;
+ vcvttsd2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
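+ // The first conversion failed (vcvttsd2si returned the 0x80000000 sentinel),
+ // but the input may still be a valid uint32 in [2^31, 2^32): bias it down by
+ // 2^31, convert again, and set the high bit of the result. Inputs that are
+ // still out of range take the OOL path below.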
+ loadConstantDouble(double(int32_t(0x80000000)), ScratchDoubleReg);
+ addDouble(input, ScratchDoubleReg);
+ vcvttsd2si(ScratchDoubleReg, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry)
+{
+ Label done;
+ vcvttss2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
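+ // Same 2^31 bias trick as wasmTruncateDoubleToUInt32 above.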
+ loadConstantFloat32(float(int32_t(0x80000000)), ScratchFloat32Reg);
+ addFloat32(input, ScratchFloat32Reg);
+ vcvttss2si(ScratchFloat32Reg, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+//}}} check_macroassembler_style
+
+void
+MacroAssemblerX86::convertInt64ToDouble(Register64 input, FloatRegister output)
+{
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
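+ // Spill the int64 to the stack (high word first, so the value sits
+ // little-endian at (%esp)) and use x87 fild/fstp for the conversion:
+ // 32-bit x86 has no direct int64-to-double SSE instruction.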
+ asMasm().Push(input.high);
+ asMasm().Push(input.low);
+ fild(Operand(esp, 0));
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), output);
+ asMasm().freeStack(2*sizeof(intptr_t));
+}
+
+void
+MacroAssemblerX86::convertInt64ToFloat32(Register64 input, FloatRegister output)
+{
+ convertInt64ToDouble(input, output);
+ convertDoubleToFloat32(output, output);
+}
+
+void
+MacroAssemblerX86::convertUInt64ToFloat32(Register64 input, FloatRegister output, Register temp)
+{
+ convertUInt64ToDouble(input, output.asDouble(), temp);
+ convertDoubleToFloat32(output, output);
+}
+
+void
+MacroAssemblerX86::wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+ asMasm().branchDoubleNotInInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), temp);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
+void
+MacroAssemblerX86::wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+ asMasm().branchFloat32NotInInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), temp);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
+void
+MacroAssemblerX86::wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+ asMasm().branchDoubleNotInUInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeDouble(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateDoubleToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
+void
+MacroAssemblerX86::wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg)
+{
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in (u)int64.
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+ asMasm().branchFloat32NotInUInt64Range(Address(esp, 0), temp, &fail);
+ jump(&convert);
+
+ // Handle failure in ool.
+ bind(&fail);
+ asMasm().freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ bind(oolRejoin);
+ asMasm().reserveStack(2 * sizeof(int32_t));
+ asMasm().storeFloat32(input, Operand(esp, 0));
+
+ // Convert the double/float to int64.
+ bind(&convert);
+ asMasm().truncateFloat32ToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ asMasm().freeStack(2 * sizeof(int32_t));
+}
+
diff --git a/js/src/jit/x86/MacroAssembler-x86.h b/js/src/jit/x86/MacroAssembler-x86.h
new file mode 100644
index 000000000..21cd63a0c
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -0,0 +1,870 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_h
+#define jit_x86_MacroAssembler_x86_h
+
+#include "jscompartment.h"
+
+#include "jit/JitFrames.h"
+#include "jit/MoveResolver.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Shared
+{
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ protected:
+ MoveResolver moveResolver_;
+
+ private:
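+ // A Value on x86 uses the nunbox32 layout: the payload occupies the low
+ // four bytes and the type tag the high four bytes, hence the +4 offsets
+ // below.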
+ Operand payloadOfAfterStackPush(const Address& address) {
+ // If we are basing off %esp, the address will be invalid after the
+ // first push.
+ if (address.base == StackPointer)
+ return Operand(address.base, address.offset + 4);
+ return payloadOf(address);
+ }
+ Operand payloadOf(const Address& address) {
+ return Operand(address.base, address.offset);
+ }
+ Operand payloadOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset);
+ }
+ Operand tagOf(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ Operand tagOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset + 4);
+ }
+
+ void setupABICall(uint32_t args);
+
+ public:
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store32;
+ using MacroAssemblerX86Shared::store16;
+ using MacroAssemblerX86Shared::call;
+
+ MacroAssemblerX86()
+ {
+ }
+
+ // The buffer is about to be linked; make sure any constant pools or excess
+ // bookkeeping has been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X86-specific interface.
+ /////////////////////////////////////////////////////////////////
+
+ Operand ToPayload(Operand base) {
+ return base;
+ }
+ Address ToPayload(Address base) {
+ return base;
+ }
+ BaseIndex ToPayload(BaseIndex base) {
+ return base;
+ }
+ Operand ToType(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()), Register::FromCode(base.index()),
+ base.scale(), base.disp() + sizeof(void*));
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ Address ToType(Address base) {
+ return ToType(Operand(base)).toAddress();
+ }
+ void moveValue(const Value& val, Register type, Register data) {
+ movl(Imm32(val.toNunboxTag()), type);
+ if (val.isMarkable())
+ movl(ImmGCPtr(val.toMarkablePointer()), data);
+ else
+ movl(Imm32(val.toNunboxPayload()), data);
+ }
+ void moveValue(const Value& val, const ValueOperand& dest) {
+ moveValue(val, dest.typeReg(), dest.payloadReg());
+ }
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(),
+ s1 = src.payloadReg(), d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ xchgl(d0, d1);
+ return;
+ }
+ // If only one is, copy that source first.
+ mozilla::Swap(s0, s1);
+ mozilla::Swap(d0, d1);
+ }
+
+ if (s0 != d0)
+ movl(s0, d0);
+ if (s1 != d1)
+ movl(s1, d1);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+ void storeValue(ValueOperand val, Operand dest) {
+ movl(val.payloadReg(), ToPayload(dest));
+ movl(val.typeReg(), ToType(dest));
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ storeTypeTag(ImmTag(JSVAL_TYPE_TO_TAG(type)), Operand(dest));
+ storePayload(reg, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ storeTypeTag(ImmTag(val.toNunboxTag()), Operand(dest));
+ storePayload(val, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ MOZ_ASSERT(src.base != temp);
+ MOZ_ASSERT(dest.base != temp);
+
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+ void loadValue(Operand src, ValueOperand val) {
+ Operand payload = ToPayload(src);
+ Operand type = ToType(src);
+
+ // Ensure that loading the payload does not erase the pointer to the
+ // Value in memory or the index.
+ Register baseReg = Register::FromCode(src.base());
+ Register indexReg = (src.kind() == Operand::MEM_SCALE) ? Register::FromCode(src.index()) : InvalidReg;
+
+ // If we have a BaseIndex that uses both result registers, first compute
+ // the address and then load the Value from there.
+ if ((baseReg == val.payloadReg() && indexReg == val.typeReg()) ||
+ (baseReg == val.typeReg() && indexReg == val.payloadReg()))
+ {
+ computeEffectiveAddress(src, val.scratchReg());
+ loadValue(Address(val.scratchReg(), 0), val);
+ return;
+ }
+
+ if (baseReg == val.payloadReg() || indexReg == val.payloadReg()) {
+ MOZ_ASSERT(baseReg != val.typeReg());
+ MOZ_ASSERT(indexReg != val.typeReg());
+
+ movl(type, val.typeReg());
+ movl(payload, val.payloadReg());
+ } else {
+ MOZ_ASSERT(baseReg != val.payloadReg());
+ MOZ_ASSERT(indexReg != val.payloadReg());
+
+ movl(payload, val.payloadReg());
+ movl(type, val.typeReg());
+ }
+ }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg())
+ movl(payload, dest.payloadReg());
+ movl(ImmType(type), dest.typeReg());
+ }
+ void pushValue(ValueOperand val) {
+ push(val.typeReg());
+ push(val.payloadReg());
+ }
+ void popValue(ValueOperand val) {
+ pop(val.payloadReg());
+ pop(val.typeReg());
+ }
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isMarkable())
+ push(ImmGCPtr(val.toMarkablePointer()));
+ else
+ push(Imm32(val.toNunboxPayload()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ push(reg);
+ }
+ void pushValue(const Address& addr) {
+ push(tagOf(addr));
+ push(payloadOfAfterStackPush(addr));
+ }
+ void push64(Register64 src) {
+ push(src.high);
+ push(src.low);
+ }
+ void pop64(Register64 dest) {
+ pop(dest.low);
+ pop(dest.high);
+ }
+ void storePayload(const Value& val, Operand dest) {
+ if (val.isMarkable())
+ movl(ImmGCPtr(val.toMarkablePointer()), ToPayload(dest));
+ else
+ movl(Imm32(val.toNunboxPayload()), ToPayload(dest));
+ }
+ void storePayload(Register src, Operand dest) {
+ movl(src, ToPayload(dest));
+ }
+ void storeTypeTag(ImmTag tag, Operand dest) {
+ movl(tag, ToType(dest));
+ }
+
+ void movePtr(Register src, Register dest) {
+ movl(src, dest);
+ }
+ void movePtr(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand& value) {
+ return value.typeReg();
+ }
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
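+ // A double's high word is Below JSVAL_TAG_CLEAR, while every real tag
+ // compares AboveOrEqual to it.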
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
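+ // Doubles and int32 occupy the low end of the tag space, so a single
+ // unsigned compare against the upper bound covers both.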
+ cmp32(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testGCThing(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testMagic(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testBoolean(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(Operand(ToType(address)), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testInt32(cond, Operand(address));
+ }
+ Condition testObject(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testObject(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testObject(cond, Operand(address));
+ }
+ Condition testDouble(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testDouble(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testDouble(cond, Operand(address));
+ }
+
+
+ Condition testUndefined(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testUndefined(Condition cond, const Address& addr) {
+ return testUndefined(cond, Operand(addr));
+ }
+ Condition testNull(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testNull(Condition cond, const Address& addr) {
+ return testNull(cond, Operand(addr));
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& value) {
+ return testUndefined(cond, value.typeReg());
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& value) {
+ return testBoolean(cond, value.typeReg());
+ }
+ Condition testInt32(Condition cond, const ValueOperand& value) {
+ return testInt32(cond, value.typeReg());
+ }
+ Condition testDouble(Condition cond, const ValueOperand& value) {
+ return testDouble(cond, value.typeReg());
+ }
+ Condition testNull(Condition cond, const ValueOperand& value) {
+ return testNull(cond, value.typeReg());
+ }
+ Condition testString(Condition cond, const ValueOperand& value) {
+ return testString(cond, value.typeReg());
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& value) {
+ return testSymbol(cond, value.typeReg());
+ }
+ Condition testObject(Condition cond, const ValueOperand& value) {
+ return testObject(cond, value.typeReg());
+ }
+ Condition testMagic(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value.typeReg());
+ }
+ Condition testError(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& value) {
+ return testNumber(cond, value.typeReg());
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& value) {
+ return testGCThing(cond, value.typeReg());
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& value) {
+ return testPrimitive(cond, value.typeReg());
+ }
+
+
+ Condition testUndefined(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testNull(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testString(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testObject(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testMagic(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) {
+ cmpl(Imm32(rhs.value), lhs);
+ }
+ void cmpPtr(Register lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmpPtr(const Operand& lhs, Imm32 rhs) {
+ cmp32(lhs, rhs);
+ }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ cmp32(lhs, Imm32(rhs.value));
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) {
+ cmpl(rhs, lhs);
+ }
+ void cmpPtr(const Address& lhs, Register rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Operand& lhs, Register rhs) {
+ cmp32(lhs, rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(Register lhs, Register rhs) {
+ cmp32(lhs, rhs);
+ }
+ void testPtr(Register lhs, Register rhs) {
+ test32(lhs, rhs);
+ }
+ void testPtr(Register lhs, Imm32 rhs) {
+ test32(lhs, rhs);
+ }
+ void testPtr(Register lhs, ImmWord rhs) {
+ test32(lhs, Imm32(rhs.value));
+ }
+ void testPtr(const Operand& lhs, Imm32 rhs) {
+ test32(lhs, rhs);
+ }
+ void testPtr(const Operand& lhs, ImmWord rhs) {
+ test32(lhs, Imm32(rhs.value));
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ template <typename T, typename S>
+ void branchPtr(Condition cond, T lhs, S ptr, RepatchLabel* label) {
+ cmpPtr(Operand(lhs), ptr);
+ j(cond, label);
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
+ jump(label);
+ return CodeOffsetJump(size());
+ }
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Assembler::Condition cond,
+ Label* documentation = nullptr)
+ {
+ j(cond, label);
+ return CodeOffsetJump(size());
+ }
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
+ return jumpWithPatch(label);
+ }
+
+ void branchPtr(Condition cond, Register lhs, Register rhs, RepatchLabel* label) {
+ cmpPtr(lhs, rhs);
+ j(cond, label);
+ }
+
+ void movePtr(ImmWord imm, Register dest) {
+ movl(Imm32(imm.value), dest);
+ }
+ void movePtr(ImmPtr imm, Register dest) {
+ movl(imm, dest);
+ }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) {
+ mov(imm, dest);
+ }
+ void movePtr(ImmGCPtr imm, Register dest) {
+ movl(imm, dest);
+ }
+ void loadPtr(const Address& address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPtr(const Operand& src, Register dest) {
+ movl(src, dest);
+ }
+ void loadPtr(const BaseIndex& src, Register dest) {
+ movl(Operand(src), dest);
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void load64(const Address& address, Register64 dest) {
+ movl(Operand(Address(address.base, address.offset + INT64LOW_OFFSET)), dest.low);
+ int32_t highOffset = (address.offset < 0) ? -int32_t(INT64HIGH_OFFSET) : INT64HIGH_OFFSET;
+ movl(Operand(Address(address.base, address.offset + highOffset)), dest.high);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ movl(Imm32(imm.value), Operand(address));
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ movl(imm, Operand(address));
+ }
+ void storePtr(Register src, const Address& address) {
+ movl(src, Operand(address));
+ }
+ void storePtr(Register src, const BaseIndex& address) {
+ movl(src, Operand(address));
+ }
+ void storePtr(Register src, const Operand& dest) {
+ movl(src, dest);
+ }
+ void storePtr(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ movw(src, Operand(address));
+ }
+ void store64(Register64 src, Address address) {
+ movl(src.low, Operand(Address(address.base, address.offset + INT64LOW_OFFSET)));
+ movl(src.high, Operand(Address(address.base, address.offset + INT64HIGH_OFFSET)));
+ }
+ void store64(Imm64 imm, Address address) {
+ movl(imm.low(), Operand(Address(address.base, address.offset + INT64LOW_OFFSET)));
+ movl(imm.hi(), Operand(Address(address.base, address.offset + INT64HIGH_OFFSET)));
+ }
+
+ void setStackArg(Register reg, uint32_t arg) {
+ movl(reg, Operand(esp, arg * sizeof(intptr_t)));
+ }
+
+ // Note: this function clobbers the source register.
+ void boxDouble(FloatRegister src, const ValueOperand& dest) {
+ if (Assembler::HasSSE41()) {
+ vmovd(src, dest.payloadReg());
+ vpextrd(1, src, dest.typeReg());
+ } else {
+ vmovd(src, dest.payloadReg());
+ vpsrldq(Imm32(4), src, src);
+ vmovd(src, dest.typeReg());
+ }
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ if (src != dest.payloadReg())
+ movl(src, dest.payloadReg());
+ movl(ImmType(type), dest.typeReg());
+ }
+
+ void unboxNonDouble(const ValueOperand& src, Register dest) {
+ if (src.payloadReg() != dest)
+ movl(src.payloadReg(), dest);
+ }
+ void unboxNonDouble(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void unboxNonDouble(const BaseIndex& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void unboxInt32(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxInt32(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxBoolean(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxBoolean(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxString(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxSymbol(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const Address& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxDouble(const Address& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
+ MOZ_ASSERT(dest != ScratchDoubleReg);
+ if (Assembler::HasSSE41()) {
+ vmovd(src.payloadReg(), dest);
+ vpinsrd(1, src.typeReg(), dest, dest);
+ } else {
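+ // Without SSE4.1, move the payload and the tag into two XMM registers
+ // and interleave their low dwords to reassemble the 64-bit double.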
+ vmovd(src.payloadReg(), dest);
+ vmovd(src.typeReg(), ScratchDoubleReg);
+ vunpcklps(ScratchDoubleReg, dest, dest);
+ }
+ }
+ void unboxDouble(const Operand& payload, const Operand& type,
+ Register scratch, FloatRegister dest) {
+ MOZ_ASSERT(dest != ScratchDoubleReg);
+ if (Assembler::HasSSE41()) {
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vpinsrd(1, scratch, dest, dest);
+ } else {
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vmovd(scratch, ScratchDoubleReg);
+ vunpcklps(ScratchDoubleReg, dest, dest);
+ }
+ }
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand& src, Register dest) {
+ if (src.payloadReg() != dest)
+ movl(src.payloadReg(), dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ xorl(Imm32(1), val.payloadReg());
+ }
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, moves the payload into the given scratch
+ // register and returns that.
+ Register extractObject(const Address& address, Register scratch) {
+ movl(payloadOf(address), scratch);
+ return scratch;
+ }
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractTag(const Address& address, Register scratch) {
+ movl(tagOf(address), scratch);
+ return scratch;
+ }
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value) {
+ Register string = value.payloadReg();
+ cmp32(Operand(string, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+ }
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ inline void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 4:
+ storePtr(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void loadInstructionPointerAfterCall(Register dest) {
+ movl(Operand(StackPointer, 0x0), dest);
+ }
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToDouble(Register src, FloatRegister dest);
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToFloat32(Register src, FloatRegister dest);
+
+ void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp);
+ void convertInt64ToFloat32(Register64 src, FloatRegister dest);
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp);
+ void convertInt64ToDouble(Register64 src, FloatRegister dest);
+
+ void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+ void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble);
+
+ void incrementInt32Value(const Address& addr) {
+ addl(Imm32(1), payloadOf(addr));
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ CodeOffset label = movlWithPatch(PatchedAbsoluteAddress(), dest);
+ append(wasm::GlobalAccess(label, globalDataOffset));
+ }
+ void loadWasmPinnedRegsFromTls() {
+ // x86 doesn't have any pinned registers.
+ }
+
+ public:
+ // Used from within an Exit frame to handle a pending exception.
+ void handleFailureWithHandlerTail(void* handler);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerX86 MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_h */
diff --git a/js/src/jit/x86/SharedIC-x86.cpp b/js/src/jit/x86/SharedIC-x86.cpp
new file mode 100644
index 000000000..355b73096
--- /dev/null
+++ b/js/src/jit/x86/SharedIC-x86.cpp
@@ -0,0 +1,242 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // There is no need to explicitly unbox R0 and R1; just use ICTailCallReg as
+ // a scratch register, which should be available here.
+ Register scratchReg = ICTailCallReg;
+
+ Label revertRegister, maybeNegZero;
+ switch(op_) {
+ case JSOP_ADD:
+ // Add R0 and R1. Don't need to explicitly unbox.
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.addl(R1.payloadReg(), scratchReg);
+
+ // Just jump to failure on overflow. R0 and R1 are preserved, so we can just jump to
+ // the next stub.
+ masm.j(Assembler::Overflow, &failure);
+
+ // Just overwrite the payload, the tag is still fine.
+ masm.movl(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_SUB:
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.subl(R1.payloadReg(), scratchReg);
+ masm.j(Assembler::Overflow, &failure);
+ masm.movl(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_MUL:
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.imull(R1.payloadReg(), scratchReg);
+ masm.j(Assembler::Overflow, &failure);
+
+ masm.test32(scratchReg, scratchReg);
+ masm.j(Assembler::Zero, &maybeNegZero);
+
+ masm.movl(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_DIV:
+ {
+ // Prevent division by 0.
+ masm.branchTest32(Assembler::Zero, R1.payloadReg(), R1.payloadReg(), &failure);
+
+ // Prevent negative 0 and -2147483648 / -1.
+ masm.branch32(Assembler::Equal, R0.payloadReg(), Imm32(INT32_MIN), &failure);
+
+ Label notZero;
+ masm.branch32(Assembler::NotEqual, R0.payloadReg(), Imm32(0), &notZero);
+ masm.branchTest32(Assembler::Signed, R1.payloadReg(), R1.payloadReg(), &failure);
+ masm.bind(&notZero);
+
+ // For idiv we need eax.
+ MOZ_ASSERT(R1.typeReg() == eax);
+ masm.movl(R0.payloadReg(), eax);
+ // Preserve R0.payloadReg()/edx, eax is JSVAL_TYPE_INT32.
+ masm.movl(R0.payloadReg(), scratchReg);
+ // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
+ masm.cdq();
+ masm.idiv(R1.payloadReg());
+
+ // A remainder implies a double result.
+ masm.branchTest32(Assembler::NonZero, edx, edx, &revertRegister);
+
+ masm.movl(eax, R0.payloadReg());
+ break;
+ }
+ case JSOP_MOD:
+ {
+ // x % 0 always results in NaN.
+ masm.branchTest32(Assembler::Zero, R1.payloadReg(), R1.payloadReg(), &failure);
+
+ // Prevent negative 0 and -2147483648 % -1.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);
+
+ // For idiv we need eax.
+ MOZ_ASSERT(R1.typeReg() == eax);
+ masm.movl(R0.payloadReg(), eax);
+ // Preserve R0.payloadReg()/edx, eax is JSVAL_TYPE_INT32.
+ masm.movl(R0.payloadReg(), scratchReg);
+ // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
+ masm.cdq();
+ masm.idiv(R1.payloadReg());
+
+ // Fail when we would need a negative remainder.
+ Label done;
+ masm.branchTest32(Assembler::NonZero, edx, edx, &done);
+ masm.branchTest32(Assembler::Signed, scratchReg, scratchReg, &revertRegister);
+ masm.branchTest32(Assembler::Signed, R1.payloadReg(), R1.payloadReg(), &revertRegister);
+
+ masm.bind(&done);
+ // Result is in edx, tag in ecx remains untouched.
+ MOZ_ASSERT(R0.payloadReg() == edx);
+ MOZ_ASSERT(R0.typeReg() == ecx);
+ break;
+ }
+ case JSOP_BITOR:
+ // We can override R0 because the operation is infallible.
+ // The R0.typeReg() is also still intact.
+ masm.orl(R1.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_BITXOR:
+ masm.xorl(R1.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_BITAND:
+ masm.andl(R1.payloadReg(), R0.payloadReg());
+ break;
+ case JSOP_LSH:
+ // RHS needs to be in ecx for shift operations.
+ MOZ_ASSERT(R0.typeReg() == ecx);
+ masm.movl(R1.payloadReg(), ecx);
+ masm.shll_cl(R0.payloadReg());
+ // We need to tag again, because we overwrote it.
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ break;
+ case JSOP_RSH:
+ masm.movl(R1.payloadReg(), ecx);
+ masm.sarl_cl(R0.payloadReg());
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ break;
+ case JSOP_URSH:
+ if (!allowDouble_)
+ masm.movl(R0.payloadReg(), scratchReg);
+
+ masm.movl(R1.payloadReg(), ecx);
+ masm.shrl_cl(R0.payloadReg());
+ masm.test32(R0.payloadReg(), R0.payloadReg());
+ if (allowDouble_) {
+ Label toUint;
+ masm.j(Assembler::Signed, &toUint);
+
+ // Box and return.
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(R0.payloadReg(), ScratchDoubleReg);
+ masm.boxDouble(ScratchDoubleReg, R0);
+ } else {
+ masm.j(Assembler::Signed, &revertRegister);
+ masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ // Return.
+ EmitReturnFromIC(masm);
+
+ switch(op_) {
+ case JSOP_MUL:
+ masm.bind(&maybeNegZero);
+
+ // Result is -0 if exactly one of lhs or rhs is negative.
+ masm.movl(R0.payloadReg(), scratchReg);
+ masm.orl(R1.payloadReg(), scratchReg);
+ masm.j(Assembler::Signed, &failure);
+
+ // Result is +0.
+ masm.mov(ImmWord(0), R0.payloadReg());
+ EmitReturnFromIC(masm);
+ break;
+ case JSOP_DIV:
+ case JSOP_MOD:
+ masm.bind(&revertRegister);
+ masm.movl(scratchReg, R0.payloadReg());
+ masm.movl(ImmType(JSVAL_TYPE_INT32), R1.typeReg());
+ break;
+ case JSOP_URSH:
+ // Revert the content of R0 in the fallible >>> case.
+ if (!allowDouble_) {
+ masm.bind(&revertRegister);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ default:
+ // No special failure handling required.
+ // Fall through to failure.
+ break;
+ }
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.notl(R0.payloadReg());
+ break;
+ case JSOP_NEG:
+ // Guard against 0 and MIN_INT, both result in a double.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);
+ masm.negl(R0.payloadReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/x86/SharedICHelpers-x86.h b/js/src/jit/x86/SharedICHelpers-x86.h
new file mode 100644
index 000000000..e7f75cc95
--- /dev/null
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -0,0 +1,353 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICHelpers_x86_h
+#define jit_x86_SharedICHelpers_x86_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from stack top to the top Value inside an IC stub (this is the return address).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ masm.Pop(ICTailCallReg);
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ masm.Push(ICTailCallReg);
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
+ ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry into ICTailCallReg
+ // ICTailCallReg will always be unused in the contexts where ICs are called.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) monitorStubOffset), ICStubReg);
+
+ // Jump to the stubcode.
+ masm.jmp(Operand(ICStubReg, (int32_t) ICStub::offsetOfStubCode()));
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
+ masm.ret();
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
+ masm.storePtr(reg, Address(StackPointer, 0));
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ // We assume during this that R0 and R1 have been pushed.
+
+ // Compute frame size.
+ masm.movl(BaselineFrameReg, eax);
+ masm.addl(Imm32(BaselineFrame::FramePointerOffset), eax);
+ masm.subl(BaselineStackReg, eax);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.movl(eax, ebx);
+ masm.subl(Imm32(argSize), ebx);
+ masm.store32(ebx, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(eax, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(eax);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ // For tail calls, find the already pushed JitFrame_IonJS signifying the
+ // end of the Ion frame. Retrieve the length of the frame and repush
+ // JitFrame_IonJS with the extra stack size, rendering the original
+ // JitFrame_IonJS obsolete.
+
+ masm.loadPtr(Address(esp, stackSize), eax);
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), eax);
+ masm.addl(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), eax);
+
+ // Push frame descriptor and perform the tail call.
+ masm.makeFrameDescriptor(eax, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(eax);
+ masm.push(ICTailCallReg);
+ masm.jmp(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and previous
+ // frame pointer pushed by EmitEnterStubFrame.
+ masm.movl(BaselineFrameReg, reg);
+ masm.addl(Imm32(sizeof(void*) * 2), reg);
+ masm.subl(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
+ EmitBaselineCreateStubFrameDescriptor(masm, eax, ExitFrameLayout::Size());
+ masm.push(eax);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+ // Stubs often use the return address, which is really accounted for by the
+ // caller of the stub. In the stub code we pretend it is part of the stub
+ // frame so that it can be popped; subtract it here so it is not counted
+ // twice.
+ uint32_t framePushed = masm.framePushed() - sizeof(void*);
+
+ uint32_t descriptor = MakeFrameDescriptor(framePushed, JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.call(target);
+
+ // Remove the rest of the frame left on the stack, except for the return
+ // address, which is implicitly popped when returning.
+ size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
+
+// Size of values pushed by EmitBaselineEnterStubFrame.
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
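+// (Frame descriptor, return address, stub pointer, and saved frame pointer;
+// see EmitBaselineEnterStubFrame.)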
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ EmitRestoreTailCallReg(masm);
+
+ // Compute frame size.
+ masm.movl(BaselineFrameReg, scratch);
+ masm.addl(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subl(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update STUB_FRAME_SIZE
+ // if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.Push(scratch);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(ICStubReg);
+ masm.Push(BaselineFrameReg);
+ masm.mov(BaselineStackReg, BaselineFrameReg);
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ masm.loadPtr(Address(masm.getStackPointer(), 0), ICTailCallReg);
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ Register scratch = ICTailCallReg;
+ masm.Pop(scratch);
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addl(scratch, BaselineStackReg);
+ } else {
+ masm.mov(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.Pop(BaselineFrameReg);
+ masm.Pop(ICStubReg);
+
+ // Pop return address.
+ masm.Pop(ICTailCallReg);
+
+ // Overwrite frame descriptor with return address, so that the stack matches
+ // the state before entering the stub frame.
+ masm.storePtr(ICTailCallReg, Address(BaselineStackReg, 0));
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Stow R0
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Stow R0 and R1
+ masm.pop(ICTailCallReg);
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.push(ICTailCallReg);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Unstow R0
+ masm.pop(ICTailCallReg);
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ masm.push(ICTailCallReg);
+ break;
+ case 2:
+ // Unstow R0 and R1
+ masm.pop(ICTailCallReg);
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ masm.push(ICTailCallReg);
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from stack top, excluding the return address.
+
+ // Save the current ICStubReg to stack
+ masm.push(ICStubReg);
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+
+ // Restore the old stub reg.
+ masm.pop(ICStubReg);
+
+ // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
+ // value in R0 type-checked properly or not.
+ Label success;
+ masm.cmp32(R1.scratchReg(), Imm32(1));
+ masm.j(Assembler::Equal, &success);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ masm.patchableCallPreBarrier(addr, type);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in the
+ // same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, (int32_t) ICStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, (int32_t) ICStub::offsetOfStubCode()));
+}
+
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICHelpers_x86_h */
diff --git a/js/src/jit/x86/SharedICRegisters-x86.h b/js/src/jit/x86/SharedICRegisters-x86.h
new file mode 100644
index 000000000..d34999b74
--- /dev/null
+++ b/js/src/jit/x86/SharedICRegisters-x86.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICRegisters_x86_h
+#define jit_x86_SharedICRegisters_x86_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = ebp;
+static constexpr Register BaselineStackReg = esp;
+
+// ValueOperands R0, R1, and R2
+static constexpr ValueOperand R0(ecx, edx);
+static constexpr ValueOperand R1(eax, ebx);
+static constexpr ValueOperand R2(esi, edi);
+
+// ICTailCallReg and ICStubReg reuse
+// registers from R2.
+static constexpr Register ICTailCallReg = esi;
+static constexpr Register ICStubReg = edi;
+
+static constexpr Register ExtractTemp0 = InvalidReg;
+static constexpr Register ExtractTemp1 = InvalidReg;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICRegisters_x86_h */
diff --git a/js/src/jit/x86/Trampoline-x86.cpp b/js/src/jit/x86/Trampoline-x86.cpp
new file mode 100644
index 000000000..d91379cd3
--- /dev/null
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -0,0 +1,1336 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "jit/x86/SharedICHelpers-x86.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using mozilla::IsPowerOfTwo;
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+enum EnterJitEbpArgumentOffset {
+ ARG_JITCODE = 2 * sizeof(void*),
+ ARG_ARGC = 3 * sizeof(void*),
+ ARG_ARGV = 4 * sizeof(void*),
+ ARG_STACKFRAME = 5 * sizeof(void*),
+ ARG_CALLEETOKEN = 6 * sizeof(void*),
+ ARG_SCOPECHAIN = 7 * sizeof(void*),
+ ARG_STACKVALUES = 8 * sizeof(void*),
+ ARG_RESULT = 9 * sizeof(void*)
+};
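+// These offsets follow from the cdecl prologue emitted below: after
+// "push ebp; mov esp, ebp", [ebp + 0] holds the saved ebp and
+// [ebp + sizeof(void*)] the return address, so the first C++ argument (the
+// jit code pointer) sits at [ebp + 2 * sizeof(void*)].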
+
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard cdecl
+// calling convention.
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ MacroAssembler masm(cx);
+ masm.assertStackAlignment(ABIStackAlignment, -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(ebp);
+ masm.movl(esp, ebp);
+
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.push(ebx);
+ masm.push(esi);
+ masm.push(edi);
+
+ // Keep track of the stack which has to be unwound after returning from the
+ // compiled function.
+ masm.movl(esp, esi);
+
+ // Load the number of values to be copied (argc) into eax
+ masm.loadPtr(Address(ebp, ARG_ARGC), eax);
+
+ // If we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.loadPtr(Address(ebp, ARG_CALLEETOKEN), edx);
+ masm.branchTest32(Assembler::Zero, edx, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.addl(Imm32(1), eax);
+
+ masm.bind(&noNewTarget);
+ }
+
+ // eax <- 8*numValues, eax is now the offset between argv and the last value.
+ masm.shll(Imm32(3), eax);
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
+ masm.movl(esp, ecx);
+ masm.subl(eax, ecx);
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+
+ // ecx = ecx & 15, holds alignment.
+ masm.andl(Imm32(JitStackAlignment - 1), ecx);
+ masm.subl(ecx, esp);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // ebx = argv --argv pointer is in ebp + 16
+ masm.loadPtr(Address(ebp, ARG_ARGV), ebx);
+
+ // eax = argv[8(argc)] --eax now points one value past the last argument
+ masm.addl(ebx, eax);
+
+ // while (eax > ebx) --while still looping through arguments
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmp32(eax, ebx);
+ masm.j(Assembler::BelowOrEqual, &footer);
+
+ // eax -= 8 --move to previous argument
+ masm.subl(Imm32(8), eax);
+
+ // Push what eax points to on stack, a Value is 2 words
+ masm.push(Operand(eax, 4));
+ masm.push(Operand(eax, 0));
+
+ masm.jmp(&header);
+ masm.bind(&footer);
+ }
+
+
+ // Push the number of actual arguments. |result| is used to store the
+ // actual number of arguments without adding an extra argument to the enter
+ // JIT.
+ masm.mov(Operand(ebp, ARG_RESULT), eax);
+ masm.unboxInt32(Address(eax, 0x0), eax);
+ masm.push(eax);
+
+ // Push the callee token.
+ masm.push(Operand(ebp, ARG_CALLEETOKEN));
+
+ // Load the InterpreterFrame address into the OsrFrameReg.
+ // This address is also used for setting the constructing bit on all paths.
+ masm.loadPtr(Address(ebp, ARG_STACKFRAME), OsrFrameReg);
+
+ /*****************************************************************
+ Push the number of bytes we've pushed so far on the stack and call
+ *****************************************************************/
+ // Create a frame descriptor.
+ masm.subl(esp, esi);
+ masm.makeFrameDescriptor(esi, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(esi);
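+ // The descriptor packs the byte size of the frame into its high bits (which
+ // is why the epilogue recovers it with shrl(FRAMESIZE_SHIFT)) alongside the
+ // frame type; see makeFrameDescriptor for the exact encoding.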
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(JSReturnOperand);
+ regs.takeUnchecked(OsrFrameReg);
+ regs.take(ebp);
+ regs.take(ReturnReg);
+
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_STACKVALUES), numStackValues);
+
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_JITCODE), jitcode);
+
+ // Push return address.
+ masm.mov(returnLabel.patchAt(), scratch);
+ masm.push(scratch);
+
+ // Push previous frame pointer.
+ masm.push(ebp);
+
+ // Reserve frame.
+ Register framePtr = ebp;
+ masm.subPtr(Imm32(BaselineFrame::Size()), esp);
+ masm.mov(esp, framePtr);
+
+#ifdef XP_WIN
+ // Can't push large frames blindly on Windows. Touch frame memory incrementally.
+ masm.mov(numStackValues, scratch);
+ masm.shll(Imm32(3), scratch);
+ masm.subPtr(scratch, framePtr);
+ {
+ masm.movePtr(esp, scratch);
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+
+ Label touchFrameLoop;
+ Label touchFrameLoopEnd;
+ masm.bind(&touchFrameLoop);
+ masm.branchPtr(Assembler::Below, scratch, framePtr, &touchFrameLoopEnd);
+ masm.store32(Imm32(0), Address(scratch, 0));
+ masm.subPtr(Imm32(WINDOWS_BIG_FRAME_TOUCH_INCREMENT), scratch);
+ masm.jump(&touchFrameLoop);
+ masm.bind(&touchFrameLoopEnd);
+ }
+ masm.mov(esp, framePtr);
+#endif
+
+ // Reserve space for locals and stack values.
+ masm.mov(numStackValues, scratch);
+ masm.shll(Imm32(3), scratch);
+ masm.subPtr(scratch, esp);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(scratch); // Fake return address.
+ masm.push(Imm32(0));
+ // No GC things to mark on the stack, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.push(framePtr);
+ masm.push(jitcode);
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtr); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ masm.pop(jitcode);
+ masm.pop(framePtr);
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), esp);
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.lea(Operand(framePtr, sizeof(void*)), realFramePtr);
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.mov(framePtr, esp);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), esp);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.mov(oomReturnLabel.patchAt(), scratch);
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ masm.loadPtr(Address(ebp, ARG_SCOPECHAIN), R1.scratchReg());
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ /***************************************************************
+ Call passed-in code, get return value and fill in the
+ passed in return value pointer
+ ***************************************************************/
+ masm.call(Address(ebp, ARG_JITCODE));
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.use(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.use(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments off the stack.
+ // eax <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(eax);
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), eax); // Unmark EntryFrame.
+ masm.addl(eax, esp);
+
+ // |ebp| could have been clobbered by the inner function.
+ // Grab the address for the Value result from the argument stack.
+ // +20 ... arguments ...
+ // +16 <return>
+ // +12 ebp <- original %ebp pointing here.
+ // +8 ebx
+ // +4 esi
+ // +0 edi
+ masm.loadPtr(Address(esp, ARG_RESULT + 3 * sizeof(void*)), eax);
+ masm.storeValue(JSReturnOperand, Operand(eax, 0));
+
+ /**************************************************************
+ Return stack and registers to correct state
+ **************************************************************/
+
+ // Restore non-volatile registers
+ masm.pop(edi);
+ masm.pop(esi);
+ masm.pop(ebx);
+
+ // Restore old stack frame pointer
+ masm.pop(ebp);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
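+// For reference, the ebp-relative offsets above correspond to an entry point
+// with roughly this C++ signature (reconstructed from EnterJitEbpArgumentOffset,
+// not quoted from the headers):
+//
+// void EnterJitCode(void* jitcode, unsigned argc, Value* argv,
+// InterpreterFrame* stackFrame, CalleeToken calleeToken,
+// JSObject* scopeChain, size_t numStackValues, Value* result);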
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ AutoJitContextAlloc ajca(cx);
+ MacroAssembler masm(cx);
+
+ // We do the minimum amount of work in assembly and shunt the rest
+ // off to InvalidationBailout. Assembly does:
+ //
+ // - Pop the return address from the invalidation epilogue call.
+ // - Push the machine state onto the stack.
+ // - Call the InvalidationBailout routine with the stack pointer.
+ // - Now that the frame has been bailed out, convert the invalidated
+ // frame into an exit frame.
+ // - Do the normal check-return-code-and-thunk-to-the-interpreter dance.
+
+ masm.addl(Imm32(sizeof(uintptr_t)), esp);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ masm.movl(esp, eax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's frameSize outparam.
+ masm.reserveStack(sizeof(size_t));
+ masm.movl(esp, ebx);
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ecx);
+
+ masm.setupUnalignedABICall(edx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.passABIArg(ecx);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.pop(ecx); // Get bailoutInfo outparam.
+ masm.pop(ebx); // Get the frameSize outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.lea(Operand(esp, ebx, TimesOne, sizeof(InvalidationBailoutStack)), esp);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ MacroAssembler masm(cx);
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- esp
+ // '-- #esi ---'
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
+ // Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == esi);
+
+ // Load the number of |undefined|s to push into %ecx.
+ masm.loadPtr(Address(esp, RectifierFrameLayout::offsetOfCalleeToken()), eax);
+ masm.mov(eax, ecx);
+ masm.andl(Imm32(CalleeTokenMask), ecx);
+ masm.movzwl(Operand(ecx, JSFunction::offsetOfNargs()), ecx);
+
+ // The frame pointer and its padding are pushed on the stack.
+ // Including |this|, there are (|nformals| + 1) arguments to push to the
+ // stack. Then we push a JitFrameLayout. We compute the padding expressed
+ // in the number of extra |undefined| values to push on the stack.
+ static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert((sizeof(Value) + 2 * sizeof(void*)) % JitStackAlignment == 0,
+ "No need to consider |this| and the frame pointer and its padding for aligning the stack");
+ static_assert(JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(Imm32(JitStackValueAlignment - 1 /* for padding */), ecx);
+
+ // Account for newTarget, if necessary.
+ static_assert(CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count an extra push");
+ masm.mov(eax, edx);
+ masm.andl(Imm32(CalleeToken_FunctionConstructing), edx);
+ masm.addl(edx, ecx);
+
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), ecx);
+ masm.subl(esi, ecx);
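+ // ecx is now RoundUp(nformals + isConstructing, JitStackValueAlignment) - nargs,
+ // i.e. the number of |undefined| values to push so that the rectified
+ // argument list keeps the JIT stack alignment established by the
+ // static_asserts above.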
+
+ // Copy the number of actual arguments.
+ masm.loadPtr(Address(esp, RectifierFrameLayout::offsetOfNumActualArgs()), edx);
+
+ masm.moveValue(UndefinedValue(), ebx, edi);
+
+ // NOTE: The fact that x86 ArgumentsRectifier saves the FramePointer is relied upon
+ // by the baseline bailout code. If this changes, fix that code! See
+ // BaselineJIT.cpp/BaselineStackBuilder::calculatePrevFramePtr, and
+ // BaselineJIT.cpp/InitFromBailout. Check for the |#if defined(JS_CODEGEN_X86)| portions.
+ masm.push(FramePointer);
+ masm.movl(esp, FramePointer); // Save %esp.
+ masm.push(FramePointer /* padding */);
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]]
+ // '-- #esi ---'
+ //
+ // Rectifier frame:
+ // [ebp'] <- ebp [padding] <- esp [undef] [undef] [arg2] [arg1] [this]
+ // '--- #ecx ----' '-- #esi ---'
+ //
+ // [[argc] [callee] [descr] [raddr]]
+
+ // Push undefined.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(ebx); // type(undefined);
+ masm.push(edi); // payload(undefined);
+ masm.subl(Imm32(1), ecx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ // Get the topmost argument. We did a push of %ebp earlier, so be sure to
+ // account for this in the offset
+ BaseIndex b = BaseIndex(FramePointer, esi, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(void*));
+ masm.lea(Operand(b), ecx);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addl(Imm32(1), esi);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.push(Operand(ecx, sizeof(Value)/2));
+ masm.push(Operand(ecx, 0x0));
+ masm.subl(Imm32(sizeof(Value)), ecx);
+ masm.subl(Imm32(1), esi);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+ {
+ Label notConstructing;
+
+ masm.mov(eax, ebx);
+ masm.branchTest32(Assembler::Zero, ebx, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ BaseValueIndex src(FramePointer, edx,
+ sizeof(RectifierFrameLayout) +
+ sizeof(Value) +
+ sizeof(void*));
+
+ masm.andl(Imm32(CalleeTokenMask), ebx);
+ masm.movzwl(Operand(ebx, JSFunction::offsetOfNargs()), ebx);
+
+ BaseValueIndex dst(esp, ebx, sizeof(Value));
+
+ ValueOperand newTarget(ecx, edi);
+
+ masm.loadValue(src, newTarget);
+ masm.storeValue(newTarget, dst);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Construct descriptor, accounting for pushed frame pointer above
+ masm.lea(Operand(FramePointer, sizeof(void*)), ebx);
+ masm.subl(esp, ebx);
+ masm.makeFrameDescriptor(ebx, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.push(edx); // number of actual arguments
+ masm.push(eax); // callee token
+ masm.push(ebx); // descriptor
+
+ // Call the target function.
+ // Note that this assumes the function is JITted.
+ masm.andl(Imm32(CalleeTokenMask), eax);
+ masm.loadPtr(Address(eax, JSFunction::offsetOfNativeOrScript()), eax);
+ masm.loadBaselineOrIonRaw(eax, eax, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(eax);
+
+ // Remove the rectifier frame.
+ masm.pop(ebx); // ebx <- descriptor with FrameType.
+ masm.shrl(Imm32(FRAMESIZE_SHIFT), ebx); // ebx <- descriptor.
+ masm.pop(edi); // Discard calleeToken.
+ masm.pop(edi); // Discard number of actual arguments.
+
+ // Discard pushed arguments, but not the pushed frame pointer.
+ BaseIndex unwind = BaseIndex(esp, ebx, TimesOne, -int32_t(sizeof(void*)));
+ masm.lea(Operand(unwind), esp);
+
+ masm.pop(FramePointer);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+ return code;
+}
+
+static void
+PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
+{
+ // Push registers such that we can access them from [base + code].
+ if (JitSupportsSimd()) {
+ masm.PushRegsInMask(AllRegs);
+ } else {
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more(); ++iter)
+ masm.Push(*iter);
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+ }
+
+ // Push the bailout table number.
+ masm.push(Imm32(frameClass));
+
+ // The current stack pointer is the first argument to jit::Bailout.
+ masm.movl(esp, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, frameClass, eax);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ebx);
+
+ // Call the bailout function. This will correct the size of the bailout.
+ masm.setupUnalignedABICall(ecx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ masm.pop(ecx); // Get bailoutInfo outparam.
+
+ // Common size of stuff we've pushed.
+ static const uint32_t BailoutDataSize = 0
+ + sizeof(void*) // frameClass
+ + sizeof(RegisterDump);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ // We want the frameSize. Stack is:
+ // ... frame ...
+ // snapshotOffset
+ // frameSize
+ // ... bailoutFrame ...
+ masm.addl(Imm32(BailoutDataSize), esp);
+ masm.pop(ebx);
+ masm.addl(Imm32(sizeof(uint32_t)), esp);
+ masm.addl(ebx, esp);
+ } else {
+ // Stack is:
+ // ... frame ...
+ // bailoutId
+ // ... bailoutFrame ...
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+ masm.addl(Imm32(BailoutDataSize + sizeof(void*) + frameSize), esp);
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.jmp(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MacroAssembler masm;
+
+ Label bailout;
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++)
+ masm.call(&bailout);
+ masm.bind(&bailout);
+
+ GenerateBailoutThunk(cx, masm, frameClass);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
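+// Each table entry is an identically-sized call into the shared thunk, so the
+// return address pushed by whichever entry was taken identifies the bailout
+// point; it is the "bailoutId" word shown in GenerateBailoutThunk's stack
+// diagram for the fixed frame-size-class case.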
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ // Generate separate code for the wrapper.
+ MacroAssembler masm;
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ // Wrapper register set is a superset of Volatile register set.
+ JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
+
+ // The context is the first argument.
+ Register cxreg = regs.takeAny();
+
+ // Stack is:
+ // ... frame ...
+ // +8 [args]
+ // +4 descriptor
+ // +0 returnAddress
+ //
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the current stack pointer as the base for copying arguments.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = regs.takeAny();
+ masm.lea(Operand(esp, ExitFrameLayout::SizeWithFooter()), argsBase);
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.Push(UndefinedValue());
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ case Type_Bool:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(int32_t));
+ masm.movl(esp, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movl(esp, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByValue:
+ // We don't pass doubles in float registers on x86, so no need
+ // to check for argPassedInFloatReg.
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunction::DoubleByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, eax, eax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(eax, eax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.Pop(JSReturnOperand);
+ break;
+
+ case Type_Int32:
+ case Type_Pointer:
+ masm.Pop(ReturnReg);
+ break;
+
+ case Type_Bool:
+ masm.Pop(ReturnReg);
+ masm.movzbl(ReturnReg, ReturnReg);
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint)
+ masm.Pop(ReturnDoubleReg);
+ else
+ masm.assumeUnreachable("Unable to pop to float reg, with no FP support.");
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+ masm.leaveExitFrame();
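+ // retn pops the return address and then releases the given byte count,
+ // unwinding the exit frame header, the explicit argument slots, and any
+ // extra Values in one step so the caller resumes with a clean stack.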
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+ // use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm;
+
+ LiveRegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ } else {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == edx);
+ masm.movl(ImmPtr(cx->runtime()), ecx);
+
+ masm.setupUnalignedABICall(eax);
+ masm.passABIArg(ecx);
+ masm.passABIArg(edx);
+ masm.callWithABI(IonMarkFunction(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm;
+#ifndef JS_USE_LINK_REGISTER
+ // The first value contains the return address,
+ // which we pull into ICTailCallReg for tail calls.
+ masm.setFramePushed(sizeof(intptr_t));
+#endif
+
+ Register scratch1 = eax;
+ Register scratch2 = ecx;
+ Register scratch3 = edx;
+
+ // Load the return address in scratch1.
+ masm.loadPtr(Address(esp, 0), scratch1);
+
+ // Load BaselineFrame pointer in scratch2.
+ masm.mov(ebp, scratch2);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is
+ // marked during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch3);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.push(scratch1);
+ masm.push(scratch2);
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+ masm.ret();
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.mov(ebp, esp);
+ masm.pop(ebp);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(edx, ecx);
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = eax;
+ Register scratch2 = ebx;
+ Register scratch3 = esi;
+ Register scratch4 = edi;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+ // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, figure out what to do
+ // depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.movePtr(scratch1, scratch2);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch2);
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+ // Returning directly to an IonJS frame. Store the frame's return address
+ // in lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ BaseIndex stubFrameReturnAddr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(StackPointer, scratch1, TimesOne,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+ // CalleeToken |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch1, TimesOne, RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ BaseIndex stubFrameReturnAddr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ BaseIndex stubFrameSavedFramePtr(scratch2, scratch1, TimesOne,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch3);
+ masm.addPtr(Imm32(sizeof(void*)), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.lea(Operand(StackPointer, scratch1, TimesOne, JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.lea(Operand(scratch2, scratch3, TimesOne, IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}